language
stringclasses
1 value
repo
stringclasses
60 values
path
stringlengths
22
294
class_span
dict
source
stringlengths
13
1.16M
target
stringlengths
1
113
java
FasterXML__jackson-databind
src/main/java/tools/jackson/databind/util/ClassUtil.java
{ "start": 2412, "end": 4078 }
class ____ ending in the most distant one. * Class itself is included if <code>addClassItself</code> is true. *<p> * NOTE: mostly/only called to resolve mix-ins as that's where we do not care * about fully-resolved types, just associated annotations. */ public static List<Class<?>> findSuperClasses(Class<?> cls, Class<?> endBefore, boolean addClassItself) { List<Class<?>> result = new ArrayList<Class<?>>(8); if ((cls != null) && (cls != endBefore)) { if (addClassItself) { result.add(cls); } while ((cls = cls.getSuperclass()) != null) { if (cls == endBefore) { break; } result.add(cls); } } return result; } private static void _addRawSuperTypes(Class<?> cls, Class<?> endBefore, Collection<Class<?>> result, boolean addClassItself) { if (cls == endBefore || cls == null || cls == Object.class) { return; } if (addClassItself) { if (result.contains(cls)) { // already added, no need to check supers return; } result.add(cls); } for (Class<?> intCls : cls.getInterfaces()) { _addRawSuperTypes(intCls, endBefore, result, true); } _addRawSuperTypes(cls.getSuperclass(), endBefore, result, true); } /* /********************************************************************** /* Class type detection methods /********************************************************************** */ /** * @return Null if
and
java
spring-projects__spring-framework
spring-websocket/src/test/java/org/springframework/web/socket/sockjs/transport/session/HttpSockJsSessionTests.java
{ "start": 3611, "end": 4793 }
class ____ extends StreamingSockJsSession { private IOException exceptionOnWriteFrame; private boolean cacheFlushed; private boolean heartbeatScheduled; TestAbstractHttpSockJsSession(SockJsServiceConfig config, WebSocketHandler handler, Map<String, Object> attributes) { super("1", config, handler, attributes); } @Override protected byte[] getPrelude(ServerHttpRequest request) { return "hhh\n".getBytes(); } boolean wasCacheFlushed() { return this.cacheFlushed; } boolean wasHeartbeatScheduled() { return this.heartbeatScheduled; } void setExceptionOnWriteFrame(IOException exceptionOnWriteFrame) { this.exceptionOnWriteFrame = exceptionOnWriteFrame; } @Override protected void flushCache() { this.cacheFlushed = true; scheduleHeartbeat(); } @Override protected void scheduleHeartbeat() { this.heartbeatScheduled = true; } @Override protected synchronized void writeFrameInternal(SockJsFrame frame) throws IOException { if (this.exceptionOnWriteFrame != null) { throw this.exceptionOnWriteFrame; } else { super.writeFrameInternal(frame); } } } }
TestAbstractHttpSockJsSession
java
apache__dubbo
dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/ProtocolConfigTest.java
{ "start": 1544, "end": 14548 }
class ____ { @BeforeEach public void setUp() { DubboBootstrap.reset(); SysProps.clear(); SysProps.setProperty("dubbo.metrics.enabled", "false"); SysProps.setProperty("dubbo.metrics.protocol", "disabled"); } @AfterEach public void afterEach() { DubboBootstrap.reset(); SysProps.clear(); } @AfterAll public static void afterAll() { DubboBootstrap.reset(); } @Test void testName() { ProtocolConfig protocol = new ProtocolConfig(); String protocolName = "xprotocol"; protocol.setName(protocolName); Map<String, String> parameters = new HashMap<>(); ProtocolConfig.appendParameters(parameters, protocol); assertThat(protocol.getName(), equalTo(protocolName)); assertThat(protocol.getId(), equalTo(null)); assertThat(parameters.isEmpty(), is(true)); } @Test void testHost() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setHost("host"); Map<String, String> parameters = new HashMap<String, String>(); ProtocolConfig.appendParameters(parameters, protocol); assertThat(protocol.getHost(), equalTo("host")); assertThat(parameters.isEmpty(), is(true)); } @Test void testPort() { ProtocolConfig protocol = new ProtocolConfig(); int port = NetUtils.getAvailablePort(); protocol.setPort(port); Map<String, String> parameters = new HashMap<>(); ProtocolConfig.appendParameters(parameters, protocol); assertThat(protocol.getPort(), equalTo(port)); assertThat(parameters.isEmpty(), is(true)); } @Test void testPath() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setContextpath("context-path"); Map<String, String> parameters = new HashMap<>(); ProtocolConfig.appendParameters(parameters, protocol); assertThat(protocol.getPath(), equalTo("context-path")); assertThat(protocol.getContextpath(), equalTo("context-path")); assertThat(parameters.isEmpty(), is(true)); protocol.setPath("path"); assertThat(protocol.getPath(), equalTo("path")); assertThat(protocol.getContextpath(), equalTo("path")); } @Test void testCorethreads() { ProtocolConfig protocol = new ProtocolConfig(); 
protocol.setCorethreads(10); assertThat(protocol.getCorethreads(), is(10)); } @Test void testThreads() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setThreads(10); assertThat(protocol.getThreads(), is(10)); } @Test void testIothreads() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setIothreads(10); assertThat(protocol.getIothreads(), is(10)); } @Test void testQueues() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setQueues(10); assertThat(protocol.getQueues(), is(10)); } @Test void testAccepts() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setAccepts(10); assertThat(protocol.getAccepts(), is(10)); } @Test void testCodec() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setName("dubbo"); protocol.setCodec("mockcodec"); assertThat(protocol.getCodec(), equalTo("mockcodec")); } @Test void testAccesslog() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setAccesslog("access.log"); assertThat(protocol.getAccesslog(), equalTo("access.log")); } @Test void testTelnet() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setTelnet("mocktelnethandler"); assertThat(protocol.getTelnet(), equalTo("mocktelnethandler")); } @Test void testRegister() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setRegister(true); assertThat(protocol.isRegister(), is(true)); } @Test void testTransporter() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setTransporter("mocktransporter"); assertThat(protocol.getTransporter(), equalTo("mocktransporter")); } @Test void testExchanger() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setExchanger("mockexchanger"); assertThat(protocol.getExchanger(), equalTo("mockexchanger")); } @Test void testDispatcher() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setDispatcher("mockdispatcher"); assertThat(protocol.getDispatcher(), equalTo("mockdispatcher")); } @Test void testNetworker() { ProtocolConfig protocol = new ProtocolConfig(); 
protocol.setNetworker("networker"); assertThat(protocol.getNetworker(), equalTo("networker")); } @Test void testParameters() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setParameters(Collections.singletonMap("k1", "v1")); assertThat(protocol.getParameters(), hasEntry("k1", "v1")); } @Test void testDefault() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setDefault(true); assertThat(protocol.isDefault(), is(true)); } @Test void testKeepAlive() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setKeepAlive(true); assertThat(protocol.getKeepAlive(), is(true)); } @Test void testOptimizer() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setOptimizer("optimizer"); assertThat(protocol.getOptimizer(), equalTo("optimizer")); } @Test void testExtension() { ProtocolConfig protocol = new ProtocolConfig(); protocol.setExtension("extension"); assertThat(protocol.getExtension(), equalTo("extension")); } @Test void testMetaData() { ProtocolConfig config = new ProtocolConfig(); Map<String, String> metaData = config.getMetaData(); Assertions.assertEquals(0, metaData.size(), "actual: " + metaData); } @Test void testOverrideEmptyConfig() { int port = NetUtils.getAvailablePort(); // dubbo.protocol.name=rest // dubbo.protocol.port=port SysProps.setProperty("dubbo.protocol.name", "rest"); SysProps.setProperty("dubbo.protocol.port", String.valueOf(port)); try { ProtocolConfig protocolConfig = new ProtocolConfig(); DubboBootstrap.getInstance() .application("test-app") .protocol(protocolConfig) .initialize(); Assertions.assertEquals("rest", protocolConfig.getName()); Assertions.assertEquals(port, protocolConfig.getPort()); } finally { DubboBootstrap.getInstance().stop(); } } @Test void testOverrideConfigByName() { int port = NetUtils.getAvailablePort(); SysProps.setProperty("dubbo.protocols.rest.port", String.valueOf(port)); try { ProtocolConfig protocolConfig = new ProtocolConfig(); protocolConfig.setName("rest"); DubboBootstrap.getInstance() 
.application("test-app") .protocol(protocolConfig) .initialize(); Assertions.assertEquals("rest", protocolConfig.getName()); Assertions.assertEquals(port, protocolConfig.getPort()); } finally { DubboBootstrap.getInstance().stop(); } } @Test void testOverrideConfigById() { int port = NetUtils.getAvailablePort(); SysProps.setProperty("dubbo.protocols.rest1.name", "rest"); SysProps.setProperty("dubbo.protocols.rest1.port", String.valueOf(port)); try { ProtocolConfig protocolConfig = new ProtocolConfig(); protocolConfig.setName("xxx"); protocolConfig.setId("rest1"); DubboBootstrap.getInstance() .application("test-app") .protocol(protocolConfig) .initialize(); Assertions.assertEquals("rest", protocolConfig.getName()); Assertions.assertEquals(port, protocolConfig.getPort()); } finally { DubboBootstrap.getInstance().stop(); } } @Test void testCreateConfigFromPropsWithId() { int port1 = NetUtils.getAvailablePort(); int port2 = NetUtils.getAvailablePort(); SysProps.setProperty("dubbo.protocols.rest1.name", "rest"); SysProps.setProperty("dubbo.protocols.rest1.port", String.valueOf(port1)); SysProps.setProperty("dubbo.protocol.name", "dubbo"); // ignore SysProps.setProperty("dubbo.protocol.port", String.valueOf(port2)); try { DubboBootstrap bootstrap = DubboBootstrap.getInstance(); bootstrap.application("test-app").initialize(); ConfigManager configManager = bootstrap.getConfigManager(); Collection<ProtocolConfig> protocols = configManager.getProtocols(); Assertions.assertEquals(1, protocols.size()); ProtocolConfig protocol = configManager.getProtocol("rest1").get(); Assertions.assertEquals("rest", protocol.getName()); Assertions.assertEquals(port1, protocol.getPort()); } finally { DubboBootstrap.getInstance().stop(); } } @Test void testCreateConfigFromPropsWithName() { int port1 = NetUtils.getAvailablePort(); int port2 = NetUtils.getAvailablePort(); SysProps.setProperty("dubbo.protocols.rest.port", String.valueOf(port1)); SysProps.setProperty("dubbo.protocol.name", "dubbo"); 
// ignore SysProps.setProperty("dubbo.protocol.port", String.valueOf(port2)); try { DubboBootstrap bootstrap = DubboBootstrap.getInstance(); bootstrap.application("test-app").initialize(); ConfigManager configManager = bootstrap.getConfigManager(); Collection<ProtocolConfig> protocols = configManager.getProtocols(); Assertions.assertEquals(1, protocols.size()); ProtocolConfig protocol = configManager.getProtocol("rest").get(); Assertions.assertEquals("rest", protocol.getName()); Assertions.assertEquals(port1, protocol.getPort()); } finally { DubboBootstrap.getInstance().stop(); } } @Test void testCreateDefaultConfigFromProps() { int port = NetUtils.getAvailablePort(); SysProps.setProperty("dubbo.protocol.name", "rest"); SysProps.setProperty("dubbo.protocol.port", String.valueOf(port)); String protocolId = "rest-protocol"; SysProps.setProperty("dubbo.protocol.id", protocolId); // Allow override config id from props try { DubboBootstrap bootstrap = DubboBootstrap.getInstance(); bootstrap.application("test-app").initialize(); ConfigManager configManager = bootstrap.getConfigManager(); Collection<ProtocolConfig> protocols = configManager.getProtocols(); Assertions.assertEquals(1, protocols.size()); ProtocolConfig protocol = configManager.getProtocol("rest").get(); Assertions.assertEquals("rest", protocol.getName()); Assertions.assertEquals(port, protocol.getPort()); Assertions.assertEquals(protocolId, protocol.getId()); } finally { DubboBootstrap.getInstance().stop(); } } @Test void testPreferSerializationDefault1() { ProtocolConfig protocolConfig = new ProtocolConfig(); assertNull(protocolConfig.getPreferSerialization()); protocolConfig.checkDefault(); assertThat(protocolConfig.getPreferSerialization(), equalTo("hessian2,fastjson2")); protocolConfig = new ProtocolConfig(); protocolConfig.setSerialization("x-serialization"); assertNull(protocolConfig.getPreferSerialization()); protocolConfig.checkDefault(); assertThat(protocolConfig.getPreferSerialization(), 
equalTo("x-serialization")); } @Test void testPreferSerializationDefault2() { ProtocolConfig protocolConfig = new ProtocolConfig(); assertNull(protocolConfig.getPreferSerialization()); protocolConfig.refresh(); assertThat(protocolConfig.getPreferSerialization(), equalTo("hessian2,fastjson2")); protocolConfig = new ProtocolConfig(); protocolConfig.setSerialization("x-serialization"); assertNull(protocolConfig.getPreferSerialization()); protocolConfig.refresh(); assertThat(protocolConfig.getPreferSerialization(), equalTo("x-serialization")); } }
ProtocolConfigTest
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/MixedMutabilityReturnTypeTest.java
{ "start": 2236, "end": 2835 }
class ____ { @SuppressWarnings("MixedMutabilityReturnType") List<Integer> foo() { if (hashCode() > 0) { return Collections.emptyList(); } return new ArrayList<>(); } } """) .doTest(); } @Test public void tracksActualVariableTypes() { compilationHelper .addSourceLines( "Test.java", """ import java.util.Collections; import java.util.List; import java.util.ArrayList;
Test
java
apache__camel
components/camel-mongodb/src/test/java/org/apache/camel/component/mongodb/integration/MongoDbStopEndpointIT.java
{ "start": 1363, "end": 2925 }
class ____ extends AbstractMongoDbITSupport implements ConfigurableRoute { private static final String MY_ID = "myId"; String intermediate = "mongodb:myDb?database={{mongodb.testDb}}&collection={{mongodb.testCollection}}&operation=insert"; protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void configure() { from("direct:insertJsonString").routeId("insert").to(intermediate); from("direct:findById").routeId("find").to( "mongodb:myDb?database={{mongodb.testDb}}&collection={{mongodb.testCollection}}&operation=findById&dynamicity=true"); } }; } @BeforeEach void checkDocuments() { Assumptions.assumeTrue(0 == testCollection.countDocuments(), "The collection should have no documents"); } @Test public void testStopEndpoint() { template.requestBody("direct:insertJsonString", "{\"scientist\": \"Newton\", \"_id\": \"" + MY_ID + "\"}"); context.getEndpoint("mongodb:myDb?database={{mongodb.testDb}}&collection={{mongodb.testCollection}}&operation=insert") .stop(); Document result = template.requestBody("direct:findById", MY_ID, Document.class); assertEquals(MY_ID, result.get(MONGO_ID)); assertEquals("Newton", result.get("scientist")); } @RouteFixture @Override public void createRouteBuilder(CamelContext context) throws Exception { context.addRoutes(createRouteBuilder()); } }
MongoDbStopEndpointIT
java
apache__kafka
server-common/src/main/java/org/apache/kafka/server/config/QuotaConfig.java
{ "start": 3736, "end": 16491 }
class ____ implements the ClientQuotaCallback interface, " + "which is used to determine quota limits applied to client requests. " + "By default, the &lt;user&gt; and &lt;client-id&gt quotas that are stored and applied. " + "For any given request, the most specific quota that matches the user principal of the session and the client-id of the request is applied."; public static final String LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG = "leader.replication.throttled.replicas"; public static final String LEADER_REPLICATION_THROTTLED_REPLICAS_DOC = "A list of replicas for which log replication should be throttled on " + "the leader side. The list should describe a set of replicas in the form " + "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " + "all replicas for this topic."; public static final List<String> LEADER_REPLICATION_THROTTLED_REPLICAS_DEFAULT = List.of(); public static final String FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG = "follower.replication.throttled.replicas"; public static final String FOLLOWER_REPLICATION_THROTTLED_REPLICAS_DOC = "A list of replicas for which log replication should be throttled on " + "the follower side. The list should describe a set of " + "replicas in the form " + "[PartitionId]:[BrokerId],[PartitionId]:[BrokerId]:... or alternatively the wildcard '*' can be used to throttle " + "all replicas for this topic."; public static final List<String> FOLLOWER_REPLICATION_THROTTLED_REPLICAS_DEFAULT = List.of(); public static final String LEADER_REPLICATION_THROTTLED_RATE_CONFIG = "leader.replication.throttled.rate"; public static final String LEADER_REPLICATION_THROTTLED_RATE_DOC = "A long representing the upper bound (bytes/sec) on replication traffic for leaders enumerated in the " + String.format("property %s (for each topic). This property can be only set dynamically. 
It is suggested that the ", LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) + "limit be kept above 1MB/s for accurate behaviour."; public static final String FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG = "follower.replication.throttled.rate"; public static final String FOLLOWER_REPLICATION_THROTTLED_RATE_DOC = "A long representing the upper bound (bytes/sec) on replication traffic for followers enumerated in the " + String.format("property %s (for each topic). This property can be only set dynamically. It is suggested that the ", FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG) + "limit be kept above 1MB/s for accurate behaviour."; public static final String REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG = "replica.alter.log.dirs.io.max.bytes.per.second"; public static final String REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_DOC = "A long representing the upper bound (bytes/sec) on disk IO used for moving replica between log directories on the same broker. " + "This property can be only set dynamically. 
It is suggested that the limit be kept above 1MB/s for accurate behaviour."; public static final long QUOTA_BYTES_PER_SECOND_DEFAULT = Long.MAX_VALUE; public static final String PRODUCER_BYTE_RATE_OVERRIDE_CONFIG = "producer_byte_rate"; public static final String CONSUMER_BYTE_RATE_OVERRIDE_CONFIG = "consumer_byte_rate"; public static final String REQUEST_PERCENTAGE_OVERRIDE_CONFIG = "request_percentage"; public static final String CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG = "controller_mutation_rate"; public static final String IP_CONNECTION_RATE_OVERRIDE_CONFIG = "connection_creation_rate"; public static final String PRODUCER_BYTE_RATE_DOC = "A rate representing the upper bound (bytes/sec) for producer traffic."; public static final String CONSUMER_BYTE_RATE_DOC = "A rate representing the upper bound (bytes/sec) for consumer traffic."; public static final String REQUEST_PERCENTAGE_DOC = "A percentage representing the upper bound of time spent for processing requests."; public static final String CONTROLLER_MUTATION_RATE_DOC = "The rate at which mutations are accepted for the create " + "topics request, the create partitions request and the delete topics request. 
The rate is accumulated by " + "the number of partitions created or deleted."; public static final String IP_CONNECTION_RATE_DOC = "An int representing the upper bound of connections accepted " + "for the specified IP."; public static final int IP_CONNECTION_RATE_DEFAULT = Integer.MAX_VALUE; public static final ConfigDef CONFIG_DEF = new ConfigDef() .define(QuotaConfig.NUM_QUOTA_SAMPLES_CONFIG, INT, QuotaConfig.NUM_QUOTA_SAMPLES_DEFAULT, atLeast(1), LOW, QuotaConfig.NUM_QUOTA_SAMPLES_DOC) .define(QuotaConfig.NUM_REPLICATION_QUOTA_SAMPLES_CONFIG, INT, QuotaConfig.NUM_QUOTA_SAMPLES_DEFAULT, atLeast(1), LOW, QuotaConfig.NUM_REPLICATION_QUOTA_SAMPLES_DOC) .define(QuotaConfig.NUM_ALTER_LOG_DIRS_REPLICATION_QUOTA_SAMPLES_CONFIG, INT, QuotaConfig.NUM_QUOTA_SAMPLES_DEFAULT, atLeast(1), LOW, QuotaConfig.NUM_ALTER_LOG_DIRS_REPLICATION_QUOTA_SAMPLES_DOC) .define(QuotaConfig.NUM_CONTROLLER_QUOTA_SAMPLES_CONFIG, INT, QuotaConfig.NUM_QUOTA_SAMPLES_DEFAULT, atLeast(1), LOW, QuotaConfig.NUM_CONTROLLER_QUOTA_SAMPLES_DOC) .define(QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_CONFIG, INT, QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_DEFAULT, atLeast(1), LOW, QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_DOC) .define(QuotaConfig.REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_CONFIG, INT, QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_DEFAULT, atLeast(1), LOW, QuotaConfig.REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_DOC) .define(QuotaConfig.ALTER_LOG_DIRS_REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_CONFIG, INT, QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_DEFAULT, atLeast(1), LOW, QuotaConfig.ALTER_LOG_DIRS_REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_DOC) .define(QuotaConfig.CONTROLLER_QUOTA_WINDOW_SIZE_SECONDS_CONFIG, INT, QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_DEFAULT, atLeast(1), LOW, QuotaConfig.CONTROLLER_QUOTA_WINDOW_SIZE_SECONDS_DOC) .define(QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, CLASS, null, LOW, QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_DOC); private static final Set<String> USER_AND_CLIENT_QUOTA_NAMES = Set.of( 
PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, REQUEST_PERCENTAGE_OVERRIDE_CONFIG, CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG ); private static void buildUserClientQuotaConfigDef(ConfigDef configDef) { configDef.define(PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, ConfigDef.Type.LONG, Long.MAX_VALUE, ConfigDef.Importance.MEDIUM, PRODUCER_BYTE_RATE_DOC); configDef.define(CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, ConfigDef.Type.LONG, Long.MAX_VALUE, ConfigDef.Importance.MEDIUM, CONSUMER_BYTE_RATE_DOC); configDef.define(REQUEST_PERCENTAGE_OVERRIDE_CONFIG, ConfigDef.Type.DOUBLE, Integer.valueOf(Integer.MAX_VALUE).doubleValue(), ConfigDef.Importance.MEDIUM, REQUEST_PERCENTAGE_DOC); configDef.define(CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG, ConfigDef.Type.DOUBLE, Integer.valueOf(Integer.MAX_VALUE).doubleValue(), ConfigDef.Importance.MEDIUM, CONTROLLER_MUTATION_RATE_DOC); } private final int numQuotaSamples; private final int quotaWindowSizeSeconds; private final int numReplicationQuotaSamples; private final int replicationQuotaWindowSizeSeconds; private final int numAlterLogDirsReplicationQuotaSamples; private final int alterLogDirsReplicationQuotaWindowSizeSeconds; private final int numControllerQuotaSamples; private final int controllerQuotaWindowSizeSeconds; public QuotaConfig(AbstractConfig config) { this.numQuotaSamples = config.getInt(QuotaConfig.NUM_QUOTA_SAMPLES_CONFIG); this.quotaWindowSizeSeconds = config.getInt(QuotaConfig.QUOTA_WINDOW_SIZE_SECONDS_CONFIG); this.numReplicationQuotaSamples = config.getInt(QuotaConfig.NUM_REPLICATION_QUOTA_SAMPLES_CONFIG); this.replicationQuotaWindowSizeSeconds = config.getInt(QuotaConfig.REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_CONFIG); this.numAlterLogDirsReplicationQuotaSamples = config.getInt(QuotaConfig.NUM_ALTER_LOG_DIRS_REPLICATION_QUOTA_SAMPLES_CONFIG); this.alterLogDirsReplicationQuotaWindowSizeSeconds = config.getInt(QuotaConfig.ALTER_LOG_DIRS_REPLICATION_QUOTA_WINDOW_SIZE_SECONDS_CONFIG); 
this.numControllerQuotaSamples = config.getInt(QuotaConfig.NUM_CONTROLLER_QUOTA_SAMPLES_CONFIG); this.controllerQuotaWindowSizeSeconds = config.getInt(QuotaConfig.CONTROLLER_QUOTA_WINDOW_SIZE_SECONDS_CONFIG); } /** * Gets the number of samples to retain in memory for client quotas. */ public int numQuotaSamples() { return numQuotaSamples; } /** * Gets the time span of each sample for client quotas. */ public int quotaWindowSizeSeconds() { return quotaWindowSizeSeconds; } /** * Gets the number of samples to retain in memory for replication quotas. */ public int numReplicationQuotaSamples() { return numReplicationQuotaSamples; } /** * Gets the time span of each sample for replication quotas. */ public int replicationQuotaWindowSizeSeconds() { return replicationQuotaWindowSizeSeconds; } /** * Gets the number of samples to retain in memory for alter log dirs replication quotas. */ public int numAlterLogDirsReplicationQuotaSamples() { return numAlterLogDirsReplicationQuotaSamples; } /** * Gets the time span of each sample for alter log dirs replication quotas. */ public int alterLogDirsReplicationQuotaWindowSizeSeconds() { return alterLogDirsReplicationQuotaWindowSizeSeconds; } /** * Gets the number of samples to retain in memory for controller mutation quotas. */ public int numControllerQuotaSamples() { return numControllerQuotaSamples; } /** * Gets the time span of each sample for controller mutations quotas. */ public int controllerQuotaWindowSizeSeconds() { return controllerQuotaWindowSizeSeconds; } public static ConfigDef brokerQuotaConfigs() { return new ConfigDef() // Round minimum value down, to make it easier for users. 
.define(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ConfigDef.Type.LONG, QuotaConfig.QUOTA_BYTES_PER_SECOND_DEFAULT, ConfigDef.Range.atLeast(0), ConfigDef.Importance.MEDIUM, QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_DOC) .define(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, ConfigDef.Type.LONG, QuotaConfig.QUOTA_BYTES_PER_SECOND_DEFAULT, ConfigDef.Range.atLeast(0), ConfigDef.Importance.MEDIUM, QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_DOC) .define(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ConfigDef.Type.LONG, QuotaConfig.QUOTA_BYTES_PER_SECOND_DEFAULT, ConfigDef.Range.atLeast(0), ConfigDef.Importance.MEDIUM, QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_DOC); } public static ConfigDef userAndClientQuotaConfigs() { ConfigDef configDef = new ConfigDef(); buildUserClientQuotaConfigDef(configDef); return configDef; } public static ConfigDef scramMechanismsPlusUserAndClientQuotaConfigs() { ConfigDef configDef = new ConfigDef(); ScramMechanism.mechanismNames().forEach(mechanismName -> { configDef.define(mechanismName, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, "User credentials for SCRAM mechanism " + mechanismName); }); buildUserClientQuotaConfigDef(configDef); return configDef; } public static ConfigDef ipConfigs() { ConfigDef configDef = new ConfigDef(); configDef.define(IP_CONNECTION_RATE_OVERRIDE_CONFIG, ConfigDef.Type.INT, Integer.MAX_VALUE, ConfigDef.Range.atLeast(0), ConfigDef.Importance.MEDIUM, IP_CONNECTION_RATE_DOC); return configDef; } public static Boolean isClientOrUserQuotaConfig(String name) { return USER_AND_CLIENT_QUOTA_NAMES.contains(name); } }
that
java
quarkusio__quarkus
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/server/interceptors/MySecondInterceptor.java
{ "start": 418, "end": 1272 }
class ____ implements ServerInterceptor, Prioritized { private volatile long callTime; @Override public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(ServerCall<ReqT, RespT> serverCall, Metadata metadata, ServerCallHandler<ReqT, RespT> serverCallHandler) { return serverCallHandler .startCall(new ForwardingServerCall.SimpleForwardingServerCall<ReqT, RespT>(serverCall) { @Override public void close(Status status, Metadata trailers) { callTime = System.nanoTime(); super.close(status, trailers); } }, metadata); } public long getLastCall() { return callTime; } @Override public int getPriority() { return 200; } }
MySecondInterceptor
java
apache__commons-lang
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
{ "start": 3078, "end": 3568 }
class ____ Cloneable but did not implement the * clone method so could not be used. From 3.0 onwards it no longer implements * the interface. * </p> * * @since 2.2 * @deprecated As of <a href="https://commons.apache.org/proper/commons-lang/changes-report.html#a3.6">3.6</a>, use Apache Commons Text * <a href="https://commons.apache.org/proper/commons-text/javadocs/api-release/org/apache/commons/text/TextStringBuilder.html"> * TextStringBuilder</a>. */ @Deprecated public
implemented
java
spring-projects__spring-security
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/oidc/authentication/OidcUserInfoAuthenticationProvider.java
{ "start": 2383, "end": 6704 }
class ____ implements AuthenticationProvider { private final Log logger = LogFactory.getLog(getClass()); private final OAuth2AuthorizationService authorizationService; private Function<OidcUserInfoAuthenticationContext, OidcUserInfo> userInfoMapper = new DefaultOidcUserInfoMapper(); /** * Constructs an {@code OidcUserInfoAuthenticationProvider} using the provided * parameters. * @param authorizationService the authorization service */ public OidcUserInfoAuthenticationProvider(OAuth2AuthorizationService authorizationService) { Assert.notNull(authorizationService, "authorizationService cannot be null"); this.authorizationService = authorizationService; } @Override public Authentication authenticate(Authentication authentication) throws AuthenticationException { OidcUserInfoAuthenticationToken userInfoAuthentication = (OidcUserInfoAuthenticationToken) authentication; AbstractOAuth2TokenAuthenticationToken<?> accessTokenAuthentication = null; if (AbstractOAuth2TokenAuthenticationToken.class .isAssignableFrom(userInfoAuthentication.getPrincipal().getClass())) { accessTokenAuthentication = (AbstractOAuth2TokenAuthenticationToken<?>) userInfoAuthentication .getPrincipal(); } if (accessTokenAuthentication == null || !accessTokenAuthentication.isAuthenticated()) { throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN); } String accessTokenValue = accessTokenAuthentication.getToken().getTokenValue(); OAuth2Authorization authorization = this.authorizationService.findByToken(accessTokenValue, OAuth2TokenType.ACCESS_TOKEN); if (authorization == null) { throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN); } if (this.logger.isTraceEnabled()) { this.logger.trace("Retrieved authorization with access token"); } OAuth2Authorization.Token<OAuth2AccessToken> authorizedAccessToken = authorization.getAccessToken(); if (!authorizedAccessToken.isActive()) { throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN); } if 
(!authorizedAccessToken.getToken().getScopes().contains(OidcScopes.OPENID)) { throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INSUFFICIENT_SCOPE); } OAuth2Authorization.Token<OidcIdToken> idToken = authorization.getToken(OidcIdToken.class); if (idToken == null) { throw new OAuth2AuthenticationException(OAuth2ErrorCodes.INVALID_TOKEN); } if (this.logger.isTraceEnabled()) { this.logger.trace("Validated user info request"); } OidcUserInfoAuthenticationContext authenticationContext = OidcUserInfoAuthenticationContext .with(userInfoAuthentication) .accessToken(authorizedAccessToken.getToken()) .authorization(authorization) .build(); OidcUserInfo userInfo = this.userInfoMapper.apply(authenticationContext); if (this.logger.isTraceEnabled()) { this.logger.trace("Authenticated user info request"); } return new OidcUserInfoAuthenticationToken(accessTokenAuthentication, userInfo); } @Override public boolean supports(Class<?> authentication) { return OidcUserInfoAuthenticationToken.class.isAssignableFrom(authentication); } /** * Sets the {@link Function} used to extract claims from * {@link OidcUserInfoAuthenticationContext} to an instance of {@link OidcUserInfo} * for the UserInfo response. 
* * <p> * The {@link OidcUserInfoAuthenticationContext} gives the mapper access to the * {@link OidcUserInfoAuthenticationToken}, as well as, the following context * attributes: * <ul> * <li>{@link OidcUserInfoAuthenticationContext#getAccessToken()} containing the * bearer token used to make the request.</li> * <li>{@link OidcUserInfoAuthenticationContext#getAuthorization()} containing the * {@link OidcIdToken} and {@link OAuth2AccessToken} associated with the bearer token * used to make the request.</li> * </ul> * @param userInfoMapper the {@link Function} used to extract claims from * {@link OidcUserInfoAuthenticationContext} to an instance of {@link OidcUserInfo} */ public void setUserInfoMapper(Function<OidcUserInfoAuthenticationContext, OidcUserInfo> userInfoMapper) { Assert.notNull(userInfoMapper, "userInfoMapper cannot be null"); this.userInfoMapper = userInfoMapper; } private static final
OidcUserInfoAuthenticationProvider
java
netty__netty
codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoderTest.java
{ "start": 2195, "end": 14576 }
class ____ { private static final Random random = new Random(); @Test public void testCompressedFrame() { EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false)); EmbeddedChannel decoderChannel = new EmbeddedChannel( ZlibCodecFactory.newZlibDecoder(ZlibWrapper.NONE, 0)); // initialize byte[] payload = new byte[300]; random.nextBytes(payload); BinaryWebSocketFrame frame = new BinaryWebSocketFrame(true, WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload)); // execute assertTrue(encoderChannel.writeOutbound(frame)); BinaryWebSocketFrame compressedFrame = encoderChannel.readOutbound(); // test assertNotNull(compressedFrame); assertNotNull(compressedFrame.content()); assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame.rsv()); assertTrue(decoderChannel.writeInbound(compressedFrame.content())); assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload = decoderChannel.readInbound(); assertEquals(300, uncompressedPayload.readableBytes()); byte[] finalPayload = new byte[300]; uncompressedPayload.readBytes(finalPayload); assertArrayEquals(finalPayload, payload); uncompressedPayload.release(); } @Test public void testAlreadyCompressedFrame() { EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false)); // initialize byte[] payload = new byte[300]; random.nextBytes(payload); BinaryWebSocketFrame frame = new BinaryWebSocketFrame(true, WebSocketExtension.RSV3 | WebSocketExtension.RSV1, Unpooled.wrappedBuffer(payload)); // execute assertTrue(encoderChannel.writeOutbound(frame)); BinaryWebSocketFrame newFrame = encoderChannel.readOutbound(); // test assertNotNull(newFrame); assertNotNull(newFrame.content()); assertEquals(WebSocketExtension.RSV3 | WebSocketExtension.RSV1, newFrame.rsv()); assertEquals(300, newFrame.content().readableBytes()); byte[] finalPayload = new byte[300]; newFrame.content().readBytes(finalPayload); 
assertArrayEquals(finalPayload, payload); newFrame.release(); } @Test public void testFragmentedFrame() { EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false, NEVER_SKIP)); EmbeddedChannel decoderChannel = new EmbeddedChannel( ZlibCodecFactory.newZlibDecoder(ZlibWrapper.NONE, 0)); // initialize byte[] payload1 = new byte[100]; random.nextBytes(payload1); byte[] payload2 = new byte[100]; random.nextBytes(payload2); byte[] payload3 = new byte[100]; random.nextBytes(payload3); BinaryWebSocketFrame frame1 = new BinaryWebSocketFrame(false, WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload1)); ContinuationWebSocketFrame frame2 = new ContinuationWebSocketFrame(false, WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload2)); ContinuationWebSocketFrame frame3 = new ContinuationWebSocketFrame(true, WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload3)); // execute assertTrue(encoderChannel.writeOutbound(frame1)); assertTrue(encoderChannel.writeOutbound(frame2)); assertTrue(encoderChannel.writeOutbound(frame3)); BinaryWebSocketFrame compressedFrame1 = encoderChannel.readOutbound(); ContinuationWebSocketFrame compressedFrame2 = encoderChannel.readOutbound(); ContinuationWebSocketFrame compressedFrame3 = encoderChannel.readOutbound(); // test assertNotNull(compressedFrame1); assertNotNull(compressedFrame2); assertNotNull(compressedFrame3); assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame1.rsv()); assertEquals(WebSocketExtension.RSV3, compressedFrame2.rsv()); assertEquals(WebSocketExtension.RSV3, compressedFrame3.rsv()); assertFalse(compressedFrame1.isFinalFragment()); assertFalse(compressedFrame2.isFinalFragment()); assertTrue(compressedFrame3.isFinalFragment()); assertTrue(decoderChannel.writeInbound(compressedFrame1.content())); ByteBuf uncompressedPayload1 = decoderChannel.readInbound(); byte[] finalPayload1 = new byte[100]; uncompressedPayload1.readBytes(finalPayload1); 
assertArrayEquals(finalPayload1, payload1); uncompressedPayload1.release(); assertTrue(decoderChannel.writeInbound(compressedFrame2.content())); ByteBuf uncompressedPayload2 = decoderChannel.readInbound(); byte[] finalPayload2 = new byte[100]; uncompressedPayload2.readBytes(finalPayload2); assertArrayEquals(finalPayload2, payload2); uncompressedPayload2.release(); assertTrue(decoderChannel.writeInbound(compressedFrame3.content())); assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload3 = decoderChannel.readInbound(); byte[] finalPayload3 = new byte[100]; uncompressedPayload3.readBytes(finalPayload3); assertArrayEquals(finalPayload3, payload3); uncompressedPayload3.release(); } @Test public void testCompressionSkipForBinaryFrame() { EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false, ALWAYS_SKIP)); byte[] payload = new byte[300]; random.nextBytes(payload); WebSocketFrame binaryFrame = new BinaryWebSocketFrame(Unpooled.wrappedBuffer(payload)); assertTrue(encoderChannel.writeOutbound(binaryFrame.copy())); WebSocketFrame outboundFrame = encoderChannel.readOutbound(); assertEquals(0, outboundFrame.rsv()); assertArrayEquals(payload, ByteBufUtil.getBytes(outboundFrame.content())); assertTrue(outboundFrame.release()); assertFalse(encoderChannel.finish()); } @Test public void testSelectivityCompressionSkip() { WebSocketExtensionFilter selectivityCompressionFilter = new WebSocketExtensionFilter() { @Override public boolean mustSkip(WebSocketFrame frame) { return (frame instanceof TextWebSocketFrame || frame instanceof BinaryWebSocketFrame) && frame.content().readableBytes() < 100; } }; EmbeddedChannel encoderChannel = new EmbeddedChannel( new PerMessageDeflateEncoder(9, 15, false, selectivityCompressionFilter)); EmbeddedChannel decoderChannel = new EmbeddedChannel( ZlibCodecFactory.newZlibDecoder(ZlibWrapper.NONE, 0)); String textPayload = "not compressed payload"; byte[] 
binaryPayload = new byte[101]; random.nextBytes(binaryPayload); WebSocketFrame textFrame = new TextWebSocketFrame(textPayload); BinaryWebSocketFrame binaryFrame = new BinaryWebSocketFrame(Unpooled.wrappedBuffer(binaryPayload)); assertTrue(encoderChannel.writeOutbound(textFrame)); assertTrue(encoderChannel.writeOutbound(binaryFrame)); WebSocketFrame outboundTextFrame = encoderChannel.readOutbound(); //compression skipped for textFrame assertEquals(0, outboundTextFrame.rsv()); assertEquals(textPayload, outboundTextFrame.content().toString(UTF_8)); assertTrue(outboundTextFrame.release()); WebSocketFrame outboundBinaryFrame = encoderChannel.readOutbound(); //compression not skipped for binaryFrame assertEquals(WebSocketExtension.RSV1, outboundBinaryFrame.rsv()); assertTrue(decoderChannel.writeInbound(outboundBinaryFrame.content().retain())); ByteBuf uncompressedBinaryPayload = decoderChannel.readInbound(); assertArrayEquals(binaryPayload, ByteBufUtil.getBytes(uncompressedBinaryPayload)); assertTrue(outboundBinaryFrame.release()); assertTrue(uncompressedBinaryPayload.release()); assertFalse(encoderChannel.finish()); assertFalse(decoderChannel.finish()); } @Test public void testIllegalStateWhenCompressionInProgress() { WebSocketExtensionFilter selectivityCompressionFilter = new WebSocketExtensionFilter() { @Override public boolean mustSkip(WebSocketFrame frame) { return frame.content().readableBytes() < 100; } }; final EmbeddedChannel encoderChannel = new EmbeddedChannel( new PerMessageDeflateEncoder(9, 15, false, selectivityCompressionFilter)); byte[] firstPayload = new byte[200]; random.nextBytes(firstPayload); byte[] finalPayload = new byte[90]; random.nextBytes(finalPayload); BinaryWebSocketFrame firstPart = new BinaryWebSocketFrame(false, 0, Unpooled.wrappedBuffer(firstPayload)); final ContinuationWebSocketFrame finalPart = new ContinuationWebSocketFrame(true, 0, Unpooled.wrappedBuffer(finalPayload)); assertTrue(encoderChannel.writeOutbound(firstPart)); 
BinaryWebSocketFrame outboundFirstPart = encoderChannel.readOutbound(); //first part is compressed assertEquals(WebSocketExtension.RSV1, outboundFirstPart.rsv()); assertFalse(Arrays.equals(firstPayload, ByteBufUtil.getBytes(outboundFirstPart.content()))); assertTrue(outboundFirstPart.release()); //final part throwing exception try { assertThrows(EncoderException.class, new Executable() { @Override public void execute() throws Throwable { encoderChannel.writeOutbound(finalPart); } }); } finally { assertTrue(finalPart.release()); assertFalse(encoderChannel.finishAndReleaseAll()); } } @Test public void testEmptyFrameCompression() { EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false)); TextWebSocketFrame emptyFrame = new TextWebSocketFrame(""); assertTrue(encoderChannel.writeOutbound(emptyFrame)); TextWebSocketFrame emptyDeflateFrame = encoderChannel.readOutbound(); assertEquals(WebSocketExtension.RSV1, emptyDeflateFrame.rsv()); assertTrue(ByteBufUtil.equals(EMPTY_DEFLATE_BLOCK, emptyDeflateFrame.content())); // Unreleasable buffer assertFalse(emptyDeflateFrame.release()); assertFalse(encoderChannel.finish()); } @Test public void testCodecExceptionForNotFinEmptyFrame() { final EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false)); final TextWebSocketFrame emptyNotFinFrame = new TextWebSocketFrame(false, 0, ""); try { assertThrows(EncoderException.class, new Executable() { @Override public void execute() { encoderChannel.writeOutbound(emptyNotFinFrame); } }); } finally { // EmptyByteBuf buffer assertFalse(emptyNotFinFrame.release()); assertFalse(encoderChannel.finish()); } } }
PerMessageDeflateEncoderTest
java
apache__dubbo
dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/condition/config/AppStateRouterFactory.java
{ "start": 1198, "end": 1839 }
class ____ implements StateRouterFactory { public static final String NAME = "app"; @SuppressWarnings("rawtypes") private volatile StateRouter router; @SuppressWarnings("unchecked") @Override public <T> StateRouter<T> getRouter(Class<T> interfaceClass, URL url) { if (router != null) { return router; } synchronized (this) { if (router == null) { router = createRouter(url); } } return router; } private <T> StateRouter<T> createRouter(URL url) { return new AppStateRouter<>(url); } }
AppStateRouterFactory
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
{ "start": 7543, "end": 12311 }
class ____ extends HtmlBlock { final FairScheduler fs; final FSQInfo fsqinfo; @Inject QueuesBlock(ResourceManager rm, FSQInfo info) { fs = (FairScheduler)rm.getResourceScheduler(); fsqinfo = info; } @Override public void render(Block html) { html.__(MetricsOverviewTable.class); UL<DIV<DIV<Hamlet>>> ul = html. div("#cs-wrapper.ui-widget"). div(".ui-widget-header.ui-corner-top"). __("Application Queues").__(). div("#cs.ui-widget-content.ui-corner-bottom"). ul(); if (fs == null) { ul. li(). a(_Q).$style(width(Q_MAX_WIDTH)). span().$style(Q_END).__("100% ").__(). span(".q", "default").__().__(); } else { FairSchedulerInfo sinfo = new FairSchedulerInfo(fs); fsqinfo.qinfo = sinfo.getRootQueueInfo(); float used = fsqinfo.qinfo.getUsedMemoryFraction(); ul. li().$style("margin-bottom: 1em"). span().$style("font-weight: bold").__("Legend:").__(). span().$class("qlegend ui-corner-all").$style(Q_GIVEN). $title("The steady fair shares consider all queues, " + "both active (with running applications) and inactive."). __(STEADY_FAIR_SHARE).__(). span().$class("qlegend ui-corner-all").$style(Q_INSTANTANEOUS_FS). $title("The instantaneous fair shares consider only active " + "queues (with running applications)."). __(INSTANTANEOUS_FAIR_SHARE).__(). span().$class("qlegend ui-corner-all").$style(Q_UNDER). __("Used").__(). span().$class("qlegend ui-corner-all").$style(Q_OVER). __("Used (over fair share)").__(). span().$class("qlegend ui-corner-all ui-state-default"). __("Max Capacity").__(). __(). li(). a(_Q).$style(width(Q_MAX_WIDTH)). span().$style(join(width(used), ";left:0%;", used > 1 ? Q_OVER : Q_UNDER)).__(".").__(). span(".q", "root").__(). span().$class("qstats").$style(left(Q_STATS_POS)). __(join(percent(used), " used")).__(). __(QueueBlock.class).__(); } ul.__().__(). script().$type("text/javascript"). __("$('#cs').hide();").__().__(). __(FairSchedulerAppsBlock.class); } } @Override protected void postHead(Page.HTML<__> html) { html. style().$type("text/css"). 
__("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }", "#cs ul { list-style: none }", "#cs a { font-weight: normal; margin: 2px; position: relative }", "#cs a span { font-weight: normal; font-size: 80% }", "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }", ".qstats { font-weight: normal; font-size: 80%; position: absolute }", ".qlegend { font-weight: normal; padding: 0 1em; margin: 1em }", "table.info tr th {width: 50%}").__(). // to center info table script("/static/jt/jquery.jstree.js"). script().$type("text/javascript"). __("$(function() {", " $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');", " $('#cs').bind('loaded.jstree', function (e, data) {", " var callback = { call:reopenQueryNodes }", " data.inst.open_node('#pq', callback);", " }).", " jstree({", " core: { animation: 188, html_titles: true },", " plugins: ['themeroller', 'html_data', 'ui'],", " themeroller: { item_open: 'ui-icon-minus',", " item_clsd: 'ui-icon-plus', item_leaf: 'ui-icon-gear'", " }", " });", " $('#cs').bind('select_node.jstree', function(e, data) {", " var queues = $('.q', data.rslt.obj);", " var q = '^' + queues.first().text();", " q += queues.length == 1 ? '$' : '\\\\.';", " $('#apps').dataTable().fnFilter(q, 4, true);", " });", " $('#cs').show();", "});").__(). __(SchedulerPageUtil.QueueBlockUtil.class); } @Override protected Class<? extends SubView> content() { return QueuesBlock.class; } @Override protected String initAppsTable() { return WebPageUtils.appsTableInit(true, false); } static String percent(float f) { return StringUtils.formatPercent(f, 1); } static String width(float f) { return StringUtils.format("width:%.1f%%", f * 100); } static String left(float f) { return StringUtils.format("left:%.1f%%", f * 100); } }
QueuesBlock
java
apache__camel
components/camel-jcache/src/test/java/org/apache/camel/component/jcache/JCacheProducerReplaceTest.java
{ "start": 1318, "end": 6598 }
class ____ extends JCacheComponentTestSupport { @Test public void testReplace() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); final String key = randomString(); final String val = randomString(); final String val1 = randomString(); cache.put(key, val); headers.clear(); headers.put(JCacheConstants.ACTION, "REPLACE"); headers.put(JCacheConstants.KEY, key); sendBody("direct:replace", val1, headers); MockEndpoint mock = getMockEndpoint("mock:replace"); mock.expectedMinimumMessageCount(1); mock.expectedHeaderReceived(JCacheConstants.KEY, key); mock.expectedHeaderReceived(JCacheConstants.RESULT, true); mock.expectedMessagesMatches(new Predicate() { @Override public boolean matches(Exchange exchange) { assertNotNull(exchange.getIn().getBody(), "body"); return exchange.getIn().getBody().equals(val1); } }); mock.assertIsSatisfied(); assertEquals(val1, cache.get(key)); } @Test public void testReplaceIf() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); final String key = randomString(); final String val = randomString(); final String val1 = randomString(); cache.put(key, val); headers.clear(); headers.put(JCacheConstants.ACTION, "REPLACE"); headers.put(JCacheConstants.KEY, key); headers.put(JCacheConstants.OLD_VALUE, val); sendBody("direct:replace", val1, headers); MockEndpoint mock = getMockEndpoint("mock:replace"); mock.expectedMinimumMessageCount(1); mock.expectedHeaderReceived(JCacheConstants.KEY, key); mock.expectedHeaderReceived(JCacheConstants.RESULT, true); mock.expectedMessagesMatches(new Predicate() { @Override public boolean matches(Exchange exchange) { assertNotNull(exchange.getIn().getBody(), "body"); return exchange.getIn().getBody().equals(val1); } }); mock.assertIsSatisfied(); assertEquals(val1, cache.get(key)); } @Test public void 
testReplaceIfFailure() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); final String key = randomString(); final String val = randomString(); final String val1 = randomString(); cache.put(key, val); headers.clear(); headers.put(JCacheConstants.ACTION, "REPLACE"); headers.put(JCacheConstants.KEY, key); headers.put(JCacheConstants.OLD_VALUE, val1); sendBody("direct:replace", val1, headers); MockEndpoint mock = getMockEndpoint("mock:replace"); mock.expectedMinimumMessageCount(1); mock.expectedHeaderReceived(JCacheConstants.KEY, key); mock.expectedHeaderReceived(JCacheConstants.RESULT, false); mock.expectedMessagesMatches(new Predicate() { @Override public boolean matches(Exchange exchange) { assertNotNull(exchange.getIn().getBody(), "body"); return exchange.getIn().getBody().equals(val1); } }); mock.assertIsSatisfied(); assertEquals(val, cache.get(key)); } @Test public void testReplaceFail() throws Exception { final Map<String, Object> headers = new HashMap<>(); final Cache<Object, Object> cache = getCacheFromEndpoint("jcache://test-cache"); final String key = randomString(); final String val = randomString(); assertFalse(cache.containsKey(key)); headers.clear(); headers.put(JCacheConstants.ACTION, "REPLACE"); headers.put(JCacheConstants.KEY, key); sendBody("direct:replace-fail", val, headers); MockEndpoint mock = getMockEndpoint("mock:replace-fail"); mock.expectedMinimumMessageCount(1); mock.expectedHeaderReceived(JCacheConstants.KEY, key); mock.expectedHeaderReceived(JCacheConstants.RESULT, false); mock.expectedMessagesMatches(new Predicate() { @Override public boolean matches(Exchange exchange) { assertNotNull(exchange.getIn().getBody(), "body"); return exchange.getIn().getBody().equals(val); } }); mock.assertIsSatisfied(); assertFalse(cache.containsKey(key)); } @Override protected RouteBuilder createRouteBuilder() { return new RouteBuilder() { public void 
configure() { from("direct:replace") .to("jcache://test-cache") .to("mock:replace"); from("direct:replace-fail") .to("jcache://test-cache") .to("mock:replace-fail"); } }; } }
JCacheProducerReplaceTest
java
apache__kafka
metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java
{ "start": 4730, "end": 35343 }
class ____ { public static final Endpoint PLAINTEXT = new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "127.0.0.1", 9020); public static final Endpoint CONTROLLER = new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "127.0.0.1", 9020); public record AuthorizerTestServerInfo(Collection<Endpoint> endpoints) implements AuthorizerServerInfo { public AuthorizerTestServerInfo { assertFalse(endpoints.isEmpty()); } @Override public ClusterResource clusterResource() { return new ClusterResource(Uuid.fromString("r7mqHQrxTNmzbKvCvWZzLQ").toString()); } @Override public int brokerId() { return 0; } @Override public Endpoint interBrokerEndpoint() { return endpoints.iterator().next(); } @Override public Collection<String> earlyStartListeners() { List<String> result = new ArrayList<>(); for (Endpoint endpoint : endpoints) { if (endpoint.listener().equals("CONTROLLER")) { result.add(endpoint.listener()); } } return result; } } private final Metrics metrics = new Metrics(); @Test public void testGetConfiguredSuperUsers() { assertEquals(Set.of(), getConfiguredSuperUsers(Map.of())); assertEquals(Set.of(), getConfiguredSuperUsers(Map.of(SUPER_USERS_CONFIG, " "))); assertEquals(Set.of("User:bob", "User:alice"), getConfiguredSuperUsers(Map.of(SUPER_USERS_CONFIG, "User:bob;User:alice "))); assertEquals(Set.of("User:bob", "User:alice"), getConfiguredSuperUsers(Map.of(SUPER_USERS_CONFIG, "; User:bob ; User:alice "))); assertEquals("expected a string in format principalType:principalName but got bob", assertThrows(IllegalArgumentException.class, () -> getConfiguredSuperUsers( Map.of(SUPER_USERS_CONFIG, "bob;:alice"))).getMessage()); } @Test public void testGetDefaultResult() { assertEquals(DENIED, getDefaultResult(Map.of())); assertEquals(ALLOWED, getDefaultResult(Map.of( ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true"))); assertEquals(DENIED, getDefaultResult(Map.of( ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "false"))); } @Test public void 
testAllowEveryoneIfNoAclFoundConfigEnabled() throws Exception { Map<String, Object> configs = Map.of( SUPER_USERS_CONFIG, "User:alice;User:chris", ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true"); StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(configs); List<StandardAclWithId> acls = List.of( withId(new StandardAcl(TOPIC, "topic1", LITERAL, "User:Alice", WILDCARD, READ, ALLOW)) ); acls.forEach(acl -> authorizer.addAcl(acl.id(), acl.acl())); assertEquals(List.of(DENIED), authorizer.authorize( new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, "Bob")) .build(), List.of(newAction(READ, TOPIC, "topic1")) )); assertEquals(List.of(ALLOWED), authorizer.authorize( new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, "Bob")) .build(), List.of(newAction(READ, TOPIC, "topic2")) )); } @Test public void testAllowEveryoneIfNoAclFoundConfigDisabled() throws Exception { Map<String, Object> configs = Map.of( SUPER_USERS_CONFIG, "User:alice;User:chris", ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "false"); StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(configs); List<StandardAclWithId> acls = List.of( withId(new StandardAcl(TOPIC, "topic1", LITERAL, "User:Alice", WILDCARD, READ, ALLOW)) ); acls.forEach(acl -> authorizer.addAcl(acl.id(), acl.acl())); assertEquals(List.of(DENIED), authorizer.authorize( new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, "Bob")) .build(), List.of(newAction(READ, TOPIC, "topic1")) )); assertEquals(List.of(DENIED), authorizer.authorize( new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, "Bob")) .build(), List.of(newAction(READ, TOPIC, "topic2")) )); } @Test public void testConfigure() { Map<String, Object> configs = Map.of( SUPER_USERS_CONFIG, "User:alice;User:chris", ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true"); StandardAuthorizer authorizer = 
createAndInitializeStandardAuthorizer(configs); assertEquals(Set.of("User:alice", "User:chris"), authorizer.superUsers()); assertEquals(ALLOWED, authorizer.defaultResult()); } private static Action newAction(AclOperation aclOperation, ResourceType resourceType, String resourceName) { return new Action(aclOperation, new ResourcePattern(resourceType, resourceName, LITERAL), 1, false, false); } private StandardAuthorizer createAndInitializeStandardAuthorizer() { return createAndInitializeStandardAuthorizer(Map.of(SUPER_USERS_CONFIG, "User:superman")); } private StandardAuthorizer createAndInitializeStandardAuthorizer(Map<String, Object> configs) { StandardAuthorizer authorizer = new StandardAuthorizer(); authorizer.configure(configs); authorizer.withPluginMetrics(new PluginMetricsImpl(metrics, Map.of())); authorizer.start(new AuthorizerTestServerInfo(List.of(PLAINTEXT))); authorizer.completeInitialLoad(); return authorizer; } private static StandardAcl newFooAcl(AclOperation op, AclPermissionType permission) { return new StandardAcl( TOPIC, "foo_", PREFIXED, "User:bob", WILDCARD, op, permission); } private static StandardAclWithId withId(StandardAcl acl) { return new StandardAclWithId(new Uuid(acl.hashCode(), acl.hashCode()), acl); } @Test public void testFindResultImplication() throws Exception { // These permissions all imply DESCRIBE. for (AclOperation op : List.of(DESCRIBE, READ, WRITE, DELETE, ALTER)) { assertEquals(ALLOWED, findResult(newAction(DESCRIBE, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(op, ALLOW))); } // CREATE does not imply DESCRIBE assertNull(findResult(newAction(DESCRIBE, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(CREATE, ALLOW))); // Deny ACLs don't do "implication". 
for (AclOperation op : List.of(READ, WRITE, DELETE, ALTER)) { assertNull(findResult(newAction(DESCRIBE, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(op, DENY))); } // Exact match assertEquals(DENIED, findResult(newAction(DESCRIBE, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(DESCRIBE, DENY))); // These permissions all imply DESCRIBE_CONFIGS. for (AclOperation op : List.of(DESCRIBE_CONFIGS, ALTER_CONFIGS)) { assertEquals(ALLOWED, findResult(newAction(DESCRIBE_CONFIGS, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(op, ALLOW))); } // Deny ACLs don't do "implication". assertNull(findResult(newAction(DESCRIBE_CONFIGS, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(ALTER_CONFIGS, DENY))); // Exact match assertEquals(DENIED, findResult(newAction(ALTER_CONFIGS, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(ALTER_CONFIGS, DENY))); } private static StandardAcl newBarAcl(AclOperation op, AclPermissionType permission) { return new StandardAcl( GROUP, "bar", LITERAL, WILDCARD_PRINCIPAL, WILDCARD, op, permission); } @Test public void testFindResultPrincipalMatching() throws Exception { assertEquals(ALLOWED, findResult(newAction(READ, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), newFooAcl(READ, ALLOW))); // Principal does not match. assertNull(findResult(newAction(READ, TOPIC, "foo_bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "alice")).build(), newFooAcl(READ, ALLOW))); // Wildcard principal matches anything. 
assertEquals(DENIED, findResult(newAction(READ, GROUP, "bar"), new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "alice")).build(), newBarAcl(READ, DENY))); } private static void assertContains(Iterable<AclBinding> iterable, StandardAcl... acls) { Iterator<AclBinding> iterator = iterable.iterator(); for (int i = 0; iterator.hasNext(); i++) { AclBinding acl = iterator.next(); assertTrue(i < acls.length, "Only expected " + i + " element(s)"); assertEquals(acls[i].toBinding(), acl, "Unexpected element " + i); } assertFalse(iterator.hasNext(), "Expected only " + acls.length + " element(s)"); } @Test public void testListAcls() { StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); List<StandardAclWithId> fooAcls = List.of( withId(newFooAcl(READ, ALLOW)), withId(newFooAcl(WRITE, ALLOW))); List<StandardAclWithId> barAcls = List.of( withId(newBarAcl(DESCRIBE_CONFIGS, DENY)), withId(newBarAcl(ALTER_CONFIGS, DENY))); fooAcls.forEach(a -> authorizer.addAcl(a.id(), a.acl())); barAcls.forEach(a -> authorizer.addAcl(a.id(), a.acl())); assertContains(authorizer.acls(AclBindingFilter.ANY), fooAcls.get(0).acl(), fooAcls.get(1).acl(), barAcls.get(0).acl(), barAcls.get(1).acl()); authorizer.removeAcl(fooAcls.get(1).id()); assertContains(authorizer.acls(AclBindingFilter.ANY), fooAcls.get(0).acl(), barAcls.get(0).acl(), barAcls.get(1).acl()); assertContains(authorizer.acls(new AclBindingFilter(new ResourcePatternFilter( TOPIC, null, PatternType.ANY), AccessControlEntryFilter.ANY)), fooAcls.get(0).acl()); } @Test public void testSimpleAuthorizations() throws Exception { StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); List<StandardAclWithId> fooAcls = List.of( withId(newFooAcl(READ, ALLOW)), withId(newFooAcl(WRITE, ALLOW))); List<StandardAclWithId> barAcls = List.of( withId(newBarAcl(DESCRIBE_CONFIGS, ALLOW)), withId(newBarAcl(ALTER_CONFIGS, ALLOW))); fooAcls.forEach(a -> authorizer.addAcl(a.id(), 
a.acl())); barAcls.forEach(a -> authorizer.addAcl(a.id(), a.acl())); assertEquals(List.of(ALLOWED), authorizer.authorize(new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), List.of(newAction(READ, TOPIC, "foo_")))); assertEquals(List.of(ALLOWED), authorizer.authorize(new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "fred")).build(), List.of(newAction(ALTER_CONFIGS, GROUP, "bar")))); } @Test public void testDenyPrecedenceWithOperationAll() throws Exception { StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); List<StandardAcl> acls = List.of( new StandardAcl(TOPIC, "foo", LITERAL, "User:alice", "*", ALL, DENY), new StandardAcl(TOPIC, "foo", PREFIXED, "User:alice", "*", READ, ALLOW), new StandardAcl(TOPIC, "foo", LITERAL, "User:*", "*", ALL, DENY), new StandardAcl(TOPIC, "foo", PREFIXED, "User:*", "*", DESCRIBE, ALLOW) ); acls.forEach(acl -> { StandardAclWithId aclWithId = withId(acl); authorizer.addAcl(aclWithId.id(), aclWithId.acl()); }); assertEquals(List.of(DENIED, DENIED, DENIED, ALLOWED), authorizer.authorize( newRequestContext("alice"), List.of( newAction(WRITE, TOPIC, "foo"), newAction(READ, TOPIC, "foo"), newAction(DESCRIBE, TOPIC, "foo"), newAction(READ, TOPIC, "foobar")))); assertEquals(List.of(DENIED, DENIED, DENIED, ALLOWED, DENIED), authorizer.authorize( newRequestContext("bob"), List.of( newAction(DESCRIBE, TOPIC, "foo"), newAction(READ, TOPIC, "foo"), newAction(WRITE, TOPIC, "foo"), newAction(DESCRIBE, TOPIC, "foobaz"), newAction(READ, TOPIC, "foobaz")))); } @Test public void testTopicAclWithOperationAll() throws Exception { StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); List<StandardAcl> acls = List.of( new StandardAcl(TOPIC, "foo", LITERAL, "User:*", "*", ALL, ALLOW), new StandardAcl(TOPIC, "bar", PREFIXED, "User:alice", "*", ALL, ALLOW), new StandardAcl(TOPIC, "baz", LITERAL, "User:bob", "*", ALL, ALLOW) ); 
acls.forEach(acl -> { StandardAclWithId aclWithId = withId(acl); authorizer.addAcl(aclWithId.id(), aclWithId.acl()); }); assertEquals(List.of(ALLOWED, ALLOWED, DENIED), authorizer.authorize( newRequestContext("alice"), List.of( newAction(WRITE, TOPIC, "foo"), newAction(DESCRIBE_CONFIGS, TOPIC, "bar"), newAction(DESCRIBE, TOPIC, "baz")))); assertEquals(List.of(ALLOWED, DENIED, ALLOWED), authorizer.authorize( newRequestContext("bob"), List.of( newAction(WRITE, TOPIC, "foo"), newAction(READ, TOPIC, "bar"), newAction(DESCRIBE, TOPIC, "baz")))); assertEquals(List.of(ALLOWED, DENIED, DENIED), authorizer.authorize( newRequestContext("malory"), List.of( newAction(DESCRIBE, TOPIC, "foo"), newAction(WRITE, TOPIC, "bar"), newAction(READ, TOPIC, "baz")))); } private AuthorizableRequestContext newRequestContext(String principal) throws Exception { return new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, principal)) .build(); } @Test public void testHostAddressAclValidation() throws Exception { InetAddress host1 = InetAddress.getByName("192.168.1.1"); InetAddress host2 = InetAddress.getByName("192.168.1.2"); StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); List<StandardAcl> acls = List.of( new StandardAcl(TOPIC, "foo", LITERAL, "User:alice", host1.getHostAddress(), READ, DENY), new StandardAcl(TOPIC, "foo", LITERAL, "User:alice", "*", READ, ALLOW), new StandardAcl(TOPIC, "bar", LITERAL, "User:bob", host2.getHostAddress(), READ, ALLOW), new StandardAcl(TOPIC, "bar", LITERAL, "User:*", InetAddress.getLocalHost().getHostAddress(), DESCRIBE, ALLOW) ); acls.forEach(acl -> { StandardAclWithId aclWithId = withId(acl); authorizer.addAcl(aclWithId.id(), aclWithId.acl()); }); List<Action> actions = List.of( newAction(READ, TOPIC, "foo"), newAction(READ, TOPIC, "bar"), newAction(DESCRIBE, TOPIC, "bar") ); assertEquals(List.of(ALLOWED, DENIED, ALLOWED), authorizer.authorize( newRequestContext("alice", 
InetAddress.getLocalHost()), actions)); assertEquals(List.of(DENIED, DENIED, DENIED), authorizer.authorize( newRequestContext("alice", host1), actions)); assertEquals(List.of(ALLOWED, DENIED, DENIED), authorizer.authorize( newRequestContext("alice", host2), actions)); assertEquals(List.of(DENIED, DENIED, ALLOWED), authorizer.authorize( newRequestContext("bob", InetAddress.getLocalHost()), actions)); assertEquals(List.of(DENIED, DENIED, DENIED), authorizer.authorize( newRequestContext("bob", host1), actions)); assertEquals(List.of(DENIED, ALLOWED, ALLOWED), authorizer.authorize( newRequestContext("bob", host2), actions)); } private AuthorizableRequestContext newRequestContext(String principal, InetAddress clientAddress) throws Exception { return new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, principal)) .setClientAddress(clientAddress) .build(); } private static void addManyAcls(StandardAuthorizer authorizer) { List<StandardAcl> acls = List.of( new StandardAcl(TOPIC, "green2", LITERAL, "User:*", "*", READ, ALLOW), new StandardAcl(TOPIC, "green", PREFIXED, "User:bob", "*", READ, ALLOW), new StandardAcl(TOPIC, "betamax4", LITERAL, "User:bob", "*", READ, ALLOW), new StandardAcl(TOPIC, "betamax", LITERAL, "User:bob", "*", READ, ALLOW), new StandardAcl(TOPIC, "beta", PREFIXED, "User:*", "*", READ, ALLOW), new StandardAcl(TOPIC, "alpha", PREFIXED, "User:*", "*", READ, ALLOW), new StandardAcl(TOPIC, "alp", PREFIXED, "User:bob", "*", READ, DENY), new StandardAcl(GROUP, "*", LITERAL, "User:bob", "*", WRITE, ALLOW), new StandardAcl(GROUP, "wheel", LITERAL, "User:*", "*", WRITE, DENY) ); acls.forEach(acl -> { StandardAclWithId aclWithId = withId(acl); authorizer.addAcl(aclWithId.id(), aclWithId.acl()); }); } @Test public void testAuthorizationWithManyAcls() throws Exception { StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); addManyAcls(authorizer); assertEquals(List.of(ALLOWED, DENIED), authorizer.authorize(new 
MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), List.of(newAction(READ, TOPIC, "green1"), newAction(WRITE, GROUP, "wheel")))); assertEquals(List.of(DENIED, ALLOWED, DENIED), authorizer.authorize(new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), List.of(newAction(READ, TOPIC, "alpha"), newAction(WRITE, GROUP, "arbitrary"), newAction(READ, TOPIC, "ala")))); } @ParameterizedTest @ValueSource(booleans = {true, false}) public void testDenyAuditLogging(boolean logIfDenied) throws Exception { try (MockedStatic<LoggerFactory> mockedLoggerFactory = Mockito.mockStatic(LoggerFactory.class)) { Logger otherLog = Mockito.mock(Logger.class); Logger auditLog = Mockito.mock(Logger.class); mockedLoggerFactory .when(() -> LoggerFactory.getLogger("kafka.authorizer.logger")) .thenReturn(auditLog); mockedLoggerFactory .when(() -> LoggerFactory.getLogger(Mockito.any(Class.class))) .thenReturn(otherLog); Mockito.when(auditLog.isDebugEnabled()).thenReturn(true); Mockito.when(auditLog.isTraceEnabled()).thenReturn(true); StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); addManyAcls(authorizer); ResourcePattern topicResource = new ResourcePattern(TOPIC, "alpha", LITERAL); Action action = new Action(READ, topicResource, 1, false, logIfDenied); MockAuthorizableRequestContext requestContext = new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")) .setClientAddress(InetAddress.getByName("127.0.0.1")) .build(); assertEquals(List.of(DENIED), authorizer.authorize(requestContext, List.of(action))); String expectedAuditLog = "Principal = User:bob is Denied operation = READ " + "from host = 127.0.0.1 on resource = Topic:LITERAL:alpha for request = Fetch " + "with resourceRefCount = 1 based on rule MatchingAcl(acl=StandardAcl[resourceType=TOPIC, " + "resourceName=alp, patternType=PREFIXED, principal=User:bob, host=*, 
operation=READ, " + "permissionType=DENY])"; if (logIfDenied) { Mockito.verify(auditLog).info(expectedAuditLog); } else { Mockito.verify(auditLog).trace(expectedAuditLog); } } } @ParameterizedTest @ValueSource(booleans = {true, false}) public void testAllowAuditLogging(boolean logIfAllowed) throws Exception { try (MockedStatic<LoggerFactory> mockedLoggerFactory = Mockito.mockStatic(LoggerFactory.class)) { Logger otherLog = Mockito.mock(Logger.class); Logger auditLog = Mockito.mock(Logger.class); mockedLoggerFactory .when(() -> LoggerFactory.getLogger("kafka.authorizer.logger")) .thenReturn(auditLog); mockedLoggerFactory .when(() -> LoggerFactory.getLogger(Mockito.any(Class.class))) .thenReturn(otherLog); Mockito.when(auditLog.isDebugEnabled()).thenReturn(true); Mockito.when(auditLog.isTraceEnabled()).thenReturn(true); StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); addManyAcls(authorizer); ResourcePattern topicResource = new ResourcePattern(TOPIC, "green1", LITERAL); Action action = new Action(READ, topicResource, 1, logIfAllowed, false); MockAuthorizableRequestContext requestContext = new MockAuthorizableRequestContext.Builder() .setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")) .setClientAddress(InetAddress.getByName("127.0.0.1")) .build(); assertEquals(List.of(ALLOWED), authorizer.authorize(requestContext, List.of(action))); String expectedAuditLog = "Principal = User:bob is Allowed operation = READ " + "from host = 127.0.0.1 on resource = Topic:LITERAL:green1 for request = Fetch " + "with resourceRefCount = 1 based on rule MatchingAcl(acl=StandardAcl[resourceType=TOPIC, " + "resourceName=green, patternType=PREFIXED, principal=User:bob, host=*, operation=READ, " + "permissionType=ALLOW])"; if (logIfAllowed) { Mockito.verify(auditLog).debug(expectedAuditLog); } else { Mockito.verify(auditLog).trace(expectedAuditLog); } } } /** * Test that StandardAuthorizer#start returns a completed future for early start * listeners. 
*/ @Test public void testStartWithEarlyStartListeners() { StandardAuthorizer authorizer = new StandardAuthorizer(); authorizer.configure(Map.of(SUPER_USERS_CONFIG, "User:superman")); Map<Endpoint, ? extends CompletionStage<Void>> futures2 = authorizer. start(new AuthorizerTestServerInfo(List.of(PLAINTEXT, CONTROLLER))); assertEquals(Set.of(PLAINTEXT, CONTROLLER), futures2.keySet()); assertFalse(futures2.get(PLAINTEXT).toCompletableFuture().isDone()); assertTrue(futures2.get(CONTROLLER).toCompletableFuture().isDone()); } /** * Test attempts to authorize prior to completeInitialLoad. During this time, only * superusers can be authorized. Other users will get an AuthorizerNotReadyException * exception. Not even an authorization result, just an exception thrown for the whole * batch. */ @Test public void testAuthorizationPriorToCompleteInitialLoad() throws Exception { StandardAuthorizer authorizer = new StandardAuthorizer(); authorizer.configure(Map.of(SUPER_USERS_CONFIG, "User:superman")); authorizer.withPluginMetrics(new PluginMetricsImpl(new Metrics(), Map.of())); assertThrows(AuthorizerNotReadyException.class, () -> authorizer.authorize(new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "bob")).build(), List.of(newAction(READ, TOPIC, "green1"), newAction(READ, TOPIC, "green2")))); assertEquals(List.of(ALLOWED, ALLOWED), authorizer.authorize(new MockAuthorizableRequestContext.Builder(). setPrincipal(new KafkaPrincipal(USER_TYPE, "superman")).build(), List.of(newAction(READ, TOPIC, "green1"), newAction(WRITE, GROUP, "wheel")))); } @Test public void testCompleteInitialLoad() { StandardAuthorizer authorizer = new StandardAuthorizer(); authorizer.configure(Map.of(SUPER_USERS_CONFIG, "User:superman")); Map<Endpoint, ? extends CompletionStage<Void>> futures = authorizer. 
start(new AuthorizerTestServerInfo(Set.of(PLAINTEXT))); assertEquals(Set.of(PLAINTEXT), futures.keySet()); assertFalse(futures.get(PLAINTEXT).toCompletableFuture().isDone()); authorizer.completeInitialLoad(); assertTrue(futures.get(PLAINTEXT).toCompletableFuture().isDone()); assertFalse(futures.get(PLAINTEXT).toCompletableFuture().isCompletedExceptionally()); } @Test public void testCompleteInitialLoadWithException() { StandardAuthorizer authorizer = new StandardAuthorizer(); authorizer.configure(Map.of(SUPER_USERS_CONFIG, "User:superman")); Map<Endpoint, ? extends CompletionStage<Void>> futures = authorizer. start(new AuthorizerTestServerInfo(List.of(PLAINTEXT, CONTROLLER))); assertEquals(Set.of(PLAINTEXT, CONTROLLER), futures.keySet()); assertFalse(futures.get(PLAINTEXT).toCompletableFuture().isDone()); assertTrue(futures.get(CONTROLLER).toCompletableFuture().isDone()); authorizer.completeInitialLoad(new TimeoutException("timed out")); assertTrue(futures.get(PLAINTEXT).toCompletableFuture().isDone()); assertTrue(futures.get(PLAINTEXT).toCompletableFuture().isCompletedExceptionally()); assertTrue(futures.get(CONTROLLER).toCompletableFuture().isDone()); assertFalse(futures.get(CONTROLLER).toCompletableFuture().isCompletedExceptionally()); } @Test public void testPrefixAcls() throws Exception { StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); List<StandardAcl> acls = List.of( new StandardAcl(TOPIC, "fooa", PREFIXED, "User:alice", "*", ALL, ALLOW), new StandardAcl(TOPIC, "foobar", LITERAL, "User:bob", "*", ALL, ALLOW), new StandardAcl(TOPIC, "f", PREFIXED, "User:bob", "*", ALL, ALLOW) ); acls.forEach(acl -> { StandardAclWithId aclWithId = withId(acl); authorizer.addAcl(aclWithId.id(), aclWithId.acl()); }); assertEquals(List.of(ALLOWED, DENIED, ALLOWED), authorizer.authorize( newRequestContext("bob"), List.of( newAction(WRITE, TOPIC, "foobarr"), newAction(READ, TOPIC, "goobar"), newAction(READ, TOPIC, "fooa")))); assertEquals(List.of(ALLOWED, 
DENIED, DENIED), authorizer.authorize( newRequestContext("alice"), List.of( newAction(DESCRIBE, TOPIC, "fooa"), newAction(WRITE, TOPIC, "bar"), newAction(READ, TOPIC, "baz")))); } @Test public void testAuthorizerMetrics() throws Exception { // There's always 1 metrics by default, the metrics count assertEquals(1, metrics.metrics().size()); StandardAuthorizer authorizer = createAndInitializeStandardAuthorizer(); assertEquals(List.of(ALLOWED), authorizer.authorize( new MockAuthorizableRequestContext.Builder().setPrincipal(new KafkaPrincipal(USER_TYPE, "superman")).build(), List.of(newAction(READ, TOPIC, "green")))); // StandardAuthorizer has 4 metrics assertEquals(5, metrics.metrics().size()); } }
StandardAuthorizerTest
java
apache__hadoop
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
{ "start": 3885, "end": 43145 }
class ____ { private static final String A_CUSTOM_RESOURCE = "a-custom-resource"; final static String TEST_DIR = new File(System.getProperty("test.build.data", "/tmp")).getAbsolutePath(); final static String ALLOC_FILE = new File(TEST_DIR, "test-queues").getAbsolutePath(); private static final String TEST_FAIRSCHED_XML = "test-fair-scheduler.xml"; private FairScheduler scheduler; private Configuration conf; @BeforeEach public void setup() { SystemClock clock = SystemClock.getInstance(); PlacementManager placementManager = new PlacementManager(); FairSchedulerConfiguration fsConf = new FairSchedulerConfiguration(); RMContext rmContext = mock(RMContext.class); when(rmContext.getQueuePlacementManager()).thenReturn(placementManager); scheduler = mock(FairScheduler.class); conf = new YarnConfiguration(); when(scheduler.getClock()).thenReturn(clock); when(scheduler.getConf()).thenReturn(fsConf); when(scheduler.getConfig()).thenReturn(conf); when(scheduler.getRMContext()).thenReturn(rmContext); } @AfterEach public void teardown() { new File(ALLOC_FILE).delete(); } @Test public void testGetAllocationFileFromFileSystem() throws IOException, URISyntaxException { File baseDir = new File(TEST_DIR + Path.SEPARATOR + "getAllocHDFS").getAbsoluteFile(); FileUtil.fullyDelete(baseDir); conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath()); MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf); MiniDFSCluster hdfsCluster = builder.build(); String fsAllocPath = "hdfs://localhost:" + hdfsCluster.getNameNodePort() + Path.SEPARATOR + TEST_FAIRSCHED_XML; URL fschedURL = Thread.currentThread().getContextClassLoader() .getResource(TEST_FAIRSCHED_XML); FileSystem fs = FileSystem.get(conf); fs.copyFromLocalFile(new Path(fschedURL.toURI()), new Path(fsAllocPath)); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, fsAllocPath); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); Path allocationFile = 
allocLoader.getAllocationFile(conf); assertEquals(fsAllocPath, allocationFile.toString()); assertTrue(fs.exists(allocationFile)); hdfsCluster.shutdown(true); } @Test public void testDenyGetAllocationFileFromUnsupportedFileSystem() throws UnsupportedFileSystemException { assertThrows(UnsupportedFileSystemException.class, () -> { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, "badfs:///badfile"); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.getAllocationFile(conf); }); } @Test public void testGetAllocationFileFromClasspath() { try { FileSystem fs = FileSystem.get(conf); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, TEST_FAIRSCHED_XML); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); Path allocationFile = allocLoader.getAllocationFile(conf); assertEquals(TEST_FAIRSCHED_XML, allocationFile.getName()); assertTrue(fs.exists(allocationFile)); } catch (IOException e) { fail("Unable to access allocation file from classpath: " + e); } } @Test @Timeout(value = 10) public void testReload() throws Exception { AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("queueA") .maxRunningApps(1).build()) .addQueue(new AllocationFileQueue.Builder("queueB").build()) .queuePlacementPolicy(new AllocationFileQueuePlacementPolicy() .addRule(new AllocationFileQueuePlacementRule( AllocationFileQueuePlacementRule.RuleName.DEFAULT))) .writeToFile(ALLOC_FILE); ControlledClock clock = new ControlledClock(); clock.setTime(0); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService( clock, scheduler); allocLoader.reloadIntervalMs = 5; allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration allocConf = confHolder.allocConf; // Verify conf List<PlacementRule> rules = 
scheduler.getRMContext() .getQueuePlacementManager().getPlacementRules(); assertEquals(1, rules.size()); assertEquals(DefaultPlacementRule.class, rules.get(0).getClass()); assertEquals(1, allocConf.getQueueMaxApps("root.queueA")); assertEquals(2, allocConf.getConfiguredQueues().get(FSQueueType.LEAF) .size()); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF) .contains("root.queueA")); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF) .contains("root.queueB")); // reset the conf so we can detect the reload confHolder.allocConf = null; // Modify file and advance the clock AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("queueB") .maxRunningApps(3).build()) .queuePlacementPolicy(new AllocationFileQueuePlacementPolicy() .addRule(new AllocationFileQueuePlacementRule( AllocationFileQueuePlacementRule.RuleName.SPECIFIED)) .addRule(new AllocationFileQueuePlacementRule( AllocationFileQueuePlacementRule.RuleName.NESTED) .addNestedRule(new AllocationFileQueuePlacementRule( AllocationFileQueuePlacementRule.RuleName.PRIMARY_GROUP)))) .writeToFile(ALLOC_FILE); clock.tickMsec(System.currentTimeMillis() + AllocationFileLoaderService.ALLOC_RELOAD_WAIT_MS + 10000); allocLoader.start(); while (confHolder.allocConf == null) { Thread.sleep(20); } // Verify conf allocConf = confHolder.allocConf; rules = scheduler.getRMContext().getQueuePlacementManager() .getPlacementRules(); assertEquals(2, rules.size()); assertEquals(SpecifiedPlacementRule.class, rules.get(0).getClass()); assertEquals(UserPlacementRule.class, rules.get(1).getClass()); assertEquals(PrimaryGroupPlacementRule.class, ((FSPlacementRule)(rules.get(1))).getParentRule().getClass()); assertEquals(3, allocConf.getQueueMaxApps("root.queueB")); assertEquals(1, allocConf.getConfiguredQueues().get(FSQueueType.LEAF) .size()); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF) .contains("root.queueB")); } @Test public void testAllocationFileParsing() throws Exception 
{ CustomResourceTypesConfigurationProvider. initResourceTypes(A_CUSTOM_RESOURCE); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); AllocationFileWriter.create() // Give queue A a minimum of 1024 M .addQueue(new AllocationFileQueue.Builder("queueA") .minResources("1024mb,0vcores") .maxResources("2048mb,10vcores") .build()) // Give queue B a minimum of 2048 M .addQueue(new AllocationFileQueue.Builder("queueB") .minResources("2048mb,0vcores") .maxResources("5120mb,110vcores") .aclAdministerApps("alice,bob admins") .schedulingPolicy("fair") .build()) // Give queue C no minimum .addQueue(new AllocationFileQueue.Builder("queueC") .minResources("5120mb,0vcores") .aclSubmitApps("alice,bob admins") .build()) // Give queue D a limit of 3 running apps and 0.4f maxAMShare .addQueue(new AllocationFileQueue.Builder("queueD") .maxRunningApps(3) .maxAMShare(0.4) .build()) // Give queue E a preemption timeout of one minute .addQueue(new AllocationFileQueue.Builder("queueE") .minSharePreemptionTimeout(60) .build()) // Make queue F a parent queue without configured leaf queues // using the 'type' attribute .addQueue(new AllocationFileQueue.Builder("queueF") .parent(true) .maxChildResources("2048mb,64vcores") .build()) .addQueue(new AllocationFileQueue.Builder("queueG") .maxChildResources("2048mb,64vcores") .fairSharePreemptionTimeout(120) .minSharePreemptionTimeout(50) .fairSharePreemptionThreshold(0.6) .maxContainerAllocation( "vcores=16, memory-mb=512, " + A_CUSTOM_RESOURCE + "=10") // Create hierarchical queues G,H, with different min/fair // share preemption timeouts and preemption thresholds. // Also add a child default to make sure it doesn't impact queue H. 
.subQueue(new AllocationFileQueue.Builder("queueH") .fairSharePreemptionTimeout(180) .minSharePreemptionTimeout(40) .fairSharePreemptionThreshold(0.7) .maxContainerAllocation("1024mb,8vcores") .build()) .build()) // Set default limit of apps per queue to 15 .queueMaxAppsDefault(15) // Set default limit of max resource per queue to 4G and 100 cores .queueMaxResourcesDefault("4096mb,100vcores") // Set default limit of apps per user to 5 .userMaxAppsDefault(5) // Set default limit of AMResourceShare to 0.5f .queueMaxAMShareDefault(0.5) // Set default min share preemption timeout to 2 minutes .defaultMinSharePreemptionTimeout(120) // Set default fair share preemption timeout to 5 minutes .defaultFairSharePreemptionTimeout(300) // Set default fair share preemption threshold to 0.4 .defaultFairSharePreemptionThreshold(0.4) // Set default scheduling policy to DRF .drfDefaultQueueSchedulingPolicy() // Give user1 a limit of 10 jobs .userSettings(new UserSettings.Builder("user1") .maxRunningApps(10) .build()) .writeToFile(ALLOC_FILE); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf = confHolder.allocConf; assertEquals(6, queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertEquals(Resources.createResource(0), queueConf.getMinResources("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.createResource(2048, 10), queueConf.getMaxResources("root.queueA").getResource()); assertEquals(Resources.createResource(5120, 110), queueConf.getMaxResources("root.queueB").getResource()); assertEquals(Resources.createResource(4096, 100), queueConf.getMaxResources("root.queueC").getResource()); assertEquals(Resources.createResource(4096, 100), queueConf.getMaxResources("root.queueD").getResource()); assertEquals(Resources.createResource(4096, 100), queueConf.getMaxResources("root.queueE").getResource()); assertEquals(Resources.createResource(4096, 100), queueConf.getMaxResources("root.queueF").getResource()); assertEquals(Resources.createResource(4096, 100), queueConf.getMaxResources("root.queueG").getResource()); assertEquals(Resources.createResource(4096, 100), queueConf.getMaxResources("root.queueG.queueH").getResource()); assertEquals(Resources.createResource(1024, 0), queueConf.getMinResources("root.queueA")); assertEquals(Resources.createResource(2048, 0), queueConf.getMinResources("root.queueB")); assertEquals(Resources.createResource(5120, 0), queueConf.getMinResources("root.queueC")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueD")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueE")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueF")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueG")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueG.queueH")); assertNull(queueConf.getMaxChildResources("root.queueA"), "Max child resources unexpectedly set for queue root.queueA"); assertNull(queueConf.getMaxChildResources("root.queueB"), "Max child resources unexpectedly set for queue root.queueB"); assertNull(queueConf.getMaxChildResources("root.queueC"), "Max child resources unexpectedly set for queue root.queueC"); 
assertNull(queueConf.getMaxChildResources("root.queueD"), "Max child resources unexpectedly set for queue root.queueD"); assertNull(queueConf.getMaxChildResources("root.queueE"), "Max child resources unexpectedly set for queue root.queueE"); assertEquals(Resources.createResource(2048, 64), queueConf.getMaxChildResources("root.queueF").getResource()); assertEquals(Resources.createResource(2048, 64), queueConf.getMaxChildResources("root.queueG").getResource()); assertNull(queueConf.getMaxChildResources("root.queueG.queueH"), "Max child resources unexpectedly set for " + "queue root.queueG.queueH"); assertEquals(15, queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(15, queueConf.getQueueMaxApps("root.queueA")); assertEquals(15, queueConf.getQueueMaxApps("root.queueB")); assertEquals(15, queueConf.getQueueMaxApps("root.queueC")); assertEquals(3, queueConf.getQueueMaxApps("root.queueD")); assertEquals(15, queueConf.getQueueMaxApps("root.queueE")); assertEquals(10, queueConf.getUserMaxApps("user1")); assertEquals(5, queueConf.getUserMaxApps("user2")); assertEquals(.5f, queueConf.getQueueMaxAMShare("root." + YarnConfiguration.DEFAULT_QUEUE_NAME), 0.01); assertEquals(.5f, queueConf.getQueueMaxAMShare("root.queueA"), 0.01); assertEquals(.5f, queueConf.getQueueMaxAMShare("root.queueB"), 0.01); assertEquals(.5f, queueConf.getQueueMaxAMShare("root.queueC"), 0.01); assertEquals(.4f, queueConf.getQueueMaxAMShare("root.queueD"), 0.01); assertEquals(.5f, queueConf.getQueueMaxAMShare("root.queueE"), 0.01); Resource expectedResourceWithCustomType = Resources.createResource(512, 16); expectedResourceWithCustomType.setResourceValue(A_CUSTOM_RESOURCE, 10); assertEquals(Resources.unbounded(), queueConf.getQueueMaxContainerAllocation( "root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.unbounded(), queueConf.getQueueMaxContainerAllocation("root.queueA")); assertEquals(Resources.unbounded(), queueConf.getQueueMaxContainerAllocation("root.queueB")); assertEquals(Resources.unbounded(), queueConf.getQueueMaxContainerAllocation("root.queueC")); assertEquals(Resources.unbounded(), queueConf.getQueueMaxContainerAllocation("root.queueD")); assertEquals(Resources.unbounded(), queueConf.getQueueMaxContainerAllocation("root.queueE")); assertEquals(Resources.unbounded(), queueConf.getQueueMaxContainerAllocation("root.queueF")); assertEquals(expectedResourceWithCustomType, queueConf.getQueueMaxContainerAllocation("root.queueG")); assertEquals(Resources.createResource(1024, 8), queueConf.getQueueMaxContainerAllocation("root.queueG.queueH")); assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueA")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueB")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueC")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueD")); assertEquals(60000, queueConf.getMinSharePreemptionTimeout("root.queueE")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueF")); assertEquals(50000, queueConf.getMinSharePreemptionTimeout("root.queueG")); assertEquals(40000, queueConf.getMinSharePreemptionTimeout("root.queueG.queueH")); assertEquals(300000, queueConf.getFairSharePreemptionTimeout("root")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueA")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueB")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueC")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueD")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueE")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueF")); assertEquals(120000, queueConf.getFairSharePreemptionTimeout("root.queueG")); assertEquals(180000, queueConf.getFairSharePreemptionTimeout("root.queueG.queueH")); assertEquals(.4f, queueConf.getFairSharePreemptionThreshold("root"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root." + YarnConfiguration.DEFAULT_QUEUE_NAME), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueA"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueB"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueC"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueD"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueE"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueF"), 0.01); assertEquals(.6f, queueConf.getFairSharePreemptionThreshold("root.queueG"), 0.01); assertEquals(.7f, queueConf.getFairSharePreemptionThreshold("root.queueG.queueH"), 0.01); assertTrue(queueConf.getConfiguredQueues() .get(FSQueueType.PARENT) .contains("root.queueF")); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT) .contains("root.queueG")); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.LEAF) .contains("root.queueG.queueH")); // Verify existing queues have default scheduling policy assertEquals(DominantResourceFairnessPolicy.NAME, queueConf.getSchedulingPolicy("root").getName()); assertEquals(DominantResourceFairnessPolicy.NAME, 
queueConf.getSchedulingPolicy("root.queueA").getName()); // Verify default is overriden if specified explicitly assertEquals(FairSharePolicy.NAME, queueConf.getSchedulingPolicy("root.queueB").getName()); // Verify new queue gets default scheduling policy assertEquals(DominantResourceFairnessPolicy.NAME, queueConf.getSchedulingPolicy("root.newqueue").getName()); } @Test public void testBackwardsCompatibleAllocationFileParsing() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); AllocationFileWriter.create() .useLegacyTagNameForQueues() // Give queue A a minimum of 1024 M .addQueue(new AllocationFileQueue.Builder("queueA") .minResources("1024mb,0vcores") .build()) // Give queue B a minimum of 2048 M .addQueue(new AllocationFileQueue.Builder("queueB") .minResources("2048mb,0vcores") .aclAdministerApps("alice,bob admins") .build()) // Give queue C no minimum .addQueue(new AllocationFileQueue.Builder("queueC") .aclAdministerApps("alice,bob admins") .build()) // Give queue D a limit of 3 running apps .addQueue(new AllocationFileQueue.Builder("queueD") .maxRunningApps(3) .build()) // Give queue E a preemption timeout of one minute and 0.3f threshold .addQueue(new AllocationFileQueue.Builder("queueE") .minSharePreemptionTimeout(60) .fairSharePreemptionThreshold(0.3) .build()) // Set default limit of apps per queue to 15 .queueMaxAppsDefault(15) // Set default limit of apps per user to 5 .userMaxAppsDefault(5) // Set default limit of max resource per queue to 4G and 100 cores .queueMaxResourcesDefault("4096mb,100vcores") // Set default limit of AMResourceShare to 0.5f .queueMaxAMShareDefault(0.5) // Set default min share preemption timeout to 2 minutes .defaultMinSharePreemptionTimeout(120) // Set default fair share preemption timeout to 5 minutes .defaultFairSharePreemptionTimeout(300) // Set default fair share preemption threshold to 0.6 
.defaultFairSharePreemptionThreshold(0.6) // Set default scheduling policy to DRF .drfDefaultQueueSchedulingPolicy() // Give user1 a limit of 10 jobs .userSettings(new UserSettings.Builder("user1") .maxRunningApps(10) .build()) .writeToFile(ALLOC_FILE); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf = confHolder.allocConf; assertEquals(5, queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertEquals(Resources.createResource(0), queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.createResource(0), queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.createResource(1024, 0), queueConf.getMinResources("root.queueA")); assertEquals(Resources.createResource(2048, 0), queueConf.getMinResources("root.queueB")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueC")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueD")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueE")); assertEquals(15, queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(15, queueConf.getQueueMaxApps("root.queueA")); assertEquals(15, queueConf.getQueueMaxApps("root.queueB")); assertEquals(15, queueConf.getQueueMaxApps("root.queueC")); assertEquals(3, queueConf.getQueueMaxApps("root.queueD")); assertEquals(15, queueConf.getQueueMaxApps("root.queueE")); assertEquals(10, queueConf.getUserMaxApps("user1")); assertEquals(5, queueConf.getUserMaxApps("user2")); assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueA")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueB")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueC")); assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root.queueD")); assertEquals(60000, queueConf.getMinSharePreemptionTimeout("root.queueE")); assertEquals(300000, queueConf.getFairSharePreemptionTimeout("root")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueA")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueB")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueC")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueD")); assertEquals(-1, queueConf.getFairSharePreemptionTimeout("root.queueE")); assertEquals(.6f, queueConf.getFairSharePreemptionThreshold("root"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueA"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueB"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueC"), 0.01); assertEquals(-1, queueConf.getFairSharePreemptionThreshold("root.queueD"), 0.01); assertEquals(.3f, queueConf.getFairSharePreemptionThreshold("root.queueE"), 0.01); } @Test public void testSimplePlacementPolicyFromConf() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); conf.setBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS, false); conf.setBoolean(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, false); AllocationFileWriter.create().writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); List<PlacementRule> rules = scheduler.getRMContext() .getQueuePlacementManager().getPlacementRules(); assertEquals(2, rules.size()); assertEquals(SpecifiedPlacementRule.class, rules.get(0).getClass()); assertFalse(((FSPlacementRule)rules.get(0)).getCreateFlag(), "Create flag was not set to false"); assertEquals(DefaultPlacementRule.class, rules.get(1).getClass()); } /** * Verify that you can't place queues at the same level as the root queue in * the allocations file. 
*/ @Test public void testQueueAlongsideRoot() throws Exception { assertThrows(AllocationConfigurationException.class, () -> { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("root").build()) .addQueue(new AllocationFileQueue.Builder("other").build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); }); } /** * Verify that you can't include periods as the queue name in the allocations * file. */ @Test public void testQueueNameContainingPeriods() throws Exception { assertThrows(AllocationConfigurationException.class, () -> { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("parent1.child").build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); }); } /** * Verify that you can't have the queue name with whitespace only in the * allocations file. 
*/ @Test public void testQueueNameContainingOnlyWhitespace() throws Exception { assertThrows(AllocationConfigurationException.class, () -> { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder(" ").build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); }); } @Test public void testParentTagWithReservation() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("parent") .parent(true) .reservation() .build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); try { allocLoader.reloadAllocations(); } catch (AllocationConfigurationException ex) { assertEquals(ex.getMessage(), "The configuration settings for root.parent" + " are invalid. 
A queue element that contains child queue elements" + " or that has the type='parent' attribute cannot also include a" + " reservation element."); } } @Test public void testParentWithReservation() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("parent") .parent(true) .subQueue(new AllocationFileQueue.Builder("child").build()) .reservation() .build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); try { allocLoader.reloadAllocations(); } catch (AllocationConfigurationException ex) { assertEquals(ex.getMessage(), "The configuration settings for root.parent" + " are invalid. A queue element that contains child queue elements" + " or that has the type='parent' attribute cannot also include a" + " reservation element."); } } /** * Verify that a parent queue (type = parent) cannot have a maxAMShare element * as dynamic queues won't be able to inherit this setting. */ @Test public void testParentTagWithMaxAMShare() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("parent") .parent(true) .maxAMShare(0.75) .build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); try { allocLoader.reloadAllocations(); fail("Expect allocation parsing to fail as maxAMShare cannot be set for" + " a parent queue."); } catch (AllocationConfigurationException ex) { assertEquals(ex.getMessage(), "The configuration settings for root.parent" + " are invalid. 
A queue element that contains child queue elements" + " or that has the type='parent' attribute cannot also include a" + " maxAMShare element."); } } /** * Verify that a parent queue that is not explicitly tagged with "type" * as "parent" but has a child queue (implicit parent) cannot have a * maxAMShare element. */ @Test public void testParentWithMaxAMShare() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("parent") .parent(false) .maxAMShare(0.76) .subQueue(new AllocationFileQueue.Builder("child").build()) .build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); try { allocLoader.reloadAllocations(); fail("Expect allocation parsing to fail as maxAMShare cannot be set for" + " a parent queue."); } catch (AllocationConfigurationException ex) { assertEquals(ex.getMessage(), "The configuration settings for root.parent" + " are invalid. 
A queue element that contains child queue elements" + " or that has the type='parent' attribute cannot also include a" + " maxAMShare element."); } } @Test public void testParentTagWithChild() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("parent") .parent(true) .subQueue(new AllocationFileQueue.Builder("child").build()) .build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf = confHolder.allocConf; // Check whether queue 'parent' and 'child' are loaded successfully assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT) .contains("root.parent")); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.LEAF) .contains("root.parent.child")); } /** * Verify that you can't have the queue name with just a non breaking * whitespace in the allocations file. */ @Test public void testQueueNameContainingNBWhitespace() throws Exception { assertThrows(AllocationConfigurationException.class, () -> { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("\u00a0").build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); }); } /** * Verify that defaultQueueSchedulingMode can't accept FIFO as a value. 
*/ @Test public void testDefaultQueueSchedulingModeIsFIFO() throws Exception { assertThrows(AllocationConfigurationException.class, () -> { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .fifoDefaultQueueSchedulingPolicy() .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); }); } @Test public void testReservableQueue() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("reservable") .reservation() .build()) .addQueue(new AllocationFileQueue.Builder("other").build()) .reservationAgent("DummyAgentName") .reservationPolicy("AnyAdmissionPolicy") .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration allocConf = confHolder.allocConf; String reservableQueueName = "root.reservable"; String nonreservableQueueName = "root.other"; QueuePath nonreservableQueuePath = new QueuePath(nonreservableQueueName); QueuePath reservableQueuePath = new QueuePath(reservableQueueName); assertFalse(allocConf.isReservable(nonreservableQueuePath)); assertTrue(allocConf.isReservable(reservableQueuePath)); Map<FSQueueType, Set<String>> configuredQueues = allocConf.getConfiguredQueues(); assertTrue(configuredQueues.get(FSQueueType.PARENT).contains(reservableQueueName), "reservable queue is expected be to a parent queue"); assertFalse(configuredQueues.get(FSQueueType.LEAF) .contains(reservableQueueName), "reservable queue should not be a leaf queue"); assertTrue(allocConf.getMoveOnExpiry(reservableQueuePath)); 
assertEquals(ReservationSchedulerConfiguration.DEFAULT_RESERVATION_WINDOW, allocConf.getReservationWindow(reservableQueuePath)); assertEquals(100, allocConf.getInstantaneousMaxCapacity(reservableQueuePath), 0.0001); assertEquals("DummyAgentName", allocConf.getReservationAgent(reservableQueuePath)); assertEquals(100, allocConf.getAverageCapacity(reservableQueuePath), 0.001); assertFalse(allocConf.getShowReservationAsQueues(reservableQueuePath)); assertEquals("AnyAdmissionPolicy", allocConf.getReservationAdmissionPolicy(reservableQueuePath)); assertEquals(ReservationSchedulerConfiguration .DEFAULT_RESERVATION_PLANNER_NAME, allocConf.getReplanner(reservableQueuePath)); assertEquals(ReservationSchedulerConfiguration .DEFAULT_RESERVATION_ENFORCEMENT_WINDOW, allocConf.getEnforcementWindow(reservableQueuePath)); } /** * Verify that you can't have dynamic user queue and reservable queue on * the same queue. */ @Test public void testReservableCannotBeCombinedWithDynamicUserQueue() throws Exception { assertThrows(AllocationConfigurationException.class, () -> { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); AllocationFileWriter.create() .addQueue(new AllocationFileQueue.Builder("notboth") .parent(true) .reservation() .build()) .writeToFile(ALLOC_FILE); AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(scheduler); allocLoader.init(conf); ReloadListener confHolder = new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); }); } private
TestAllocationFileLoaderService
java
junit-team__junit5
junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/execution/InvocationInterceptorChain.java
{ "start": 3542, "end": 4919 }
class ____<T extends @Nullable Object> implements Invocation<T> { private static final Logger logger = LoggerFactory.getLogger(ValidatingInvocation.class); private final AtomicBoolean invokedOrSkipped = new AtomicBoolean(); private final Invocation<T> delegate; private final List<InvocationInterceptor> interceptors; ValidatingInvocation(Invocation<T> delegate, List<InvocationInterceptor> interceptors) { this.delegate = delegate; this.interceptors = interceptors; } @Override public T proceed() throws Throwable { markInvokedOrSkipped(); return delegate.proceed(); } @Override public void skip() { logger.debug(() -> "The invocation is skipped"); markInvokedOrSkipped(); delegate.skip(); } private void markInvokedOrSkipped() { if (!invokedOrSkipped.compareAndSet(false, true)) { fail("Chain of InvocationInterceptors called invocation multiple times instead of just once"); } } void verifyInvokedAtLeastOnce() { if (!invokedOrSkipped.get()) { fail("Chain of InvocationInterceptors never called invocation"); } } private void fail(String prefix) { String commaSeparatedInterceptorClasses = interceptors.stream().map(Object::getClass).map( Class::getName).collect(joining(", ")); throw new JUnitException(prefix + ": " + commaSeparatedInterceptorClasses); } } }
ValidatingInvocation
java
apache__camel
dsl/camel-dsl-support/src/main/java/org/apache/camel/dsl/support/SourceLoader.java
{ "start": 1157, "end": 1509 }
interface ____ { /** * Loads the source from the given resource * * @param resource the resource * @return the source code (such as java, xml, groovy, yaml) * * @throws IOException is thrown if error loading the source */ String loadResource(Resource resource) throws IOException; }
SourceLoader
java
quarkusio__quarkus
extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/runtime/RestClientReactiveConfig.java
{ "start": 408, "end": 1203 }
interface ____ { /** * By default, RESTEasy Reactive uses text/plain content type for String values * and application/json for everything else. * <p> * MicroProfile Rest Client spec requires the implementations to always default to application/json. * This build item disables the "smart" behavior of RESTEasy Reactive to comply to the spec */ @WithName("disable-smart-produces") @WithDefault("false") boolean disableSmartProduces(); /** * Whether providers (filters, etc.) annotated with {@link jakarta.ws.rs.ext.Provider} should be * automatically registered for all the clients in the application. */ @WithName("provider-autodiscovery") @WithDefault("true") boolean providerAutodiscovery(); }
RestClientReactiveConfig
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ECS2EndpointBuilderFactory.java
{ "start": 21287, "end": 22699 }
class ____ { /** * The internal instance of the builder used to access to all the * methods representing the name of headers. */ private static final ECS2HeaderNameBuilder INSTANCE = new ECS2HeaderNameBuilder(); /** * The operation we want to perform. * * The option is a: {@code String} type. * * Group: producer * * @return the name of the header {@code AwsECSOperation}. */ public String awsECSOperation() { return "CamelAwsECSOperation"; } /** * The limit number of results while listing clusters. * * The option is a: {@code Integer} type. * * Group: producer * * @return the name of the header {@code AwsECSMaxResults}. */ public String awsECSMaxResults() { return "CamelAwsECSMaxResults"; } /** * The cluster name. * * The option is a: {@code String} type. * * Group: producer * * @return the name of the header {@code AwsECSClusterName}. */ public String awsECSClusterName() { return "CamelAwsECSClusterName"; } } static ECS2EndpointBuilder endpointBuilder(String componentName, String path) {
ECS2HeaderNameBuilder
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/ConfigParameterHelper.java
{ "start": 519, "end": 1453 }
class ____ { public static Map<String, String> extractConfigParameters(ConfigParameterContainer container) { return extractConfigParameters( container.getConfigParameters() ); } public static Properties extractConfigParametersAsProperties(ConfigParameterContainer container) { final Properties properties = new Properties(); properties.putAll( extractConfigParameters( container.getConfigParameters() ) ); return properties; } private static Map<String, String> extractConfigParameters(List<JaxbHbmConfigParameterType> paramElementList) { if ( CollectionHelper.isEmpty( paramElementList ) ) { return Collections.emptyMap(); } final Map<String,String> params = new HashMap<>(); for ( JaxbHbmConfigParameterType paramElement : paramElementList ) { params.put( paramElement.getName(), paramElement.getValue() ); } return params; } private ConfigParameterHelper() { } }
ConfigParameterHelper
java
google__dagger
javatests/dagger/internal/codegen/RawTypeInjectionTest.java
{ "start": 4146, "end": 4912 }
class ____ {", " @Inject Bar(Foo foo) {}", // Fail: requesting raw type "}"); CompilerTests.daggerCompiler(component, foo, bar) .compile( subject -> { subject.hasErrorCount(1); subject.hasErrorContaining( "Foo cannot be provided without an @Provides-annotated method.") .onSource(component) .onLine(6); }); } @Test public void rawProvidesReturnTest() { Source component = CompilerTests.javaSource( "test.TestComponent", "package test;", "", "import dagger.Component;", "", "@Component(modules = TestModule.class)", "
Bar
java
apache__camel
components/camel-kubernetes/src/main/java/org/apache/camel/component/kubernetes/services/KubernetesServicesProducer.java
{ "start": 1807, "end": 9172 }
class ____ extends DefaultProducer { private static final Logger LOG = LoggerFactory.getLogger(KubernetesServicesProducer.class); public KubernetesServicesProducer(AbstractKubernetesEndpoint endpoint) { super(endpoint); } @Override public AbstractKubernetesEndpoint getEndpoint() { return (AbstractKubernetesEndpoint) super.getEndpoint(); } @Override public void process(Exchange exchange) throws Exception { String operation = KubernetesHelper.extractOperation(getEndpoint(), exchange); switch (operation) { case KubernetesOperations.LIST_SERVICES_OPERATION: doList(exchange); break; case KubernetesOperations.LIST_SERVICES_BY_LABELS_OPERATION: doListServiceByLabels(exchange); break; case KubernetesOperations.GET_SERVICE_OPERATION: doGetService(exchange); break; case KubernetesOperations.CREATE_SERVICE_OPERATION: doCreateService(exchange); break; case KubernetesOperations.UPDATE_SERVICE_OPERATION: doUpdateService(exchange); break; case KubernetesOperations.DELETE_SERVICE_OPERATION: doDeleteService(exchange); break; default: throw new IllegalArgumentException("Unsupported operation " + operation); } } protected void doList(Exchange exchange) { String namespace = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class); ServiceList servicesList; if (ObjectHelper.isEmpty(namespace)) { servicesList = getEndpoint().getKubernetesClient().services().inAnyNamespace().list(); } else { servicesList = getEndpoint().getKubernetesClient().services().inNamespace(namespace).list(); } prepareOutboundMessage(exchange, servicesList.getItems()); } protected void doListServiceByLabels(Exchange exchange) { String namespace = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class); Map<String, String> labels = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_SERVICE_LABELS, Map.class); ServiceList servicesList; if (ObjectHelper.isEmpty(labels)) { LOG.error("Listing Services by labels requires specifying labels"); throw new 
IllegalArgumentException("Listing Services by labels requires specifying labels"); } if (ObjectHelper.isEmpty(namespace)) { servicesList = getEndpoint().getKubernetesClient().services().inAnyNamespace().withLabels(labels).list(); } else { servicesList = getEndpoint().getKubernetesClient().services().inNamespace(namespace).withLabels(labels).list(); } prepareOutboundMessage(exchange, servicesList.getItems()); } protected void doGetService(Exchange exchange) { String serviceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_SERVICE_NAME, String.class); String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class); if (ObjectHelper.isEmpty(serviceName)) { LOG.error("Get a specific service require specify a service name"); throw new IllegalArgumentException("Get a specific service require specify a service name"); } if (ObjectHelper.isEmpty(namespaceName)) { LOG.error("Get a specific service require specify a namespace name"); throw new IllegalArgumentException("Get a specific service require specify a namespace name"); } Service service = getEndpoint().getKubernetesClient().services().inNamespace(namespaceName).withName(serviceName).get(); prepareOutboundMessage(exchange, service); } protected void doUpdateService(Exchange exchange) { doCreateOrUpdateService(exchange, "Update", Resource::update); } protected void doCreateService(Exchange exchange) { doCreateOrUpdateService(exchange, "Create", Resource::create); } private void doCreateOrUpdateService( Exchange exchange, String operationName, Function<Resource<Service>, Service> operation) { String serviceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_SERVICE_NAME, String.class); String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class); ServiceSpec serviceSpec = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_SERVICE_SPEC, ServiceSpec.class); if 
(ObjectHelper.isEmpty(serviceName)) { LOG.error("{} a specific service require specify a service name", operationName); throw new IllegalArgumentException( String.format("%s a specific service require specify a service name", operationName)); } if (ObjectHelper.isEmpty(namespaceName)) { LOG.error("{} a specific service require specify a namespace name", operationName); throw new IllegalArgumentException( String.format("%s a specific service require specify a namespace name", operationName)); } if (ObjectHelper.isEmpty(serviceSpec)) { LOG.error("{} a specific service require specify a service spec bean", operationName); throw new IllegalArgumentException( String.format("%s a specific service require specify a service spec bean", operationName)); } Map<String, String> labels = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_SERVICE_LABELS, Map.class); Service serviceCreating = new ServiceBuilder().withNewMetadata().withName(serviceName).withLabels(labels).endMetadata() .withSpec(serviceSpec).build(); Service service = operation.apply( getEndpoint().getKubernetesClient().services().inNamespace(namespaceName).resource(serviceCreating)); prepareOutboundMessage(exchange, service); } protected void doDeleteService(Exchange exchange) { String serviceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_SERVICE_NAME, String.class); String namespaceName = exchange.getIn().getHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, String.class); if (ObjectHelper.isEmpty(serviceName)) { LOG.error("Delete a specific service require specify a service name"); throw new IllegalArgumentException("Delete a specific service require specify a service name"); } if (ObjectHelper.isEmpty(namespaceName)) { LOG.error("Delete a specific service require specify a namespace name"); throw new IllegalArgumentException("Delete a specific service require specify a namespace name"); } List<StatusDetails> statusDetails = 
getEndpoint().getKubernetesClient().services().inNamespace(namespaceName).withName(serviceName).delete(); boolean serviceDeleted = ObjectHelper.isNotEmpty(statusDetails); prepareOutboundMessage(exchange, serviceDeleted); } }
KubernetesServicesProducer
java
ReactiveX__RxJava
src/test/java/io/reactivex/rxjava3/flowable/FlowableThrottleLastTests.java
{ "start": 1220, "end": 5182 }
class ____ extends RxJavaTest { @Test public void throttleWithDroppedCallbackException() throws Throwable { Subscriber<Integer> subscriber = TestHelper.mockSubscriber(); Action whenDisposed = mock(Action.class); TestScheduler s = new TestScheduler(); PublishProcessor<Integer> o = PublishProcessor.create(); o.doOnCancel(whenDisposed) .throttleLast(500, TimeUnit.MILLISECONDS, s, e-> { if (e == 1) { throw new TestException("forced"); } }) .subscribe(subscriber); // send events with simulated time increments s.advanceTimeTo(0, TimeUnit.MILLISECONDS); o.onNext(1); // skip o.onNext(2); // deliver s.advanceTimeTo(501, TimeUnit.MILLISECONDS); InOrder inOrder = inOrder(subscriber); inOrder.verify(subscriber).onError(any(TestException.class)); inOrder.verifyNoMoreInteractions(); verify(whenDisposed).run(); } @Test public void throttleWithDroppedCallback() { Subscriber<Integer> subscriber = TestHelper.mockSubscriber(); Observer<Object> dropCallbackObserver = TestHelper.mockObserver(); TestScheduler s = new TestScheduler(); PublishProcessor<Integer> o = PublishProcessor.create(); o.throttleLast(500, TimeUnit.MILLISECONDS, s, dropCallbackObserver::onNext).subscribe(subscriber); // send events with simulated time increments s.advanceTimeTo(0, TimeUnit.MILLISECONDS); o.onNext(1); // skip o.onNext(2); // deliver s.advanceTimeTo(501, TimeUnit.MILLISECONDS); o.onNext(3); // skip s.advanceTimeTo(600, TimeUnit.MILLISECONDS); o.onNext(4); // skip s.advanceTimeTo(700, TimeUnit.MILLISECONDS); o.onNext(5); // skip o.onNext(6); // deliver s.advanceTimeTo(1001, TimeUnit.MILLISECONDS); o.onNext(7); // deliver s.advanceTimeTo(1501, TimeUnit.MILLISECONDS); o.onComplete(); InOrder inOrder = inOrder(subscriber); InOrder dropCallbackOrder = inOrder(dropCallbackObserver); dropCallbackOrder.verify(dropCallbackObserver).onNext(1); inOrder.verify(subscriber).onNext(2); dropCallbackOrder.verify(dropCallbackObserver).onNext(3); dropCallbackOrder.verify(dropCallbackObserver).onNext(4); 
dropCallbackOrder.verify(dropCallbackObserver).onNext(5); inOrder.verify(subscriber).onNext(6); inOrder.verify(subscriber).onNext(7); inOrder.verify(subscriber).onComplete(); inOrder.verifyNoMoreInteractions(); dropCallbackOrder.verifyNoMoreInteractions(); } @Test public void throttle() { Subscriber<Integer> subscriber = TestHelper.mockSubscriber(); TestScheduler s = new TestScheduler(); PublishProcessor<Integer> o = PublishProcessor.create(); o.throttleLast(500, TimeUnit.MILLISECONDS, s).subscribe(subscriber); // send events with simulated time increments s.advanceTimeTo(0, TimeUnit.MILLISECONDS); o.onNext(1); // skip o.onNext(2); // deliver s.advanceTimeTo(501, TimeUnit.MILLISECONDS); o.onNext(3); // skip s.advanceTimeTo(600, TimeUnit.MILLISECONDS); o.onNext(4); // skip s.advanceTimeTo(700, TimeUnit.MILLISECONDS); o.onNext(5); // skip o.onNext(6); // deliver s.advanceTimeTo(1001, TimeUnit.MILLISECONDS); o.onNext(7); // deliver s.advanceTimeTo(1501, TimeUnit.MILLISECONDS); o.onComplete(); InOrder inOrder = inOrder(subscriber); inOrder.verify(subscriber).onNext(2); inOrder.verify(subscriber).onNext(6); inOrder.verify(subscriber).onNext(7); inOrder.verify(subscriber).onComplete(); inOrder.verifyNoMoreInteractions(); } }
FlowableThrottleLastTests
java
apache__camel
components/camel-ai/camel-weaviate/src/main/java/org/apache/camel/component/weaviate/WeaviateVectorDb.java
{ "start": 1078, "end": 1259 }
class ____ been moved to its own class. Use * {@link org.apache.camel.component.weaviate.WeaviateVectorDbHeaders} instead. */ @Deprecated public static
has
java
quarkusio__quarkus
independent-projects/tools/codestarts/src/main/java/io/quarkus/devtools/codestarts/core/strategy/DefaultCodestartFileStrategyHandler.java
{ "start": 183, "end": 347 }
interface ____ extends CodestartFileStrategyHandler { void copyStaticFile(Source source, Path targetPath) throws IOException; }
DefaultCodestartFileStrategyHandler
java
elastic__elasticsearch
test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java
{ "start": 1703, "end": 3598 }
class ____ extends ESTestCase { private RestController controller; private TestThreadPool threadPool; protected VerifyingClient verifyingClient; @Before public void setUpController() { threadPool = createThreadPool(); verifyingClient = new VerifyingClient(threadPool); controller = new RestController(null, verifyingClient, new NoneCircuitBreakerService(), new UsageService(), TelemetryProvider.NOOP); } @After public void tearDownController() { threadPool.close(); } /** * A test {@link RestController}. This controller can be used to register and delegate * to handlers, but uses a mock client and cannot carry out the full request. */ protected RestController controller() { return controller; } /** * Sends the given request to the test controller in {@link #controller()}. */ protected void dispatchRequest(RestRequest request) { FakeRestChannel channel = new FakeRestChannel(request, true, 1); ThreadContext threadContext = verifyingClient.threadPool().getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { controller.dispatchRequest(request, channel, threadContext); } finally { Releasables.close(channel.capturedResponse()); } } /** * A mocked {@link org.elasticsearch.client.internal.node.NodeClient} which can be easily reconfigured to verify arbitrary verification * functions, and can be reset to allow reconfiguration partway through a test without having to construct a new object. * * By default, will throw {@link AssertionError} when any execution method is called, unless configured otherwise using * {@link #setExecuteVerifier} or {@link #setExecuteLocallyVerifier}. */ public static final
RestActionTestCase
java
alibaba__druid
core/src/main/java/com/alibaba/druid/support/clickhouse/BalancedClickhouseDriver.java
{ "start": 319, "end": 1514 }
class ____ implements java.sql.Driver { private final String url; private BalancedClickhouseDataSource dataSource; public BalancedClickhouseDriver(final String url, Properties properties) { this.url = url; this.dataSource = new BalancedClickhouseDataSource(url, properties); } @Override public Connection connect(String url, Properties info) throws SQLException { if (!acceptsURL(url)) { throw new SQLException("TODO"); } return dataSource.getConnection(); } @Override public boolean acceptsURL(String url) throws SQLException { return this.url.equals(url); } @Override public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { return new DriverPropertyInfo[0]; } @Override public int getMajorVersion() { return 0; } @Override public int getMinorVersion() { return 0; } @Override public boolean jdbcCompliant() { return false; } @Override public Logger getParentLogger() throws SQLFeatureNotSupportedException { return null; } }
BalancedClickhouseDriver
java
spring-projects__spring-framework
spring-test/src/main/java/org/springframework/test/web/client/match/MockRestRequestMatchers.java
{ "start": 1716, "end": 1959 }
class ____ a Java editor favorite. To navigate to * this setting, open the Preferences and type "favorites". * * @author Craig Walls * @author Rossen Stoyanchev * @author Sam Brannen * @author Simon Baslé * @since 3.2 */ public abstract
as
java
elastic__elasticsearch
server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java
{ "start": 13643, "end": 14760 }
class ____ extends SortedDocValues { final SortedEntry entry; final TermsEnum termsEnum; BaseSortedDocValues(SortedEntry entry) throws IOException { this.entry = entry; this.termsEnum = termsEnum(); } @Override public int getValueCount() { return Math.toIntExact(entry.termsDictEntry.termsDictSize); } @Override public BytesRef lookupOrd(int ord) throws IOException { termsEnum.seekExact(ord); return termsEnum.term(); } @Override public int lookupTerm(BytesRef key) throws IOException { TermsEnum.SeekStatus status = termsEnum.seekCeil(key); return switch (status) { case FOUND -> Math.toIntExact(termsEnum.ord()); default -> Math.toIntExact(-1L - termsEnum.ord()); }; } @Override public TermsEnum termsEnum() throws IOException { return new TermsDict(entry.termsDictEntry, data, merging); } } private abstract static
BaseSortedDocValues
java
apache__logging-log4j2
log4j-core/src/main/java/org/apache/logging/log4j/core/config/builder/impl/BuiltConfiguration.java
{ "start": 1754, "end": 7541 }
class ____ extends AbstractConfiguration { private final StatusConfiguration statusConfig; protected Component rootComponent; private Component loggersComponent; private Component appendersComponent; private Component filtersComponent; private Component propertiesComponent; private Component customLevelsComponent; private Component scriptsComponent; private Component monitorResourcesComponent; private Component asyncWaitStrategyFactoryComponent; private String contentType = "text"; public BuiltConfiguration( final LoggerContext loggerContext, final ConfigurationSource source, final Component rootComponent) { super(loggerContext, source); statusConfig = new StatusConfiguration().withStatus(getDefaultStatus()); for (final Component component : rootComponent.getComponents()) { switch (component.getPluginType()) { case "Scripts": { scriptsComponent = component; break; } case "Loggers": { loggersComponent = component; break; } case "Appenders": { appendersComponent = component; break; } case "Filters": { filtersComponent = component; break; } case "Properties": { propertiesComponent = component; break; } case "CustomLevels": { customLevelsComponent = component; break; } case "MonitorResources": { monitorResourcesComponent = component; break; } case "AsyncWaitStrategyFactory": { asyncWaitStrategyFactoryComponent = component; break; } } } this.rootComponent = rootComponent; } @Override public void setup() { final List<Node> children = rootNode.getChildren(); if (propertiesComponent.getComponents().size() > 0) { children.add(convertToNode(rootNode, propertiesComponent)); } if (scriptsComponent.getComponents().size() > 0) { children.add(convertToNode(rootNode, scriptsComponent)); } if (customLevelsComponent.getComponents().size() > 0) { children.add(convertToNode(rootNode, customLevelsComponent)); } if (monitorResourcesComponent != null && monitorResourcesComponent.getComponents().size() > 0) { children.add(convertToNode(rootNode, monitorResourcesComponent)); } if 
(asyncWaitStrategyFactoryComponent != null) { children.add(convertToNode(rootNode, asyncWaitStrategyFactoryComponent)); } children.add(convertToNode(rootNode, loggersComponent)); children.add(convertToNode(rootNode, appendersComponent)); if (filtersComponent.getComponents().size() > 0) { if (filtersComponent.getComponents().size() == 1) { children.add( convertToNode(rootNode, filtersComponent.getComponents().get(0))); } else { children.add(convertToNode(rootNode, filtersComponent)); } } rootComponent = null; } public String getContentType() { return this.contentType; } public void setContentType(final String contentType) { this.contentType = contentType; } public void createAdvertiser(final String advertiserString, final ConfigurationSource configSource) { byte[] buffer = null; try { if (configSource != null) { final InputStream is = configSource.getInputStream(); if (is != null) { buffer = toByteArray(is); } } } catch (final IOException ioe) { LOGGER.warn("Unable to read configuration source " + configSource.toString()); } super.createAdvertiser(advertiserString, configSource, buffer, contentType); } public StatusConfiguration getStatusConfiguration() { return statusConfig; } public void setPluginPackages(final String packages) { pluginPackages.addAll(Arrays.asList(packages.split(Patterns.COMMA_SEPARATOR))); } public void setShutdownHook(final String flag) { isShutdownHookEnabled = !"disable".equalsIgnoreCase(flag); } public void setShutdownTimeoutMillis(final long shutdownTimeoutMillis) { this.shutdownTimeoutMillis = shutdownTimeoutMillis; } public void setMonitorInterval(final int intervalSeconds) { if (this instanceof Reconfigurable && intervalSeconds > 0) { initializeWatchers((Reconfigurable) this, getConfigurationSource(), intervalSeconds); } } @Override public PluginManager getPluginManager() { return pluginManager; } protected Node convertToNode(final Node parent, final Component component) { final String name = component.getPluginType(); final 
PluginType<?> pluginType = pluginManager.getPluginType(name); final Node node = new Node(parent, name, pluginType); node.getAttributes().putAll(component.getAttributes()); node.setValue(component.getValue()); final List<Node> children = node.getChildren(); for (final Component child : component.getComponents()) { children.add(convertToNode(node, child)); } return node; } }
BuiltConfiguration
java
eclipse-vertx__vert.x
vertx-core/src/test/java/io/vertx/tests/wrapper/HttpServerRequestWrapperImpl.java
{ "start": 414, "end": 588 }
class ____ extends HttpServerRequestWrapper { public HttpServerRequestWrapperImpl(HttpServerRequestInternal delegate) { super(delegate); } }
HttpServerRequestWrapperImpl
java
apache__logging-log4j2
log4j-api/src/main/java/org/apache/logging/log4j/util/StackLocatorUtil.java
{ "start": 3182, "end": 3289 }
class ____.</li> * </ul> * * @param depth The stack frame count to walk. * @return A
loader
java
google__dagger
javatests/dagger/internal/codegen/MapKeyProcessorTest.java
{ "start": 3334, "end": 3595 }
interface ____ {", " PathEnum value();", " String relativePath() default \"Defaultpath\";", "}", "}"); Source pathEnumFile = CompilerTests.javaSource("test.PathEnum", "package test;", "", "public
PathKey
java
netty__netty
codec-http3/src/test/java/io/netty/handler/codec/http3/HttpConversionUtilTest.java
{ "start": 2106, "end": 9440 }
class ____ { @Test public void connectNoPath() throws Exception { String authority = "netty.io:80"; Http3Headers headers = new DefaultHttp3Headers(); headers.authority(authority); headers.method(HttpMethod.CONNECT.asciiName()); HttpRequest request = HttpConversionUtil.toHttpRequest(0, headers, true); assertNotNull(request); assertEquals(authority, request.uri()); assertEquals(authority, request.headers().get(HOST)); } @Test public void setHttp3AuthorityWithoutUserInfo() { Http3Headers headers = new DefaultHttp3Headers(); HttpConversionUtil.setHttp3Authority("foo", headers); assertEquals(new AsciiString("foo"), headers.authority()); } @Test public void setHttp3AuthorityWithUserInfo() { Http3Headers headers = new DefaultHttp3Headers(); HttpConversionUtil.setHttp3Authority("info@foo", headers); assertEquals(new AsciiString("foo"), headers.authority()); HttpConversionUtil.setHttp3Authority("@foo.bar", headers); assertEquals(new AsciiString("foo.bar"), headers.authority()); } @Test public void setHttp3AuthorityNullOrEmpty() { Http3Headers headers = new DefaultHttp3Headers(); HttpConversionUtil.setHttp3Authority(null, headers); assertNull(headers.authority()); HttpConversionUtil.setHttp3Authority("", headers); assertSame(AsciiString.EMPTY_STRING, headers.authority()); } @Test public void setHttp2AuthorityWithEmptyAuthority() { assertThrows(IllegalArgumentException.class, () -> HttpConversionUtil.setHttp3Authority("info@", new DefaultHttp3Headers())); } @Test public void stripTEHeaders() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(TE, GZIP); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertTrue(out.isEmpty()); } @Test public void stripTEHeadersExcludingTrailers() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(TE, GZIP); inHeaders.add(TE, TRAILERS); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertSame(TRAILERS, out.get(TE)); 
} @Test public void stripTEHeadersCsvSeparatedExcludingTrailers() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(TE, GZIP + "," + TRAILERS); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertSame(TRAILERS, out.get(TE)); } @Test public void stripTEHeadersCsvSeparatedAccountsForValueSimilarToTrailers() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(TE, GZIP + "," + TRAILERS + "foo"); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertFalse(out.contains(TE)); } @Test public void stripTEHeadersAccountsForValueSimilarToTrailers() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(TE, TRAILERS + "foo"); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertFalse(out.contains(TE)); } @Test public void stripTEHeadersAccountsForOWS() { // Disable header validation, since it will otherwise reject the header. 
boolean validate = false; HttpHeaders inHeaders = new DefaultHttpHeaders(validate); inHeaders.add(TE, " " + TRAILERS + ' '); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertSame(TRAILERS, out.get(TE)); } @Test public void stripConnectionHeadersAndNominees() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(CONNECTION, "foo"); inHeaders.add("foo", "bar"); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertTrue(out.isEmpty()); } @Test public void stripConnectionNomineesWithCsv() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(CONNECTION, "foo, bar"); inHeaders.add("foo", "baz"); inHeaders.add("bar", "qux"); inHeaders.add("hello", "world"); Http3Headers out = new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, out); assertEquals(1, out.size()); assertSame("world", out.get("hello")); } @Test public void addHttp3ToHttpHeadersCombinesCookies() throws Http3Exception { Http3Headers inHeaders = new DefaultHttp3Headers(); inHeaders.add("yes", "no"); inHeaders.add(COOKIE, "foo=bar"); inHeaders.add(COOKIE, "bax=baz"); HttpHeaders outHeaders = new DefaultHttpHeaders(); HttpConversionUtil.addHttp3ToHttpHeaders(5, inHeaders, outHeaders, HttpVersion.HTTP_1_1, false, false); assertEquals("no", outHeaders.get("yes")); assertEquals("foo=bar; bax=baz", outHeaders.get(COOKIE.toString())); } @Test public void connectionSpecificHeadersShouldBeRemoved() { HttpHeaders inHeaders = new DefaultHttpHeaders(); inHeaders.add(CONNECTION, "keep-alive"); inHeaders.add(HOST, "example.com"); @SuppressWarnings("deprecation") AsciiString keepAlive = KEEP_ALIVE; inHeaders.add(keepAlive, "timeout=5, max=1000"); @SuppressWarnings("deprecation") AsciiString proxyConnection = PROXY_CONNECTION; inHeaders.add(proxyConnection, "timeout=5, max=1000"); inHeaders.add(TRANSFER_ENCODING, "chunked"); inHeaders.add(UPGRADE, "h2c"); Http3Headers outHeaders = 
new DefaultHttp3Headers(); HttpConversionUtil.toHttp3Headers(inHeaders, outHeaders); assertFalse(outHeaders.contains(CONNECTION)); assertFalse(outHeaders.contains(HOST)); assertFalse(outHeaders.contains(keepAlive)); assertFalse(outHeaders.contains(proxyConnection)); assertFalse(outHeaders.contains(TRANSFER_ENCODING)); assertFalse(outHeaders.contains(UPGRADE)); } @Test public void http3ToHttpHeaderTest() throws Exception { Http3Headers http3Headers = new DefaultHttp3Headers(); http3Headers.status("200"); http3Headers.path("/meow"); // HTTP/2 Header response should not contain 'path' in response. http3Headers.set("cat", "meow"); HttpHeaders httpHeaders = new DefaultHttpHeaders(); HttpConversionUtil.addHttp3ToHttpHeaders(3, http3Headers, httpHeaders, HttpVersion.HTTP_1_1, false, true); assertFalse(httpHeaders.contains(HttpConversionUtil.ExtensionHeaderNames.PATH.text())); assertEquals("meow", httpHeaders.get("cat")); httpHeaders.clear(); HttpConversionUtil.addHttp3ToHttpHeaders(3, http3Headers, httpHeaders, HttpVersion.HTTP_1_1, false, false); assertTrue(httpHeaders.contains(HttpConversionUtil.ExtensionHeaderNames.PATH.text())); assertEquals("meow", httpHeaders.get("cat")); } }
HttpConversionUtilTest
java
mybatis__mybatis-3
src/test/java/org/apache/ibatis/submitted/typebasedtypehandlerresolution/LocallySpecifiedTypeHandlerResolutionTest.java
{ "start": 13632, "end": 13670 }
class ____.lang.Object"); } } }
java
java
apache__kafka
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetMetadataManager.java
{ "start": 4479, "end": 8272 }
class ____ { private LogContext logContext = null; private SnapshotRegistry snapshotRegistry = null; private Time time = null; private GroupMetadataManager groupMetadataManager = null; private MetadataImage metadataImage = null; private GroupCoordinatorConfig config = null; private GroupCoordinatorMetricsShard metrics = null; public Builder withLogContext(LogContext logContext) { this.logContext = logContext; return this; } public Builder withSnapshotRegistry(SnapshotRegistry snapshotRegistry) { this.snapshotRegistry = snapshotRegistry; return this; } public Builder withTime(Time time) { this.time = time; return this; } public Builder withGroupMetadataManager(GroupMetadataManager groupMetadataManager) { this.groupMetadataManager = groupMetadataManager; return this; } public Builder withGroupCoordinatorConfig(GroupCoordinatorConfig config) { this.config = config; return this; } public Builder withMetadataImage(MetadataImage metadataImage) { this.metadataImage = metadataImage; return this; } public Builder withGroupCoordinatorMetricsShard(GroupCoordinatorMetricsShard metrics) { this.metrics = metrics; return this; } public OffsetMetadataManager build() { if (logContext == null) logContext = new LogContext(); if (snapshotRegistry == null) snapshotRegistry = new SnapshotRegistry(logContext); if (metadataImage == null) metadataImage = MetadataImage.EMPTY; if (time == null) time = Time.SYSTEM; if (groupMetadataManager == null) { throw new IllegalArgumentException("GroupMetadataManager cannot be null"); } if (metrics == null) { throw new IllegalArgumentException("GroupCoordinatorMetricsShard cannot be null"); } return new OffsetMetadataManager( snapshotRegistry, logContext, time, metadataImage, groupMetadataManager, config, metrics ); } } /** * The logger. */ private final Logger log; /** * The snapshot registry. */ private final SnapshotRegistry snapshotRegistry; /** * The system time. */ private final Time time; /** * The group metadata manager. 
*/ private final GroupMetadataManager groupMetadataManager; /** * The coordinator metrics. */ private final GroupCoordinatorMetricsShard metrics; /** * The group coordinator config. */ private final GroupCoordinatorConfig config; /** * The committed offsets. */ private final Offsets offsets; /** * The pending transactional offsets keyed by producer id. This structure holds all the * transactional offsets that are part of ongoing transactions. When the transaction is * committed, they are transferred to `offsets`; when the transaction is aborted, they * are removed. */ private final TimelineHashMap<Long, Offsets> pendingTransactionalOffsets; /** * The open transactions (producer ids) by group id, topic name and partition id. */ private final OpenTransactions openTransactions; /** * Tracks open transactions (producer ids) by group id, topic name and partition id. * It is the responsibility of the caller to update {@link #pendingTransactionalOffsets}. */ private
Builder
java
quarkusio__quarkus
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/bcextensions/ExtensionMethodParameter.java
{ "start": 156, "end": 2931 }
enum ____ { META_ANNOTATIONS(DotNames.META_ANNOTATIONS, false, ExtensionPhase.DISCOVERY), SCANNED_CLASSES(DotNames.SCANNED_CLASSES, false, ExtensionPhase.DISCOVERY), CLASS_INFO(DotNames.CLASS_INFO, true, ExtensionPhase.ENHANCEMENT), METHOD_INFO(DotNames.METHOD_INFO, true, ExtensionPhase.ENHANCEMENT), FIELD_INFO(DotNames.FIELD_INFO, true, ExtensionPhase.ENHANCEMENT), CLASS_CONFIG(DotNames.CLASS_CONFIG, true, ExtensionPhase.ENHANCEMENT), METHOD_CONFIG(DotNames.METHOD_CONFIG, true, ExtensionPhase.ENHANCEMENT), FIELD_CONFIG(DotNames.FIELD_CONFIG, true, ExtensionPhase.ENHANCEMENT), BEAN_INFO(DotNames.BEAN_INFO, true, ExtensionPhase.REGISTRATION), INTERCEPTOR_INFO(DotNames.INTERCEPTOR_INFO, true, ExtensionPhase.REGISTRATION), OBSERVER_INFO(DotNames.OBSERVER_INFO, true, ExtensionPhase.REGISTRATION), INVOKER_FACTORY(DotNames.INVOKER_FACTORY, false, ExtensionPhase.REGISTRATION), SYNTHETIC_COMPONENTS(DotNames.SYNTHETIC_COMPONENTS, false, ExtensionPhase.SYNTHESIS), MESSAGES(DotNames.MESSAGES, false, ExtensionPhase.DISCOVERY, ExtensionPhase.ENHANCEMENT, ExtensionPhase.REGISTRATION, ExtensionPhase.SYNTHESIS, ExtensionPhase.VALIDATION), TYPES(DotNames.TYPES, false, ExtensionPhase.ENHANCEMENT, ExtensionPhase.REGISTRATION, ExtensionPhase.SYNTHESIS, ExtensionPhase.VALIDATION), UNKNOWN(null, false), ; private final DotName typeName; private final boolean isQuery; private final Set<ExtensionPhase> validPhases; ExtensionMethodParameter(DotName typeName, boolean isQuery, ExtensionPhase... validPhases) { this.typeName = typeName; this.isQuery = isQuery; if (validPhases == null || validPhases.length == 0) { this.validPhases = EnumSet.noneOf(ExtensionPhase.class); } else { this.validPhases = EnumSet.copyOf(Arrays.asList(validPhases)); } } boolean isQuery() { return isQuery; } void verifyAvailable(ExtensionPhase phase, ExtensionMethod method) { if (!validPhases.contains(phase)) { throw new IllegalArgumentException(phase + " methods can't declare a parameter of type " + (typeName != null ? 
typeName.withoutPackagePrefix() : this.name()) + ", found at " + method); } } static ExtensionMethodParameter of(org.jboss.jandex.Type type) { if (type.kind() == org.jboss.jandex.Type.Kind.CLASS) { for (ExtensionMethodParameter candidate : ExtensionMethodParameter.values()) { if (candidate.typeName.equals(type.name())) { return candidate; } } } return UNKNOWN; } }
ExtensionMethodParameter
java
google__error-prone
check_api/src/main/java/com/google/errorprone/matchers/HasIdentifier.java
{ "start": 1062, "end": 1552 }
class ____ implements Matcher<Tree> { private final Matcher<IdentifierTree> nodeMatcher; public HasIdentifier(Matcher<IdentifierTree> nodeMatcher) { this.nodeMatcher = nodeMatcher; } @Override public boolean matches(Tree tree, VisitorState state) { Boolean matches = new HasIdentifierScanner(state, nodeMatcher).scan(state.getPath(), null); return firstNonNull(matches, false); } /** AST Visitor that matches identifiers in a Tree */ private static
HasIdentifier
java
apache__kafka
connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java
{ "start": 9737, "end": 20075 }
class ____ { private static final String CONNECTOR_ID = "test-connector"; private static final ConnectorTaskId TASK_ID = new ConnectorTaskId("job", 0); private static final String WORKER_ID = "localhost:8083"; private static final String CLUSTER_ID = "test-cluster"; private final ConnectorClientConfigOverridePolicy noneConnectorClientConfigOverridePolicy = new NoneConnectorClientConfigOverridePolicy(); private final ConnectorClientConfigOverridePolicy allConnectorClientConfigOverridePolicy = new AllConnectorClientConfigOverridePolicy(); private final Map<String, String> workerProps = new HashMap<>(); private WorkerConfig config; private Worker worker; private final Map<String, String> defaultProducerConfigs = new HashMap<>(); private final Map<String, String> defaultConsumerConfigs = new HashMap<>(); @Mock private Plugins plugins; @Mock private PluginClassLoader pluginLoader; @Mock private LoaderSwap loaderSwap; @Mock private Runnable isolatedRunnable; @Mock private OffsetBackingStore offsetBackingStore; @Mock private TaskStatus.Listener taskStatusListener; @Mock private ConnectorStatus.Listener connectorStatusListener; @Mock private Herder herder; @Mock private StatusBackingStore statusBackingStore; @Mock private SourceConnector sourceConnector; @Mock private SinkConnector sinkConnector; @Mock private CloseableConnectorContext ctx; @Mock private TestSourceTask task; @Mock private Converter taskKeyConverter; @Mock private Converter taskValueConverter; @Mock private HeaderConverter taskHeaderConverter; @Mock private ExecutorService executorService; @Mock private ConnectorConfig connectorConfig; private String mockFileProviderTestId; private Map<String, String> connectorProps; private MockedConstruction<WorkerSourceTask> sourceTaskMockedConstruction; private MockedConstruction<ExactlyOnceWorkerSourceTask> eosSourceTaskMockedConstruction; private MockedConstruction<WorkerSinkTask> sinkTaskMockedConstruction; private MockitoSession mockitoSession; public void 
setup(boolean enableTopicCreation) { // Use strict mode to detect unused mocks mockitoSession = Mockito.mockitoSession() .initMocks(this) .strictness(Strictness.STRICT_STUBS) .startMocking(); workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); workerProps.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, JmxReporter.class.getName() + "," + MockMetricsReporter.class.getName()); workerProps.put("config.providers", "file"); workerProps.put("config.providers.file.class", MockFileConfigProvider.class.getName()); mockFileProviderTestId = UUID.randomUUID().toString(); workerProps.put("config.providers.file.param.testId", mockFileProviderTestId); workerProps.put(TOPIC_CREATION_ENABLE_CONFIG, String.valueOf(enableTopicCreation)); config = new StandaloneConfig(workerProps); defaultProducerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); defaultProducerConfigs.put( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); defaultProducerConfigs.put( ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); defaultProducerConfigs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, Long.toString(Long.MAX_VALUE)); // By default, producers that are instantiated and used by Connect have idempotency disabled even after idempotency became // default for Kafka producers. This is chosen to avoid breaking changes when Connect contacts Kafka brokers that do not support // idempotent producers or require explicit steps to enable them (e.g. adding the IDEMPOTENT_WRITE ACL to brokers older than 2.8). 
// These settings might change when https://cwiki.apache.org/confluence/display/KAFKA/KIP-318%3A+Make+Kafka+Connect+Source+idempotent // gets approved and scheduled for release. defaultProducerConfigs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); defaultProducerConfigs.put(ProducerConfig.ACKS_CONFIG, "all"); defaultProducerConfigs.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1"); defaultProducerConfigs.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.toString(Integer.MAX_VALUE)); defaultConsumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); defaultConsumerConfigs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); defaultConsumerConfigs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); defaultConsumerConfigs .put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); defaultConsumerConfigs .put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); // Some common defaults. They might change on individual tests connectorProps = anyConnectorConfigMap(); // Make calls to new WorkerSourceTask() return a mock to avoid the source task trying to connect to a broker. 
sourceTaskMockedConstruction = Mockito.mockConstruction( WorkerSourceTask.class, context -> Mockito.withSettings().defaultAnswer(this::workerTaskMethod), WorkerTest::workerTaskConstructor); eosSourceTaskMockedConstruction = Mockito.mockConstruction( ExactlyOnceWorkerSourceTask.class, context -> Mockito.withSettings().defaultAnswer(this::workerTaskMethod), WorkerTest::workerTaskConstructor); sinkTaskMockedConstruction = Mockito.mockConstruction( WorkerSinkTask.class, context -> Mockito.withSettings().defaultAnswer(this::workerTaskMethod), WorkerTest::workerTaskConstructor); } @AfterEach public void teardown() { // Critical to always close MockedStatics // Ideal would be to use try-with-resources in an individual test, but it introduced a rather large level of // indentation of most test bodies, hence sticking with setup() / teardown() sourceTaskMockedConstruction.close(); eosSourceTaskMockedConstruction.close(); sinkTaskMockedConstruction.close(); mockitoSession.finishMocking(); } @ParameterizedTest @ValueSource(booleans = {true, false}) public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwable { setup(enableTopicCreation); final String connectorClass = SampleSourceConnector.class.getName(); connectorProps.put(CONNECTOR_CLASS_CONFIG, connectorClass); // Create mockKafkaClusterId(); mockVersionedConnectorIsolation(connectorClass, null, sourceConnector); mockExecutorRealSubmit(WorkerConnector.class); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, noneConnectorClientConfigOverridePolicy); worker.start(); assertEquals(Set.of(), worker.connectorNames()); FutureCallback<TargetState> onFirstStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onFirstStart); // Wait for the connector to actually start assertEquals(TargetState.STARTED, onFirstStart.get(1000, TimeUnit.MILLISECONDS)); assertEquals(Set.of(CONNECTOR_ID), 
worker.connectorNames()); FutureCallback<TargetState> onSecondStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onSecondStart); Exception exc = assertThrows(ExecutionException.class, () -> onSecondStart.get(0, TimeUnit.MILLISECONDS)); assertInstanceOf(ConnectException.class, exc.getCause()); assertStatistics(worker, 1, 0); assertStartupStatistics(worker, 1, 0, 0, 0); worker.stopAndAwaitConnector(CONNECTOR_ID); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); assertEquals(Set.of(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); verifyKafkaClusterId(); verifyVersionedConnectorIsolation(connectorClass, null, sourceConnector); verifyExecutorSubmit(); verify(sourceConnector).initialize(any(ConnectorContext.class)); verify(sourceConnector).start(connectorProps); verify(connectorStatusListener).onStartup(CONNECTOR_ID); verify(sourceConnector).stop(); verify(connectorStatusListener).onShutdown(CONNECTOR_ID); verify(ctx).close(); MockFileConfigProvider.assertClosed(mockFileProviderTestId); } private void mockFileConfigProvider() { MockFileConfigProvider mockFileConfigProvider = new MockFileConfigProvider(); mockFileConfigProvider.configure(Map.of("testId", mockFileProviderTestId)); when(plugins.newConfigProvider(any(AbstractConfig.class), eq("config.providers.file"), any(ClassLoaderUsage.class))) .thenReturn(mockFileConfigProvider); } @ParameterizedTest @ValueSource(booleans = {true, false}) public void testStartConnectorFailure(boolean enableTopicCreation) throws Exception { setup(enableTopicCreation); final String nonConnectorClass = "java.util.HashMap"; connectorProps.put(CONNECTOR_CLASS_CONFIG, nonConnectorClass); // Bad connector
WorkerTest
java
quarkusio__quarkus
extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/JobConfig.java
{ "start": 237, "end": 1641 }
interface ____ { /** * Specifies the maximum desired number of pods the job should run at any given time. */ Optional<Integer> parallelism(); /** * Specifies the desired number of successfully finished pods the job should be run with. */ Optional<Integer> completions(); /** * CompletionMode specifies how Pod completions are tracked. */ @WithDefault("NonIndexed") JobCompletionMode completionMode(); /** * Specifies the number of retries before marking this job failed. */ Optional<Integer> backoffLimit(); /** * Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system * tries to terminate it; value must be positive integer. */ Optional<Long> activeDeadlineSeconds(); /** * Limits the lifetime of a Job that has finished execution (either Complete or Failed). If this * field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. */ Optional<Integer> ttlSecondsAfterFinished(); /** * Suspend specifies whether the Job controller should create Pods or not. */ @WithDefault("false") boolean suspend(); /** * Restart policy when the job container fails. */ @WithDefault("OnFailure") JobRestartPolicy restartPolicy(); }
JobConfig
java
elastic__elasticsearch
x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlInitiateSingleSignOnException.java
{ "start": 568, "end": 1701 }
class ____ extends ElasticsearchSecurityException { private SamlInitiateSingleSignOnResponse samlInitiateSingleSignOnResponse; public SamlInitiateSingleSignOnException( String msg, RestStatus status, Exception cause, SamlInitiateSingleSignOnResponse samlInitiateSingleSignOnResponse ) { super(msg, status, cause); this.samlInitiateSingleSignOnResponse = samlInitiateSingleSignOnResponse; } public SamlInitiateSingleSignOnException(String msg, RestStatus status, Exception cause) { super(msg, status, cause); } @Override protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { if (this.samlInitiateSingleSignOnResponse != null) { builder.startObject("saml_initiate_single_sign_on_response"); this.samlInitiateSingleSignOnResponse.toXContent(builder); builder.endObject(); } } public SamlInitiateSingleSignOnResponse getSamlInitiateSingleSignOnResponse() { return samlInitiateSingleSignOnResponse; } }
SamlInitiateSingleSignOnException
java
apache__camel
components/camel-google/camel-google-mail/src/test/java/org/apache/camel/component/google/mail/GmailConfigurationTest.java
{ "start": 1379, "end": 1433 }
class ____ {@link GoogleMailConfiguration}. */ public
for
java
apache__avro
lang/java/avro/src/main/java/org/apache/avro/io/parsing/ValidatingGrammarGenerator.java
{ "start": 996, "end": 1048 }
class ____ generates validating grammar. */ public
that
java
apache__flink
flink-models/flink-model-openai/src/main/java/org/apache/flink/model/openai/OpenAIModelProviderFactory.java
{ "start": 1302, "end": 3608 }
class ____ implements ModelProviderFactory { public static final String IDENTIFIER = "openai"; @Override public ModelProvider createModelProvider(ModelProviderFactory.Context context) { FactoryUtil.ModelProviderFactoryHelper helper = FactoryUtil.createModelProviderFactoryHelper(this, context); helper.validate(); String endpoint = helper.getOptions().get(OpenAIOptions.ENDPOINT); endpoint = endpoint.replaceAll("/*$", "").toLowerCase(); AsyncPredictFunction function; if (endpoint.endsWith(OpenAIEmbeddingModelFunction.ENDPOINT_SUFFIX)) { function = new OpenAIEmbeddingModelFunction(context, helper.getOptions()); } else if (endpoint.endsWith(OpenAIChatModelFunction.ENDPOINT_SUFFIX)) { function = new OpenAIChatModelFunction(context, helper.getOptions()); } else { throw new UnsupportedOperationException("Unsupported endpoint: " + endpoint); } return new Provider(function); } @Override public String factoryIdentifier() { return IDENTIFIER; } @Override public Set<ConfigOption<?>> requiredOptions() { Set<ConfigOption<?>> set = new HashSet<>(); set.add(OpenAIOptions.ENDPOINT); set.add(OpenAIOptions.API_KEY); set.add(OpenAIOptions.MODEL); return set; } @Override public Set<ConfigOption<?>> optionalOptions() { Set<ConfigOption<?>> set = new HashSet<>(); set.add(OpenAIOptions.MAX_CONTEXT_SIZE); set.add(OpenAIOptions.CONTEXT_OVERFLOW_ACTION); set.add(OpenAIOptions.ERROR_HANDLING_STRATEGY); set.add(OpenAIOptions.RETRY_NUM); set.add(OpenAIOptions.RETRY_FALLBACK_STRATEGY); set.add(OpenAIOptions.SYSTEM_PROMPT); set.add(OpenAIOptions.TEMPERATURE); set.add(OpenAIOptions.TOP_P); set.add(OpenAIOptions.STOP); set.add(OpenAIOptions.MAX_TOKENS); set.add(OpenAIOptions.PRESENCE_PENALTY); set.add(OpenAIOptions.N); set.add(OpenAIOptions.SEED); set.add(OpenAIOptions.RESPONSE_FORMAT); set.add(OpenAIOptions.DIMENSION); return set; } /** {@link ModelProvider} for openai model functions. */ public static
OpenAIModelProviderFactory
java
spring-projects__spring-framework
spring-web/src/test/java/org/springframework/web/filter/reactive/UrlHandlerFilterTests.java
{ "start": 1630, "end": 4614 }
class ____ { @Test void requestMutation() { UrlHandlerFilter filter = UrlHandlerFilter.trailingSlashHandler("/path/**").mutateRequest().build(); String path = "/path/123"; MockServerHttpRequest original = MockServerHttpRequest.get(path + "/").build(); ServerWebExchange exchange = MockServerWebExchange.from(original); ServerHttpRequest actual = invokeFilter(filter, exchange); assertThat(actual).isNotNull().isNotSameAs(original); assertThat(actual.getPath().value()).isEqualTo(path); } @Test void redirect() { HttpStatus status = HttpStatus.PERMANENT_REDIRECT; UrlHandlerFilter filter = UrlHandlerFilter.trailingSlashHandler("/path/*").redirect(status).build(); String path = "/path/123"; MockServerHttpRequest original = MockServerHttpRequest.get(path + "/").build(); ServerWebExchange exchange = MockServerWebExchange.from(original); assertThatThrownBy(() -> invokeFilter(filter, exchange)) .hasMessageContaining("No argument value was captured"); assertThat(exchange.getResponse().getStatusCode()).isEqualTo(status); assertThat(exchange.getResponse().getHeaders().getLocation()).isEqualTo(URI.create(path)); } @Test void noUrlHandling() { testNoUrlHandling("/path/**", "", "/path/123"); testNoUrlHandling("/path/*", "", "/path/123"); testNoUrlHandling("/**", "", "/"); // gh-33444 testNoUrlHandling("/**", "/myApp", "/myApp/"); // gh-33565 } private static void testNoUrlHandling(String pattern, String contextPath, String path) { // No request mutation UrlHandlerFilter filter = UrlHandlerFilter.trailingSlashHandler(pattern).mutateRequest().build(); MockServerHttpRequest request = MockServerHttpRequest.get(path).contextPath(contextPath).build(); ServerWebExchange exchange = MockServerWebExchange.from(request); ServerHttpRequest actual = invokeFilter(filter, exchange); assertThat(actual).isNotNull().isSameAs(request); assertThat(actual.getPath().value()).isEqualTo(path); // No redirect HttpStatus status = HttpStatus.PERMANENT_REDIRECT; filter = 
UrlHandlerFilter.trailingSlashHandler(pattern).redirect(status).build(); request = MockServerHttpRequest.get(path).contextPath(contextPath).build(); exchange = MockServerWebExchange.from(request); actual = invokeFilter(filter, exchange); assertThat(actual).isNotNull().isSameAs(request); assertThat(exchange.getResponse().getStatusCode()).isNull(); assertThat(exchange.getResponse().getHeaders().getLocation()).isNull(); } private static ServerHttpRequest invokeFilter(UrlHandlerFilter filter, ServerWebExchange exchange) { WebHandler handler = mock(WebHandler.class); ArgumentCaptor<ServerWebExchange> captor = ArgumentCaptor.forClass(ServerWebExchange.class); given(handler.handle(captor.capture())).willReturn(Mono.empty()); WebFilterChain chain = new DefaultWebFilterChain(handler, List.of(filter)); filter.filter(exchange, chain).block(); return captor.getValue().getRequest(); } }
UrlHandlerFilterTests
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/api/objectarray/ObjectArrayAssert_containsAll_Test.java
{ "start": 977, "end": 1372 }
class ____ extends ObjectArrayAssertBaseTest { @Override protected ObjectArrayAssert<Object> invoke_api_method() { return assertions.containsAll(newArrayList("Yoda", "Luke")); } @Override protected void verify_internal_effects() { verify(arrays).assertContainsAll(getInfo(assertions), getActual(assertions), newArrayList("Yoda", "Luke")); } }
ObjectArrayAssert_containsAll_Test
java
elastic__elasticsearch
x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/Sample.java
{ "start": 964, "end": 2458 }
class ____ implements Accountable { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(Sample.class); private final SequenceKey key; private final HitReference[] matches; Sample(SequenceKey key, List<SearchHit> searchHits) { this.key = key; this.matches = new HitReference[searchHits.size()]; for (int i = 0; i < searchHits.size(); i++) { this.matches[i] = new HitReference(searchHits.get(i)); } } public SequenceKey key() { return key; } public List<HitReference> hits() { return Arrays.asList(matches); } @Override public long ramBytesUsed() { return SHALLOW_SIZE + RamUsageEstimator.sizeOf(key) + RamUsageEstimator.sizeOf(matches); } @Override public int hashCode() { return Objects.hash(key); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } Sample other = (Sample) obj; return Objects.equals(key, other.key); } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(format(null, "[Samp<{}>]", key)); for (int i = 0; i < matches.length; i++) { sb.append(format(null, "\n [{}]={{}}", i, matches[i])); } return sb.toString(); } }
Sample
java
apache__logging-log4j2
log4j-core/src/main/java/org/apache/logging/log4j/core/layout/ScriptPatternSelector.java
{ "start": 2281, "end": 2502 }
class ____ implements PatternSelector, LocationAware { /** * Custom ScriptPatternSelector builder. Use the {@link #newBuilder() builder factory method} to create this. */ public static
ScriptPatternSelector
java
micronaut-projects__micronaut-core
inject-java/src/test/groovy/io/micronaut/inject/generics/Vehicle.java
{ "start": 746, "end": 1294 }
class ____ { private final Engine<V8> engine; @Inject List<Engine<V6>> v6Engines; private Engine<V8> anotherV8; @Inject public Vehicle(Engine<V8> engine) {// <4> this.engine = engine; } public String start() { return engine.start();// <5> } @Inject public void setAnotherV8(Engine<V8> anotherV8) { this.anotherV8 = anotherV8; } public Engine<V8> getAnotherV8() { return anotherV8; } public Engine<V8> getEngine() { return engine; } }
Vehicle
java
quarkusio__quarkus
core/runtime/src/main/java/io/quarkus/runtime/graal/AwtImageIO.java
{ "start": 4179, "end": 4450 }
class ____ { public static void load() { throw new UnsupportedOperationException(AwtImageIO.AWT_EXTENSION_HINT); } } @TargetClass(className = "sun.awt.FontConfiguration", onlyWith = AwtImageIO.IsAWTAbsent.class) final
Target_sun_font_FontManagerNativeLibrary
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/feature/FeaturesTest4.java
{ "start": 304, "end": 905 }
class ____ extends TestCase { public void test_0() throws Exception { SerializeConfig config = new SerializeConfig(); config.setAsmEnable(false); String text = JSON.toJSONString(new Entity(), config); Assert.assertEquals("{\"value\":\"\"}", text); } public void test_1() throws Exception { SerializeConfig config = new SerializeConfig(); config.setAsmEnable(true); String text = JSON.toJSONString(new Entity(), config); Assert.assertEquals("{\"value\":\"\"}", text); } public static
FeaturesTest4
java
spring-projects__spring-framework
spring-core/src/main/java/org/springframework/cglib/proxy/BridgeMethodResolver.java
{ "start": 1636, "end": 3025 }
class ____ { private final Map/* <Class, Set<Signature> */declToBridge; private final ClassLoader classLoader; public BridgeMethodResolver(Map declToBridge, ClassLoader classLoader) { this.declToBridge = declToBridge; this.classLoader = classLoader; } /** * Finds all bridge methods that are being called with invokespecial & * returns them. */ public Map/*<Signature, Signature>*/resolveAll() { Map resolved = new HashMap(); for (Iterator entryIter = declToBridge.entrySet().iterator(); entryIter.hasNext(); ) { Map.Entry entry = (Map.Entry) entryIter.next(); Class owner = (Class) entry.getKey(); Set bridges = (Set) entry.getValue(); try { InputStream is = classLoader.getResourceAsStream(owner.getName().replace('.', '/') + ".class"); if (is == null) { return resolved; } try { new ClassReader(is) .accept(new BridgedFinder(bridges, resolved), ClassReader.SKIP_FRAMES | ClassReader.SKIP_DEBUG); } finally { is.close(); } } catch (IOException ignored) {} } return resolved; } private static
BridgeMethodResolver
java
spring-projects__spring-security
cas/src/main/java/org/springframework/security/cas/authentication/CasServiceTicketAuthenticationToken.java
{ "start": 3892, "end": 4745 }
class ____<B extends Builder<B>> extends AbstractAuthenticationBuilder<B> { private String principal; private @Nullable Object credentials; protected Builder(CasServiceTicketAuthenticationToken token) { super(token); this.principal = token.identifier; this.credentials = token.credentials; } @Override public B principal(@Nullable Object principal) { Assert.isInstanceOf(String.class, principal, "principal must be of type String"); this.principal = (String) principal; return (B) this; } @Override public B credentials(@Nullable Object credentials) { Assert.notNull(credentials, "credentials cannot be null"); this.credentials = credentials; return (B) this; } @Override public CasServiceTicketAuthenticationToken build() { return new CasServiceTicketAuthenticationToken(this); } } }
Builder
java
apache__dubbo
dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/context/annotation/EnableDubbo.java
{ "start": 2232, "end": 2855 }
class ____ will be * scanned. * * @return classes from the base packages to scan * @see DubboComponentScan#basePackageClasses */ @AliasFor(annotation = DubboComponentScan.class, attribute = "basePackageClasses") Class<?>[] scanBasePackageClasses() default {}; /** * It indicates whether {@link AbstractConfig} binding to multiple Spring Beans. * * @return the default value is <code>true</code> * @see EnableDubboConfig#multiple() */ @AliasFor(annotation = EnableDubboConfig.class, attribute = "multiple") boolean multipleConfig() default true; }
specified
java
google__error-prone
core/src/test/java/com/google/errorprone/bugpatterns/inlineme/SuggesterTest.java
{ "start": 12461, "end": 12632 }
class ____ { @Deprecated public NestedClass silly() { return new NestedClass(); } public static
Client
java
quarkusio__quarkus
extensions/amazon-lambda-rest/runtime/src/main/java/io/quarkus/amazon/lambda/http/DefaultLambdaAuthenticationRequest.java
{ "start": 295, "end": 584 }
class ____ extends BaseAuthenticationRequest { private AwsProxyRequest event; public DefaultLambdaAuthenticationRequest(AwsProxyRequest event) { this.event = event; } public AwsProxyRequest getEvent() { return event; } }
DefaultLambdaAuthenticationRequest
java
junit-team__junit5
jupiter-tests/src/test/java/org/junit/jupiter/params/provider/MethodArgumentsProviderTests.java
{ "start": 34805, "end": 34955 }
class ____ { void test() { } Stream<String> nonStaticStringStreamProvider() { return Stream.of("foo", "bar"); } } static
NonStaticTestCase
java
dropwizard__dropwizard
dropwizard-jersey/src/test/java/io/dropwizard/jersey/validation/FuzzyEnumParamConverterProviderTest.java
{ "start": 2407, "end": 2935 }
enum ____ { A("1"), B("2"); private final String code; ExplicitFromStringThrowsOtherException(String code) { this.code = code; } @SuppressWarnings("unused") public String getCode() { return this.code; } @SuppressWarnings("unused") public static ExplicitFromStringThrowsOtherException fromString(String str) { throw new RuntimeException("Boo!"); } } private
ExplicitFromStringThrowsOtherException
java
quarkusio__quarkus
extensions/hibernate-orm/deployment/src/main/java/io/quarkus/hibernate/orm/deployment/HibernateOrmDisabledProcessor.java
{ "start": 378, "end": 873 }
class ____ { @BuildStep @Record(ExecutionTime.RUNTIME_INIT) public void disableHibernateOrm(HibernateOrmDisabledRecorder recorder) { // The disabling itself is done through conditions on build steps (see uses of HibernateOrmEnabled.class) // We still want to check that nobody tries to set quarkus.hibernate-orm.active = true at runtime // if Hibernate ORM is disabled, though: recorder.checkNoExplicitActiveTrue(); } }
HibernateOrmDisabledProcessor
java
apache__dubbo
dubbo-common/src/main/java/org/apache/dubbo/common/cache/FileCacheStoreFactory.java
{ "start": 1824, "end": 7402 }
class ____ { /** * Forbids instantiation. */ private FileCacheStoreFactory() { throw new UnsupportedOperationException("No instance of 'FileCacheStoreFactory' for you! "); } private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(FileCacheStoreFactory.class); private static final ConcurrentMap<String, FileCacheStore> cacheMap = new ConcurrentHashMap<>(); private static final String SUFFIX = ".dubbo.cache"; private static final char ESCAPE_MARK = '%'; private static final Set<Character> LEGAL_CHARACTERS = Collections.unmodifiableSet(new HashSet<Character>() { { // - $ . _ 0-9 a-z A-Z add('-'); add('$'); add('.'); add('_'); for (char c = '0'; c <= '9'; c++) { add(c); } for (char c = 'a'; c <= 'z'; c++) { add(c); } for (char c = 'A'; c <= 'Z'; c++) { add(c); } } }); public static FileCacheStore getInstance(String basePath, String cacheName) { return getInstance(basePath, cacheName, true); } public static FileCacheStore getInstance(String basePath, String cacheName, boolean enableFileCache) { if (basePath == null) { // default case: ~/.dubbo basePath = SystemPropertyConfigUtils.getSystemProperty(USER_HOME) + File.separator + ".dubbo"; } if (basePath.endsWith(File.separator)) { basePath = basePath.substring(0, basePath.length() - 1); } File candidate = new File(basePath); Path path = candidate.toPath(); // ensure cache store path exists if (!candidate.isDirectory()) { try { Files.createDirectories(path); } catch (IOException e) { // 0-3 - cache path inaccessible logger.error( COMMON_CACHE_PATH_INACCESSIBLE, "inaccessible of cache path", "", "Cache store path can't be created: ", e); throw new RuntimeException("Cache store path can't be created: " + candidate, e); } } cacheName = safeName(cacheName); if (!cacheName.endsWith(SUFFIX)) { cacheName = cacheName + SUFFIX; } String cacheFilePath = basePath + File.separator + cacheName; return ConcurrentHashMapUtils.computeIfAbsent(cacheMap, cacheFilePath, k -> getFile(k, enableFileCache)); } /** 
* sanitize a name for valid file or directory name * * @param name origin file name * @return sanitized version of name */ private static String safeName(String name) { int len = name.length(); StringBuilder sb = new StringBuilder(len); for (int i = 0; i < len; i++) { char c = name.charAt(i); if (LEGAL_CHARACTERS.contains(c)) { sb.append(c); } else { sb.append(ESCAPE_MARK); sb.append(String.format("%04x", (int) c)); } } return sb.toString(); } /** * Get a file object for the given name * * @param name the file name * @return a file object */ private static FileCacheStore getFile(String name, boolean enableFileCache) { if (!enableFileCache) { return FileCacheStore.Empty.getInstance(name); } try { FileCacheStore.Builder builder = FileCacheStore.newBuilder(); tryFileLock(builder, name); File file = new File(name); if (!file.exists()) { Path pathObjectOfFile = file.toPath(); Files.createFile(pathObjectOfFile); } builder.cacheFilePath(name).cacheFile(file); return builder.build(); } catch (Throwable t) { logger.warn( COMMON_CACHE_PATH_INACCESSIBLE, "inaccessible of cache path", "", "Failed to create file store cache. Local file cache will be disabled. Cache file name: " + name, t); return FileCacheStore.Empty.getInstance(name); } } private static void tryFileLock(FileCacheStore.Builder builder, String fileName) throws PathNotExclusiveException { File lockFile = new File(fileName + ".lock"); FileLock dirLock; try { lockFile.createNewFile(); if (!lockFile.exists()) { throw new AssertionError("Failed to create lock file " + lockFile); } FileChannel lockFileChannel = new RandomAccessFile(lockFile, "rw").getChannel(); dirLock = lockFileChannel.tryLock(); } catch (OverlappingFileLockException ofle) { dirLock = null; } catch (IOException ioe) { throw new RuntimeException(ioe); } if (dirLock == null) { throw new PathNotExclusiveException( fileName + " is not exclusive. 
Maybe multiple Dubbo instances are using the same folder."); } lockFile.deleteOnExit(); builder.directoryLock(dirLock).lockFile(lockFile); } static void removeCache(String cacheFileName) { cacheMap.remove(cacheFileName); } private static
FileCacheStoreFactory
java
apache__maven
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng3680InvalidDependencyPOMTest.java
{ "start": 1113, "end": 2282 }
class ____ extends AbstractMavenIntegrationTestCase { /** * Verify that dependencies with invalid POMs can still be used without failing the build. * * @throws Exception in case of failure */ @Test public void testitMNG3680() throws Exception { File testDir = extractResources("/mng-3680"); Verifier verifier = newVerifier(testDir.getAbsolutePath()); verifier.setAutoclean(false); verifier.deleteDirectory("target"); verifier.deleteArtifacts("org.apache.maven.its.mng3680"); verifier.filterFile("settings-template.xml", "settings.xml"); verifier.addCliArgument("--settings"); verifier.addCliArgument("settings.xml"); verifier.addCliArgument("validate"); verifier.execute(); verifier.verifyErrorFreeLog(); List<String> artifacts = verifier.loadLines("target/artifacts.txt"); assertTrue(artifacts.contains("org.apache.maven.its.mng3680:direct:jar:0.1"), artifacts.toString()); assertTrue(artifacts.contains("org.apache.maven.its.mng3680:transitive:jar:0.1"), artifacts.toString()); } }
MavenITmng3680InvalidDependencyPOMTest
java
apache__flink
flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/adaptivebatch/AllToAllBlockingResultInfoTest.java
{ "start": 1272, "end": 6220 }
class ____ { @Test void testGetNumBytesProducedForNonBroadcast() { testGetNumBytesProduced(false, false, 192L); } @Test void testGetNumBytesProducedForBroadcast() { testGetNumBytesProduced(true, true, 96L); testGetNumBytesProduced(true, false, 192L); } @Test void testGetNumBytesProducedWithIndexRange() { AllToAllBlockingResultInfo resultInfo = new AllToAllBlockingResultInfo(new IntermediateDataSetID(), 2, 2, false, false); resultInfo.recordPartitionInfo(0, new ResultPartitionBytes(new long[] {32L, 64L})); resultInfo.recordPartitionInfo(1, new ResultPartitionBytes(new long[] {128L, 256L})); IndexRange partitionIndexRange = new IndexRange(0, 1); IndexRange subpartitionIndexRange = new IndexRange(0, 0); assertThat(resultInfo.getNumBytesProduced(partitionIndexRange, subpartitionIndexRange)) .isEqualTo(160L); } @Test void testGetAggregatedSubpartitionBytes() { AllToAllBlockingResultInfo resultInfo = new AllToAllBlockingResultInfo(new IntermediateDataSetID(), 2, 2, false, false); resultInfo.recordPartitionInfo(0, new ResultPartitionBytes(new long[] {32L, 64L})); resultInfo.recordPartitionInfo(1, new ResultPartitionBytes(new long[] {128L, 256L})); assertThat(resultInfo.getAggregatedSubpartitionBytes()).containsExactly(160L, 320L); } @Test void testGetBytesWithPartialPartitionInfos() { AllToAllBlockingResultInfo resultInfo = new AllToAllBlockingResultInfo(new IntermediateDataSetID(), 2, 2, false, false); resultInfo.recordPartitionInfo(0, new ResultPartitionBytes(new long[] {32L, 64L})); resultInfo.onFineGrainedSubpartitionBytesNotNeeded(); assertThatThrownBy(resultInfo::getNumBytesProduced) .isInstanceOf(IllegalStateException.class); assertThatThrownBy(resultInfo::getAggregatedSubpartitionBytes) .isInstanceOf(IllegalStateException.class); } @Test void testRecordPartitionInfoMultiTimes() { AllToAllBlockingResultInfo resultInfo = new AllToAllBlockingResultInfo(new IntermediateDataSetID(), 2, 2, false, false); ResultPartitionBytes partitionBytes1 = new 
ResultPartitionBytes(new long[] {32L, 64L}); ResultPartitionBytes partitionBytes2 = new ResultPartitionBytes(new long[] {64L, 128L}); ResultPartitionBytes partitionBytes3 = new ResultPartitionBytes(new long[] {128L, 256L}); ResultPartitionBytes partitionBytes4 = new ResultPartitionBytes(new long[] {256L, 512L}); // record partitionBytes1 for subtask 0 and then reset it resultInfo.recordPartitionInfo(0, partitionBytes1); assertThat(resultInfo.getNumOfRecordedPartitions()).isOne(); resultInfo.resetPartitionInfo(0); assertThat(resultInfo.getNumOfRecordedPartitions()).isZero(); // record partitionBytes2 for subtask 0 and record partitionBytes3 for subtask 1 resultInfo.recordPartitionInfo(0, partitionBytes2); resultInfo.recordPartitionInfo(1, partitionBytes3); // The result info should be (partitionBytes2 + partitionBytes3) assertThat(resultInfo.getNumBytesProduced()).isEqualTo(576L); assertThat(resultInfo.getAggregatedSubpartitionBytes()).containsExactly(192L, 384L); // The raw info should not be clear assertThat(resultInfo.getNumOfRecordedPartitions()).isGreaterThan(0); resultInfo.onFineGrainedSubpartitionBytesNotNeeded(); // The raw info should be clear assertThat(resultInfo.getNumOfRecordedPartitions()).isZero(); // reset subtask 0 and then record partitionBytes4 for subtask 0 resultInfo.resetPartitionInfo(0); resultInfo.recordPartitionInfo(0, partitionBytes4); // The result info should still be (partitionBytes2 + partitionBytes3) assertThat(resultInfo.getNumBytesProduced()).isEqualTo(576L); assertThat(resultInfo.getAggregatedSubpartitionBytes()).containsExactly(192L, 384L); assertThat(resultInfo.getNumOfRecordedPartitions()).isZero(); } private void testGetNumBytesProduced( boolean isBroadcast, boolean singleSubpartitionContainsAllData, long expectedBytes) { AllToAllBlockingResultInfo resultInfo = new AllToAllBlockingResultInfo( new IntermediateDataSetID(), 2, 2, isBroadcast, singleSubpartitionContainsAllData); resultInfo.recordPartitionInfo(0, new 
ResultPartitionBytes(new long[] {32L, 32L})); resultInfo.recordPartitionInfo(1, new ResultPartitionBytes(new long[] {64L, 64L})); assertThat(resultInfo.getNumBytesProduced()).isEqualTo(expectedBytes); } }
AllToAllBlockingResultInfoTest
java
google__truth
extensions/re2j/src/test/java/com/google/common/truth/extensions/re2j/Re2jSubjectsTest.java
{ "start": 979, "end": 2257 }
class ____ { private static final String PATTERN_STR = "(?:hello )+world"; private static final Pattern PATTERN = Pattern.compile(PATTERN_STR); @Test public void matches_string_succeeds() { assertAbout(re2jString()).that("hello world").matches(PATTERN_STR); } @Test public void matches_pattern_succeeds() { assertAbout(re2jString()).that("hello world").matches(PATTERN); } @Test public void doesNotMatch_string_succeeds() { assertAbout(re2jString()).that("world").doesNotMatch(PATTERN_STR); } @Test public void doesNotMatch_pattern_succeeds() { assertAbout(re2jString()).that("world").doesNotMatch(PATTERN); } @Test public void containsMatch_string_succeeds() { assertAbout(re2jString()).that("this is a hello world").containsMatch(PATTERN_STR); } @Test public void containsMatch_pattern_succeeds() { assertAbout(re2jString()).that("this is a hello world").containsMatch(PATTERN); } @Test public void doesNotContainMatch_string_succeeds() { assertAbout(re2jString()).that("hello cruel world").doesNotContainMatch(PATTERN_STR); } @Test public void doesNotContainMatch_pattern_succeeds() { assertAbout(re2jString()).that("hello cruel world").doesNotContainMatch(PATTERN); } }
Re2jSubjectsTest
java
elastic__elasticsearch
x-pack/plugin/migrate/src/test/java/org/elasticsearch/system_indices/task/SystemIndexMigrationTaskStateXContentTests.java
{ "start": 539, "end": 1592 }
class ____ extends AbstractXContentTestCase<SystemIndexMigrationTaskState> { @Override protected SystemIndexMigrationTaskState createTestInstance() { return SystemIndexMigrationTaskStateTests.randomSystemIndexMigrationTask(); } @Override protected SystemIndexMigrationTaskState doParseInstance(XContentParser parser) throws IOException { return SystemIndexMigrationTaskState.PARSER.parse(parser, null); } @Override protected boolean supportsUnknownFields() { return true; } @Override protected Predicate<String> getRandomFieldsExcludeFilter() { // featureCallbackMetadata is a Map<String,Object> so adding random fields there make no sense return p -> p.startsWith(SystemIndexMigrationTaskState.FEATURE_METADATA_MAP_FIELD.getPreferredName()); } @Override protected NamedXContentRegistry xContentRegistry() { return new NamedXContentRegistry(SystemIndexMigrationExecutor.getNamedXContentParsers()); } }
SystemIndexMigrationTaskStateXContentTests
java
spring-projects__spring-framework
spring-jms/src/main/java/org/springframework/jms/listener/AbstractJmsListeningContainer.java
{ "start": 2060, "end": 2690 }
class ____ not assume any specific listener programming model * or listener invoker mechanism. It just provides the general runtime * lifecycle management needed for any kind of JMS-based listening mechanism * that operates on a JMS Connection/Session. * * <p>For a concrete listener programming model, check out the * {@link AbstractMessageListenerContainer} subclass. For a concrete listener * invoker mechanism, check out the {@link DefaultMessageListenerContainer} class. * * @author Juergen Hoeller * @since 2.0.3 * @see #sharedConnectionEnabled() * @see #doInitialize() * @see #doShutdown() */ public abstract
does
java
alibaba__nacos
naming/src/main/java/com/alibaba/nacos/naming/push/v2/task/FuzzyWatchSyncNotifyTask.java
{ "start": 1046, "end": 3582 }
class ____ extends AbstractDelayTask { private final String clientId; private final String pattern; private final Set<NamingFuzzyWatchSyncRequest.Context> syncServiceKeys; private final String syncType; private int totalBatch = 1; private int currentBatch = 1; private BatchTaskCounter batchTaskCounter; private long executeStartTime = System.currentTimeMillis(); public FuzzyWatchSyncNotifyTask(String clientId, String pattern, String syncType, Set<NamingFuzzyWatchSyncRequest.Context> syncServiceKeys, long delay) { this.clientId = clientId; this.pattern = pattern; this.syncType = syncType; if (syncServiceKeys != null) { this.syncServiceKeys = syncServiceKeys; } else { this.syncServiceKeys = new HashSet<>(); } setTaskInterval(delay); setLastProcessTime(System.currentTimeMillis()); } public int getTotalBatch() { return totalBatch; } public void setTotalBatch(int totalBatch) { this.totalBatch = totalBatch; } public int getCurrentBatch() { return currentBatch; } public void setCurrentBatch(int currentBatch) { this.currentBatch = currentBatch; } @Override public void merge(AbstractDelayTask task) { if (!(task instanceof FuzzyWatchSyncNotifyTask)) { return; } FuzzyWatchSyncNotifyTask oldTask = (FuzzyWatchSyncNotifyTask) task; if (oldTask.getSyncServiceKeys() != null) { syncServiceKeys.addAll(oldTask.getSyncServiceKeys()); } setLastProcessTime(Math.min(getLastProcessTime(), task.getLastProcessTime())); Loggers.PUSH.info("[FUZZY-WATCH-INIT-PUSH] Task merge for pattern {}", pattern); } public String getPattern() { return pattern; } public Set<NamingFuzzyWatchSyncRequest.Context> getSyncServiceKeys() { return syncServiceKeys; } public String getSyncType() { return syncType; } public String getClientId() { return clientId; } public BatchTaskCounter getBatchTaskCounter() { return batchTaskCounter; } public void setBatchTaskCounter(BatchTaskCounter batchTaskCounter) { this.batchTaskCounter = batchTaskCounter; } public long getExecuteStartTime() { return executeStartTime; } }
FuzzyWatchSyncNotifyTask
java
spring-projects__spring-security
oauth2/oauth2-core/src/main/java/org/springframework/security/oauth2/core/converter/ObjectToURLConverter.java
{ "start": 958, "end": 1485 }
class ____ implements GenericConverter { @Override public Set<ConvertiblePair> getConvertibleTypes() { return Collections.singleton(new ConvertiblePair(Object.class, URL.class)); } @Override public Object convert(Object source, TypeDescriptor sourceType, TypeDescriptor targetType) { if (source == null) { return null; } if (source instanceof URL) { return source; } try { return new URI(source.toString()).toURL(); } catch (Exception ex) { // Ignore } return null; } }
ObjectToURLConverter
java
alibaba__nacos
api/src/test/java/com/alibaba/nacos/api/config/remote/response/ConfigChangeNotifyResponseTest.java
{ "start": 934, "end": 1839 }
class ____ extends BasedConfigResponseTest { ConfigChangeNotifyResponse configChangeNotifyResponse; @BeforeEach void before() { configChangeNotifyResponse = new ConfigChangeNotifyResponse(); requestId = injectResponseUuId(configChangeNotifyResponse); } @Override @Test public void testSerializeSuccessResponse() throws JsonProcessingException { String json = mapper.writeValueAsString(configChangeNotifyResponse); assertTrue(json.contains("\"success\":" + Boolean.TRUE)); assertTrue(json.contains("\"requestId\":\"" + requestId)); assertTrue(json.contains("\"resultCode\":" + ResponseCode.SUCCESS.getCode())); assertTrue(json.contains("\"errorCode\":0")); } @Override public void testSerializeFailResponse() throws JsonProcessingException { } }
ConfigChangeNotifyResponseTest
java
alibaba__druid
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/MySqlUseTest_0.java
{ "start": 924, "end": 1738 }
class ____ extends MysqlTest { public void test_0() throws Exception { String sql = "use db;"; MySqlStatementParser parser = new MySqlStatementParser(sql); List<SQLStatement> statementList = parser.parseStatementList(); SQLStatement stmt = statementList.get(0); // print(statementList); assertEquals(1, statementList.size()); MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor(); stmt.accept(visitor); assertEquals(0, visitor.getTables().size()); assertEquals(0, visitor.getColumns().size()); assertEquals(0, visitor.getConditions().size()); assertEquals(0, visitor.getOrderByColumns().size()); // assertTrue(visitor.getTables().containsKey(new TableStat.Name("mytable"))); } }
MySqlUseTest_0
java
lettuce-io__lettuce-core
src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java
{ "start": 32690, "end": 36288 }
class ____<K, V> { private final String function; private final List<V> args; private Optional<K> alias = Optional.empty(); public Reducer(String function, List<V> args) { this.function = function; this.args = new ArrayList<>(args); } public Reducer<K, V> as(K alias) { this.alias = Optional.of(alias); return this; } /** * Static factory method to create a COUNT reducer. * * @param <K> Key type * @param <V> Value type * @return new COUNT Reducer instance */ public static <K, V> Reducer<K, V> count() { return new Reducer<>("COUNT", Collections.emptyList()); } /** * Static factory method to create a SUM reducer. * * @param field the field to sum * @param <K> Key type * @param <V> Value type * @return new SUM Reducer instance */ public static <K, V> Reducer<K, V> sum(V field) { return new Reducer<>("SUM", Collections.singletonList(field)); } /** * Static factory method to create an AVG reducer. * * @param field the field to average * @param <K> Key type * @param <V> Value type * @return new AVG Reducer instance */ public static <K, V> Reducer<K, V> avg(V field) { return new Reducer<>("AVG", Collections.singletonList(field)); } /** * Static factory method to create a MIN reducer. * * @param field the field to find minimum * @param <K> Key type * @param <V> Value type * @return new MIN Reducer instance */ public static <K, V> Reducer<K, V> min(V field) { return new Reducer<>("MIN", Collections.singletonList(field)); } /** * Static factory method to create a MAX reducer. * * @param field the field to find maximum * @param <K> Key type * @param <V> Value type * @return new MAX Reducer instance */ public static <K, V> Reducer<K, V> max(V field) { return new Reducer<>("MAX", Collections.singletonList(field)); } /** * Static factory method to create a COUNT_DISTINCT reducer. 
* * @param field the field to count distinct values * @param <K> Key type * @param <V> Value type * @return new COUNT_DISTINCT Reducer instance */ public static <K, V> Reducer<K, V> countDistinct(V field) { return new Reducer<>("COUNT_DISTINCT", Collections.singletonList(field)); } public void build(CommandArgs<K, V> args) { args.add(CommandKeyword.REDUCE); args.add(function); args.add(this.args.size()); for (V arg : this.args) { args.addValue(arg); } alias.ifPresent(a -> { args.add(CommandKeyword.AS); args.add(a.toString()); }); } } /** * Represents a FILTER clause in an aggregation pipeline. * * <p> * Filters the results using predicate expressions relating to values in each result. Filters are applied after the query * and relate to the current state of the pipeline. This allows filtering on computed fields created by APPLY operations or * reducer results. * </p> */ public static
Reducer
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/boot/internal/InFlightMetadataCollectorImpl.java
{ "start": 44056, "end": 44279 }
class ____ both '@Entity' and '@Embeddable': '" + clazz.getName() + "'" ); } else if ( clazz.hasDirectAnnotationUsage( jakarta.persistence.MappedSuperclass.class ) ) { throw new AnnotationException( "Invalid
annotated
java
google__guava
android/guava/src/com/google/common/collect/AbstractBiMap.java
{ "start": 12052, "end": 13782 }
class ____<K extends @Nullable Object, V extends @Nullable Object> extends AbstractBiMap<K, V> { Inverse(Map<K, V> backward, AbstractBiMap<V, K> forward) { super(backward, forward); } /* * Serialization stores the forward bimap, the inverse of this inverse. * Deserialization calls inverse() on the forward bimap and returns that * inverse. * * If a bimap and its inverse are serialized together, the deserialized * instances have inverse() methods that return the other. */ @Override @ParametricNullness K checkKey(@ParametricNullness K key) { return super.inverse.checkValue(key); } @Override @ParametricNullness V checkValue(@ParametricNullness V value) { return super.inverse.checkKey(value); } /** * @serialData the forward bimap */ @GwtIncompatible @J2ktIncompatible private void writeObject(ObjectOutputStream stream) throws IOException { stream.defaultWriteObject(); stream.writeObject(inverse()); } @GwtIncompatible @J2ktIncompatible @SuppressWarnings("unchecked") // reading data stored by writeObject private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { stream.defaultReadObject(); setInverse((AbstractBiMap<V, K>) requireNonNull(stream.readObject())); } @GwtIncompatible // Not needed in the emulated source. @J2ktIncompatible Object readResolve() { return inverse().inverse(); } @GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0; } @GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0; }
Inverse
java
spring-projects__spring-boot
module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/web/servlet/ServletWebSecurityAutoConfigurationTests.java
{ "start": 9670, "end": 10053 }
class ____ { @Bean @ConfigurationPropertiesBinding static Converter<String, TargetType> targetTypeConverter() { return new Converter<>() { @Override public TargetType convert(String input) { return new TargetType(); } }; } } @Configuration(proxyBeanMethods = false) @EnableConfigurationProperties(JwtProperties.class) static
ConverterConfiguration
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/CaseStatementWithTypeTest.java
{ "start": 6405, "end": 6709 }
class ____ extends JoinedDiscParent { public JoinedDiscChildB() { } public JoinedDiscChildB(Long id) { super( id ); } } @SuppressWarnings({"FieldCanBeLocal", "unused"}) @Entity( name = "UnionParent" ) @Inheritance( strategy = InheritanceType.TABLE_PER_CLASS ) public static
JoinedDiscChildB
java
alibaba__fastjson
src/test/java/com/alibaba/json/bvt/serializer/SerilaizeFilterTest.java
{ "start": 558, "end": 871 }
class ____ implements ValueFilter { public Object process(Object object, String name, Object value) { if (name.equals("id")) { return 123; } return null; } } @JSONType(serialzeFilters = MyValueFilter.class) public static
MyValueFilter
java
spring-projects__spring-framework
spring-web/src/main/java/org/springframework/web/client/RestClientResponseException.java
{ "start": 1307, "end": 1425 }
class ____ exceptions that contain actual HTTP response data. * * @author Rossen Stoyanchev * @since 4.3 */ public
for
java
apache__flink
flink-core/src/test/java/org/apache/flink/api/common/typeutils/ClassRelocator.java
{ "start": 1993, "end": 2976 }
interface ____ { String value(); } @SuppressWarnings("unchecked") public static <T> Class<? extends T> relocate(Class<?> originalClass) { ClassRegistry remapping = new ClassRegistry(originalClass); ClassRenamer classRenamer = new ClassRenamer(remapping); Map<String, byte[]> newClassBytes = classRenamer.remap(); return (Class<? extends T>) patchClass(newClassBytes, remapping); } private static Class<?> patchClass(Map<String, byte[]> newClasses, ClassRegistry remapping) { final ByteClassLoader renamingClassLoader = new ByteClassLoader(remapping.getRoot().getClassLoader(), newClasses); try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(renamingClassLoader)) { return renamingClassLoader.loadClass(remapping.getRootNewName()); } catch (Exception e) { throw new RuntimeException(e); } } private static final
RelocateClass
java
apache__camel
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/NatsEndpointBuilderFactory.java
{ "start": 99105, "end": 99412 }
class ____ extends AbstractEndpointBuilder implements NatsEndpointBuilder, AdvancedNatsEndpointBuilder { public NatsEndpointBuilderImpl(String path) { super(componentName, path); } } return new NatsEndpointBuilderImpl(path); } }
NatsEndpointBuilderImpl
java
quarkusio__quarkus
integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/OpenshiftWithRouteSecuredTest.java
{ "start": 673, "end": 2500 }
class ____ { private static final String APP_NAME = "openshift-with-route-secured"; @RegisterExtension static final QuarkusProdModeTest config = new QuarkusProdModeTest() .withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class)) .setApplicationName(APP_NAME) .setApplicationVersion("0.1-SNAPSHOT") .withConfigurationResource(APP_NAME + ".properties"); @ProdBuildResults private ProdModeTestResults prodModeTestResults; @Test public void assertGeneratedResources() throws IOException { Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes"); assertThat(kubernetesDir) .isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json")) .isDirectoryContaining(p -> p.getFileName().endsWith("openshift.yml")); List<HasMetadata> openshiftList = DeserializationUtil .deserializeAsList(kubernetesDir.resolve("openshift.yml")); assertThat(openshiftList).filteredOn(i -> "Route".equals(i.getKind())).singleElement().satisfies(i -> { assertThat(i).isInstanceOfSatisfying(Route.class, r -> { TLSConfig tls = r.getSpec().getTls(); assertNotNull(tls, "TLS configuration was not created!"); assertEquals("THE CERTIFICATE", tls.getCertificate()); assertEquals("THE CA CERTIFICATE", tls.getCaCertificate()); assertEquals("Redirect", tls.getInsecureEdgeTerminationPolicy()); assertEquals("THE KEY", tls.getKey()); assertEquals("reencrypt", tls.getTermination()); assertEquals("THE DESTINATION CERTIFICATE", tls.getDestinationCACertificate()); }); }); } }
OpenshiftWithRouteSecuredTest
java
apache__flink
flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/stream/PartitionTimeCommitTrigger.java
{ "start": 2036, "end": 6267 }
class ____ implements PartitionCommitTrigger { private static final ListStateDescriptor<List<String>> PENDING_PARTITIONS_STATE_DESC = new ListStateDescriptor<>( "pending-partitions", new ListSerializer<>(StringSerializer.INSTANCE)); private static final ListStateDescriptor<Map<Long, Long>> WATERMARKS_STATE_DESC = new ListStateDescriptor<>( "checkpoint-id-to-watermark", new MapSerializer<>(LongSerializer.INSTANCE, LongSerializer.INSTANCE)); private final ListState<List<String>> pendingPartitionsState; private final Set<String> pendingPartitions; private final ListState<Map<Long, Long>> watermarksState; private final TreeMap<Long, Long> watermarks; private final PartitionCommitPredicate partitionCommitPredicate; public PartitionTimeCommitTrigger( boolean isRestored, OperatorStateStore stateStore, PartitionCommitPredicate partitionCommitPredicate) throws Exception { this.pendingPartitionsState = stateStore.getListState(PENDING_PARTITIONS_STATE_DESC); this.pendingPartitions = new HashSet<>(); if (isRestored) { pendingPartitions.addAll(pendingPartitionsState.get().iterator().next()); } this.partitionCommitPredicate = partitionCommitPredicate; this.watermarksState = stateStore.getListState(WATERMARKS_STATE_DESC); this.watermarks = new TreeMap<>(); if (isRestored) { watermarks.putAll(watermarksState.get().iterator().next()); } } @Override public void addPartition(String partition) { if (!StringUtils.isNullOrWhitespaceOnly(partition)) { this.pendingPartitions.add(partition); } } @Override public List<String> committablePartitions(long checkpointId) { if (!watermarks.containsKey(checkpointId)) { throw new IllegalArgumentException( String.format( "Checkpoint(%d) has not been snapshot. 
The watermark information is: %s.", checkpointId, watermarks)); } long watermark = watermarks.get(checkpointId); watermarks.headMap(checkpointId, true).clear(); List<String> needCommit = new ArrayList<>(); Iterator<String> iter = pendingPartitions.iterator(); while (iter.hasNext()) { String partition = iter.next(); PredicateContext predicateContext = createPredicateContext(partition, watermark); if (partitionCommitPredicate.isPartitionCommittable(predicateContext)) { needCommit.add(partition); iter.remove(); } } return needCommit; } private PredicateContext createPredicateContext(String partition, long watermark) { return new PredicateContext() { @Override public String partition() { return partition; } @Override public long createProcTime() { throw new UnsupportedOperationException( "Method createProcTime isn't supported in PartitionTimeCommitTrigger."); } @Override public long currentProcTime() { throw new UnsupportedOperationException( "Method currentProcTime isn't supported in PartitionTimeCommitTrigger."); } @Override public long currentWatermark() { return watermark; } }; } @Override public void snapshotState(long checkpointId, long watermark) throws Exception { pendingPartitionsState.update( Collections.singletonList(new ArrayList<>(pendingPartitions))); watermarks.put(checkpointId, watermark); watermarksState.update(Collections.singletonList(new HashMap<>(watermarks))); } @Override public List<String> endInput() { ArrayList<String> partitions = new ArrayList<>(pendingPartitions); pendingPartitions.clear(); return partitions; } }
PartitionTimeCommitTrigger
java
spring-projects__spring-framework
spring-beans/src/main/java/org/springframework/beans/propertyeditors/InputStreamEditor.java
{ "start": 1576, "end": 2749 }
class ____ extends PropertyEditorSupport { private final ResourceEditor resourceEditor; /** * Create a new InputStreamEditor, using the default ResourceEditor underneath. */ public InputStreamEditor() { this.resourceEditor = new ResourceEditor(); } /** * Create a new InputStreamEditor, using the given ResourceEditor underneath. * @param resourceEditor the ResourceEditor to use */ public InputStreamEditor(ResourceEditor resourceEditor) { Assert.notNull(resourceEditor, "ResourceEditor must not be null"); this.resourceEditor = resourceEditor; } @Override public void setAsText(String text) throws IllegalArgumentException { this.resourceEditor.setAsText(text); Resource resource = (Resource) this.resourceEditor.getValue(); try { setValue(resource != null ? resource.getInputStream() : null); } catch (IOException ex) { throw new IllegalArgumentException("Failed to retrieve InputStream for " + resource, ex); } } /** * This implementation returns {@code null} to indicate that * there is no appropriate text representation. */ @Override public @Nullable String getAsText() { return null; } }
InputStreamEditor
java
apache__camel
core/camel-core/src/test/java/org/apache/camel/processor/RecipientListWithSimpleExpressionTest.java
{ "start": 1197, "end": 2547 }
class ____ extends ContextTestSupport { @Override public boolean isUseRouteBuilder() { return false; } @Test public void testRecipientList() throws Exception { context.addRoutes(new RouteBuilder() { @Override public void configure() { from("direct:start").recipientList(simple("mock:${in.header.queue}")); } }); context.start(); template.start(); for (int i = 0; i < 10; i++) { getMockEndpoint("mock:" + i).expectedMessageCount(50); } // use concurrent producers to send a lot of messages ExecutorService executors = Executors.newFixedThreadPool(10); for (int i = 0; i < 50; i++) { executors.execute(new Runnable() { public void run() { for (int i = 0; i < 10; i++) { try { template.sendBodyAndHeader("direct:start", "Hello " + i, "queue", i); Thread.sleep(5); } catch (Exception e) { // ignore } } } }); } assertMockEndpointsSatisfied(); executors.shutdownNow(); } }
RecipientListWithSimpleExpressionTest
java
apache__flink
flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
{ "start": 5979, "end": 6543 }
class ____.mainClass = loadMainClass( // if no entryPointClassName name was given, we try and look one up through // the manifest entryPointClassName != null ? entryPointClassName : getEntryPointClassNameFromJar(this.jarFile), userCodeClassLoader); if (!hasMainMethod(mainClass)) { throw new ProgramInvocationException( "The given program
this
java
spring-projects__spring-framework
spring-web/src/main/java/org/springframework/web/service/invoker/HttpExchangeAdapterDecorator.java
{ "start": 1019, "end": 2336 }
class ____ implements HttpExchangeAdapter { private final HttpExchangeAdapter delegate; public HttpExchangeAdapterDecorator(HttpExchangeAdapter delegate) { this.delegate = delegate; } /** * Return the wrapped delgate {@code HttpExchangeAdapter}. */ public HttpExchangeAdapter getHttpExchangeAdapter() { return this.delegate; } @Override public boolean supportsRequestAttributes() { return this.delegate.supportsRequestAttributes(); } @Override public void exchange(HttpRequestValues requestValues) { this.delegate.exchange(requestValues); } @Override public HttpHeaders exchangeForHeaders(HttpRequestValues requestValues) { return this.delegate.exchangeForHeaders(requestValues); } @Override public <T> @Nullable T exchangeForBody(HttpRequestValues requestValues, ParameterizedTypeReference<T> bodyType) { return this.delegate.exchangeForBody(requestValues, bodyType); } @Override public ResponseEntity<Void> exchangeForBodilessEntity(HttpRequestValues requestValues) { return this.delegate.exchangeForBodilessEntity(requestValues); } @Override public <T> ResponseEntity<T> exchangeForEntity(HttpRequestValues requestValues, ParameterizedTypeReference<T> bodyType) { return this.delegate.exchangeForEntity(requestValues, bodyType); } }
HttpExchangeAdapterDecorator
java
hibernate__hibernate-orm
hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/MySQLLobSchemaCreationTest.java
{ "start": 1182, "end": 3010 }
class ____ { private File output; private StandardServiceRegistry ssr; private MetadataImplementor metadata; @BeforeEach public void setUp() throws Exception { output = File.createTempFile( "update_script", ".sql" ); output.deleteOnExit(); ssr = ServiceRegistryUtil.serviceRegistry(); } @AfterEach public void tearsDown() { output.delete(); dropSchema( TestEntity.class ); StandardServiceRegistryBuilder.destroy( ssr ); } @Test public void testSchemaCreation() throws Exception { createSchema( TestEntity.class ); String fileContent = new String( Files.readAllBytes( output.toPath() ) ).toLowerCase() .replace( System.lineSeparator(), "" ); assertThat( fileContent ).contains( "lobfield longtext" ); } private void createSchema(Class... annotatedClasses) { final MetadataSources metadataSources = new MetadataSources( ssr ); for ( Class c : annotatedClasses ) { metadataSources.addAnnotatedClass( c ); } metadata = (MetadataImplementor) metadataSources.buildMetadata(); metadata.orderColumns( false ); metadata.validate(); new SchemaExport() .setHaltOnError( true ) .setOutputFile( output.getAbsolutePath() ) .setFormat( false ) .create( EnumSet.of( TargetType.SCRIPT, TargetType.DATABASE ), metadata ); } private void dropSchema(Class... annotatedClasses) { final MetadataSources metadataSources = new MetadataSources( ssr ); for ( Class c : annotatedClasses ) { metadataSources.addAnnotatedClass( c ); } metadata = (MetadataImplementor) metadataSources.buildMetadata(); metadata.orderColumns( false ); metadata.validate(); new SchemaExport() .setHaltOnError( false ) .setFormat( false ) .drop( EnumSet.of( TargetType.DATABASE ), metadata ); } @Entity @Table(name = "TestEntity") public static
MySQLLobSchemaCreationTest
java
micronaut-projects__micronaut-core
test-suite/src/test/java/io/micronaut/docs/aop/lifecycle/ProductService.java
{ "start": 166, "end": 623 }
class ____ { private final Map<String, Product> products = new HashMap<>(); void addProduct(Product product) { products.put(product.getProductName(), product); } void removeProduct(Product product) { product.setActive(false); products.remove(product.getProductName()); } Optional<Product> findProduct(String name) { return Optional.ofNullable(products.get(name)); } } // end::class[]
ProductService
java
resilience4j__resilience4j
resilience4j-framework-common/src/main/java/io/github/resilience4j/common/micrometer/monitoring/endpoint/TimerEndpointResponse.java
{ "start": 756, "end": 1141 }
class ____ { @Nullable private List<String> timers; public TimerEndpointResponse() { } public TimerEndpointResponse(List<String> timers) { this.timers = timers; } @Nullable public List<String> getTimers() { return timers; } public void setTimers(List<String> timers) { this.timers = timers; } }
TimerEndpointResponse
java
assertj__assertj-core
assertj-core/src/test/java/org/assertj/core/api/charsequence/CharSequenceAssert_containsSubsequence_with_var_args_Test.java
{ "start": 1005, "end": 1405 }
class ____ extends CharSequenceAssertBaseTest { @Override protected CharSequenceAssert invoke_api_method() { return assertions.containsSubsequence("od", "do"); } @Override protected void verify_internal_effects() { verify(strings).assertContainsSubsequence(getInfo(assertions), getActual(assertions), array("od", "do")); } }
CharSequenceAssert_containsSubsequence_with_var_args_Test
java
apache__rocketmq
client/src/test/java/org/apache/rocketmq/client/trace/TraceDataEncoderTest.java
{ "start": 1230, "end": 12437 }
class ____ { private String traceData; private long time; @Before public void init() { time = System.currentTimeMillis(); traceData = new StringBuilder() .append("Pub").append(TraceConstants.CONTENT_SPLITOR) .append(time).append(TraceConstants.CONTENT_SPLITOR) .append("DefaultRegion").append(TraceConstants.CONTENT_SPLITOR) .append("PID-test").append(TraceConstants.CONTENT_SPLITOR) .append("topic-test").append(TraceConstants.CONTENT_SPLITOR) .append("AC1415116D1418B4AAC217FE1B4E0000").append(TraceConstants.CONTENT_SPLITOR) .append("Tags").append(TraceConstants.CONTENT_SPLITOR) .append("Keys").append(TraceConstants.CONTENT_SPLITOR) .append("127.0.0.1:10911").append(TraceConstants.CONTENT_SPLITOR) .append(26).append(TraceConstants.CONTENT_SPLITOR) .append(245).append(TraceConstants.CONTENT_SPLITOR) .append(MessageType.Normal_Msg.ordinal()).append(TraceConstants.CONTENT_SPLITOR) .append("0A9A002600002A9F0000000000002329").append(TraceConstants.CONTENT_SPLITOR) .append(true).append(TraceConstants.FIELD_SPLITOR) .toString(); } @Test public void testDecoderFromTraceDataString() { List<TraceContext> contexts = TraceDataEncoder.decoderFromTraceDataString(traceData); Assert.assertEquals(contexts.size(), 1); Assert.assertEquals(contexts.get(0).getTraceType(), TraceType.Pub); } @Test public void testEncoderFromContextBean() { TraceContext context = new TraceContext(); context.setTraceType(TraceType.Pub); context.setGroupName("PID-test"); context.setRegionId("DefaultRegion"); context.setCostTime(245); context.setSuccess(true); context.setTimeStamp(time); TraceBean traceBean = new TraceBean(); traceBean.setTopic("topic-test"); traceBean.setKeys("Keys"); traceBean.setTags("Tags"); traceBean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000"); traceBean.setOffsetMsgId("0A9A002600002A9F0000000000002329"); traceBean.setStoreHost("127.0.0.1:10911"); traceBean.setStoreTime(time); traceBean.setMsgType(MessageType.Normal_Msg); traceBean.setBodyLength(26); List<TraceBean> traceBeans = new 
ArrayList<>(); traceBeans.add(traceBean); context.setTraceBeans(traceBeans); TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(context); Assert.assertEquals(traceTransferBean.getTransData(), traceData); Assert.assertEquals(traceTransferBean.getTransKey().size(), 2); } @Test public void testEncoderFromContextBean_EndTransaction() { TraceContext context = new TraceContext(); context.setTraceType(TraceType.EndTransaction); context.setGroupName("PID-test"); context.setRegionId("DefaultRegion"); context.setTimeStamp(time); TraceBean traceBean = new TraceBean(); traceBean.setTopic("topic-test"); traceBean.setKeys("Keys"); traceBean.setTags("Tags"); traceBean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000"); traceBean.setStoreHost("127.0.0.1:10911"); traceBean.setMsgType(MessageType.Trans_msg_Commit); traceBean.setTransactionId("transactionId"); traceBean.setTransactionState(LocalTransactionState.COMMIT_MESSAGE); traceBean.setFromTransactionCheck(false); List<TraceBean> traceBeans = new ArrayList<>(); traceBeans.add(traceBean); context.setTraceBeans(traceBeans); TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(context); Assert.assertEquals(traceTransferBean.getTransKey().size(), 2); String traceData = traceTransferBean.getTransData(); TraceContext contextAfter = TraceDataEncoder.decoderFromTraceDataString(traceData).get(0); Assert.assertEquals(context.getTraceType(), contextAfter.getTraceType()); Assert.assertEquals(context.getTimeStamp(), contextAfter.getTimeStamp()); Assert.assertEquals(context.getGroupName(), contextAfter.getGroupName()); TraceBean before = context.getTraceBeans().get(0); TraceBean after = contextAfter.getTraceBeans().get(0); Assert.assertEquals(before.getTopic(), after.getTopic()); Assert.assertEquals(before.getMsgId(), after.getMsgId()); Assert.assertEquals(before.getTags(), after.getTags()); Assert.assertEquals(before.getKeys(), after.getKeys()); Assert.assertEquals(before.getStoreHost(), 
after.getStoreHost()); Assert.assertEquals(before.getMsgType(), after.getMsgType()); Assert.assertEquals(before.getClientHost(), after.getClientHost()); Assert.assertEquals(before.getTransactionId(), after.getTransactionId()); Assert.assertEquals(before.getTransactionState(), after.getTransactionState()); Assert.assertEquals(before.isFromTransactionCheck(), after.isFromTransactionCheck()); } @Test public void testPubTraceDataFormatTest() { TraceContext pubContext = new TraceContext(); pubContext.setTraceType(TraceType.Pub); pubContext.setTimeStamp(time); pubContext.setRegionId("Default-region"); pubContext.setGroupName("GroupName-test"); pubContext.setCostTime(34); pubContext.setSuccess(true); TraceBean bean = new TraceBean(); bean.setTopic("topic-test"); bean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000"); bean.setTags("tags"); bean.setKeys("keys"); bean.setStoreHost("127.0.0.1:10911"); bean.setBodyLength(100); bean.setMsgType(MessageType.Normal_Msg); bean.setOffsetMsgId("AC1415116D1418B4AAC217FE1B4E0000"); pubContext.setTraceBeans(new ArrayList<>(1)); pubContext.getTraceBeans().add(bean); TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(pubContext); String transData = traceTransferBean.getTransData(); Assert.assertNotNull(transData); String[] items = transData.split(String.valueOf(TraceConstants.CONTENT_SPLITOR)); Assert.assertEquals(14, items.length); } @Test public void testSubBeforeTraceDataFormatTest() { TraceContext subBeforeContext = new TraceContext(); subBeforeContext.setTraceType(TraceType.SubBefore); subBeforeContext.setTimeStamp(time); subBeforeContext.setRegionId("Default-region"); subBeforeContext.setGroupName("GroupName-test"); subBeforeContext.setRequestId("3455848576927"); TraceBean bean = new TraceBean(); bean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000"); bean.setRetryTimes(0); bean.setKeys("keys"); subBeforeContext.setTraceBeans(new ArrayList<>(1)); subBeforeContext.getTraceBeans().add(bean); TraceTransferBean 
traceTransferBean = TraceDataEncoder.encoderFromContextBean(subBeforeContext); String transData = traceTransferBean.getTransData(); Assert.assertNotNull(transData); String[] items = transData.split(String.valueOf(TraceConstants.CONTENT_SPLITOR)); Assert.assertEquals(8, items.length); } @Test public void testSubAfterTraceDataFormatTest() { TraceContext subAfterContext = new TraceContext(); subAfterContext.setTraceType(TraceType.SubAfter); subAfterContext.setRequestId("3455848576927"); subAfterContext.setCostTime(20); subAfterContext.setSuccess(true); subAfterContext.setTimeStamp(1625883640000L); subAfterContext.setGroupName("GroupName-test"); subAfterContext.setContextCode(98623046); subAfterContext.setAccessChannel(AccessChannel.LOCAL); TraceBean bean = new TraceBean(); bean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000"); bean.setKeys("keys"); subAfterContext.setTraceBeans(new ArrayList<>(1)); subAfterContext.getTraceBeans().add(bean); TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(subAfterContext); String transData = traceTransferBean.getTransData(); Assert.assertNotNull(transData); String[] items = transData.split(String.valueOf(TraceConstants.CONTENT_SPLITOR)); Assert.assertEquals(9, items.length); } @Test public void testEndTrxTraceDataFormatTest() { TraceContext endTrxContext = new TraceContext(); endTrxContext.setTraceType(TraceType.EndTransaction); endTrxContext.setGroupName("PID-test"); endTrxContext.setRegionId("DefaultRegion"); endTrxContext.setTimeStamp(time); TraceBean endTrxTraceBean = new TraceBean(); endTrxTraceBean.setTopic("topic-test"); endTrxTraceBean.setKeys("Keys"); endTrxTraceBean.setTags("Tags"); endTrxTraceBean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000"); endTrxTraceBean.setStoreHost("127.0.0.1:10911"); endTrxTraceBean.setMsgType(MessageType.Trans_msg_Commit); endTrxTraceBean.setTransactionId("transactionId"); endTrxTraceBean.setTransactionState(LocalTransactionState.COMMIT_MESSAGE); 
endTrxTraceBean.setFromTransactionCheck(false); List<TraceBean> traceBeans = new ArrayList<>(); traceBeans.add(endTrxTraceBean); endTrxContext.setTraceBeans(traceBeans); TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(endTrxContext); String transData = traceTransferBean.getTransData(); Assert.assertNotNull(transData); String[] items = transData.split(String.valueOf(TraceConstants.CONTENT_SPLITOR)); Assert.assertEquals(13, items.length); } @Test public void testTraceKeys() { TraceContext endTrxContext = new TraceContext(); endTrxContext.setTraceType(TraceType.EndTransaction); endTrxContext.setGroupName("PID-test"); endTrxContext.setRegionId("DefaultRegion"); endTrxContext.setTimeStamp(time); TraceBean endTrxTraceBean = new TraceBean(); endTrxTraceBean.setTopic("topic-test"); endTrxTraceBean.setKeys("Keys Keys2"); endTrxTraceBean.setTags("Tags"); endTrxTraceBean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000"); endTrxTraceBean.setStoreHost("127.0.0.1:10911"); endTrxTraceBean.setMsgType(MessageType.Trans_msg_Commit); endTrxTraceBean.setTransactionId("transactionId"); endTrxTraceBean.setTransactionState(LocalTransactionState.COMMIT_MESSAGE); endTrxTraceBean.setFromTransactionCheck(false); List<TraceBean> traceBeans = new ArrayList<>(); traceBeans.add(endTrxTraceBean); endTrxContext.setTraceBeans(traceBeans); TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(endTrxContext); Set<String> keys = traceTransferBean.getTransKey(); assertThat(keys).contains("Keys"); assertThat(keys).contains("Keys2"); } }
TraceDataEncoderTest
java
apache__camel
components/camel-xchange/src/main/java/org/apache/camel/component/xchange/XChangeEndpoint.java
{ "start": 2617, "end": 7919 }
class ____ extends DefaultEndpoint { @UriParam private XChangeConfiguration configuration; private transient XChange xchange; public XChangeEndpoint(String uri, XChangeComponent component, XChangeConfiguration configuration) { super(uri, component); this.configuration = configuration; } @Override public XChangeComponent getComponent() { return (XChangeComponent) super.getComponent(); } @Override public Consumer createConsumer(Processor processor) throws Exception { throw new UnsupportedOperationException(); } @Override public Producer createProducer() throws Exception { Producer producer = null; XChangeService service = getConfiguration().getService(); if (XChangeService.account == service) { producer = new XChangeAccountProducer(this); } else if (XChangeService.marketdata == service) { producer = new XChangeMarketDataProducer(this); } else if (XChangeService.metadata == service) { producer = new XChangeMetaDataProducer(this); } Assert.notNull(producer, "Unsupported service: " + service); return producer; } public void setConfiguration(XChangeConfiguration configuration) { this.configuration = configuration; } public XChangeConfiguration getConfiguration() { return configuration; } public XChange getXchange() { return xchange; } public void setXchange(XChange xchange) { this.xchange = xchange; } public List<Currency> getCurrencies() { ExchangeMetaData metaData = xchange.getExchangeMetaData(); return metaData.getCurrencies().keySet().stream().sorted().collect(Collectors.toList()); } public CurrencyMetaData getCurrencyMetaData(Currency curr) { Assert.notNull(curr, "Null currency"); ExchangeMetaData metaData = xchange.getExchangeMetaData(); return metaData.getCurrencies().get(curr); } public List<CurrencyPair> getCurrencyPairs() { ExchangeMetaData metaData = xchange.getExchangeMetaData(); return metaData.getInstruments().keySet().stream() .filter(it -> it instanceof CurrencyPair) .map(it -> (CurrencyPair) it) .sorted().collect(Collectors.toList()); } public 
InstrumentMetaData getCurrencyPairMetaData(CurrencyPair pair) { Assert.notNull(pair, "Null currency"); ExchangeMetaData metaData = xchange.getExchangeMetaData(); return metaData.getInstruments().get(pair); } public List<Balance> getBalances() throws IOException { List<Balance> balances = new ArrayList<>(); getWallets().stream().forEach(w -> { for (Balance aux : w.getBalances().values()) { Currency curr = aux.getCurrency(); CurrencyMetaData metaData = getCurrencyMetaData(curr); if (metaData != null) { int scale = metaData.getScale(); double total = aux.getTotal().doubleValue(); double scaledTotal = total * Math.pow(10, scale / 2); if (1 <= scaledTotal) { balances.add(aux); } } } }); return balances.stream().sorted((Balance o1, Balance o2) -> o1.getCurrency().compareTo(o2.getCurrency())) .collect(Collectors.toList()); } public List<FundingRecord> getFundingHistory() throws IOException { AccountService accountService = xchange.getAccountService(); TradeHistoryParams fundingHistoryParams = accountService.createFundingHistoryParams(); return accountService.getFundingHistory(fundingHistoryParams).stream() .sorted((FundingRecord o1, FundingRecord o2) -> o1.getDate().compareTo(o2.getDate())) .collect(Collectors.toList()); } public List<Wallet> getWallets() throws IOException { // [#4741] BinanceAccountService assumes futures account when not using sandbox // https://github.com/knowm/XChange/issues/4741 AccountService accountService = xchange.getAccountService(); if (accountService instanceof BinanceAccountService binanceAccountService) { Wallet wallet = BinanceAdapters.adaptBinanceSpotWallet(binanceAccountService.account()); return Collections.singletonList(wallet); } else { AccountInfo accountInfo = accountService.getAccountInfo(); return accountInfo.getWallets().values().stream().sorted(Comparator.comparing(Wallet::getName)) .collect(Collectors.toList()); } } public Ticker getTicker(CurrencyPair pair) throws IOException { Assert.notNull(pair, "Null currency pair"); 
MarketDataService marketService = xchange.getMarketDataService(); try { return marketService.getTicker((Instrument) pair); } catch (NotYetImplementedForExchangeException e) { // Ignored } // Retry service lookup using deprecated (but still in use) MarketDataService.getTicker(CurrencyPair) return marketService.getTicker(pair); } }
XChangeEndpoint
java
micronaut-projects__micronaut-core
inject/src/main/java/io/micronaut/inject/qualifiers/RepeatableAnnotationQualifier.java
{ "start": 1080, "end": 2891 }
class ____<T> extends FilteringQualifier<T> { private final List<AnnotationValue<Annotation>> repeatableValues; private final String repeatableName; RepeatableAnnotationQualifier(AnnotationMetadata annotationMetadata, String repeatableName) { this.repeatableName = repeatableName; this.repeatableValues = annotationMetadata.findAnnotation(repeatableName) .map(av -> av.getAnnotations(AnnotationMetadata.VALUE_MEMBER)) .orElse(Collections.emptyList()); if (repeatableValues.isEmpty()) { throw new IllegalArgumentException("Repeatable qualifier [" + repeatableName + "] declared with no values"); } } @Override public String toString() { return Arrays.toString(repeatableValues.toArray()); } @Override public boolean doesQualify(Class<T> beanType, BeanType<T> candidate) { final AnnotationValue<Annotation> declared = candidate.getAnnotationMetadata().getAnnotation(repeatableName); if (declared != null) { final List<AnnotationValue<Annotation>> repeated = declared.getAnnotations(AnnotationMetadata.VALUE_MEMBER); return repeated.containsAll(repeatableValues); } return false; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } RepeatableAnnotationQualifier<?> that = (RepeatableAnnotationQualifier<?>) o; return repeatableValues.equals(that.repeatableValues) && repeatableName.equals(that.repeatableName); } @Override public int hashCode() { return ObjectUtils.hash(repeatableValues, repeatableName); } }
RepeatableAnnotationQualifier
java
apache__logging-log4j2
log4j-1.2-api/src/main/java/org/apache/log4j/Hierarchy.java
{ "start": 2015, "end": 2174 }
class ____ specialized in retrieving loggers by name and also maintaining the logger hierarchy. * * <p> * <em>The casual user does not have to deal with this
is
java
apache__camel
archetypes/camel-archetype-main/src/main/resources/archetype-resources/src/main/java/MyRouteBuilder.java
{ "start": 1018, "end": 1307 }
class ____ extends RouteBuilder { @Override public void configure() throws Exception { from("timer:foo?period={{myPeriod}}") .bean("myBean", "hello") .log("${body}") .bean("myBean", "bye") .log("${body}"); } }
MyRouteBuilder
java
spring-projects__spring-data-jpa
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/sample/ProductRepository.java
{ "start": 841, "end": 911 }
interface ____ extends JpaRepository<Product, Long> { }
ProductRepository
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/OverriddenFilterDefOverridesAnnotation.java
{ "start": 663, "end": 1891 }
class ____ implements DialectOverride.FilterDefOverrides, RepeatableContainer<DialectOverride.FilterDefs> { private DialectOverride.FilterDefs[] value; /** * Used in creating dynamic annotation instances (e.g. from XML) */ public OverriddenFilterDefOverridesAnnotation(ModelsContext modelContext) { } /** * Used in creating annotation instances from JDK variant */ public OverriddenFilterDefOverridesAnnotation( DialectOverride.FilterDefOverrides annotation, ModelsContext modelContext) { this.value = extractJdkValue( annotation, DIALECT_OVERRIDE_FILTER_DEF_OVERRIDES, "value", modelContext ); } /** * Used in creating annotation instances from Jandex variant */ public OverriddenFilterDefOverridesAnnotation( Map<String, Object> attributeValues, ModelsContext modelContext) { this.value = (DialectOverride.FilterDefs[]) attributeValues.get( "value" ); } @Override public DialectOverride.FilterDefs[] value() { return value; } @Override public void value(DialectOverride.FilterDefs[] value) { this.value = value; } @Override public Class<? extends Annotation> annotationType() { return DialectOverride.FilterDefOverrides.class; } }
OverriddenFilterDefOverridesAnnotation
java
apache__kafka
clients/src/main/java/org/apache/kafka/clients/admin/AlterStreamsGroupOffsetsOptions.java
{ "start": 1157, "end": 1256 }
class ____ extends AbstractOptions<AlterStreamsGroupOffsetsOptions> { }
AlterStreamsGroupOffsetsOptions
java
elastic__elasticsearch
server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java
{ "start": 755, "end": 2104 }
class ____ extends BucketMetricsPipeLineAggregationTestCase<InternalStatsBucket> { @Override protected StatsBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { return statsBucket(name, bucketsPath); } @Override protected void assertResult( IntToDoubleFunction bucketValues, Function<Integer, String> bucketKeys, int numBuckets, InternalStatsBucket pipelineBucket ) { double sum = 0; int count = 0; double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; for (int i = 0; i < numBuckets; ++i) { double bucketValue = bucketValues.applyAsDouble(i); count++; sum += bucketValue; min = Math.min(min, bucketValue); max = Math.max(max, bucketValue); } double avgValue = count == 0 ? Double.NaN : (sum / count); assertThat(pipelineBucket.getAvg(), equalTo(avgValue)); assertThat(pipelineBucket.getMin(), equalTo(min)); assertThat(pipelineBucket.getMax(), equalTo(max)); } @Override protected String nestedMetric() { return "avg"; } @Override protected double getNestedMetric(InternalStatsBucket bucket) { return bucket.getAvg(); } }
StatsBucketIT
java
hibernate__hibernate-orm
hibernate-core/src/main/java/org/hibernate/sql/model/ast/builder/AbstractRestrictedTableMutationBuilder.java
{ "start": 877, "end": 3265 }
class ____<O extends MutationOperation, M extends RestrictedTableMutation<O>> extends AbstractTableMutationBuilder<M> implements RestrictedTableMutationBuilder<O, M> { private final ColumnValueBindingList keyRestrictionBindings; private final ColumnValueBindingList optimisticLockBindings; public AbstractRestrictedTableMutationBuilder( MutationType mutationType, MutationTarget<?> mutationTarget, TableMapping table, SessionFactoryImplementor sessionFactory) { super( mutationType, mutationTarget, table, sessionFactory ); this.keyRestrictionBindings = new ColumnValueBindingList( getMutatingTable(), getParameters(), ParameterUsage.RESTRICT ); this.optimisticLockBindings = new ColumnValueBindingList( getMutatingTable(), getParameters(), ParameterUsage.RESTRICT ); } public AbstractRestrictedTableMutationBuilder( MutationType mutationType, MutationTarget<?> mutationTarget, MutatingTableReference tableReference, SessionFactoryImplementor sessionFactory) { super( mutationType, mutationTarget, tableReference, sessionFactory ); this.keyRestrictionBindings = new ColumnValueBindingList( getMutatingTable(), getParameters(), ParameterUsage.RESTRICT ); this.optimisticLockBindings = new ColumnValueBindingList( getMutatingTable(), getParameters(), ParameterUsage.RESTRICT ); } @Override public ColumnValueBindingList getKeyRestrictionBindings() { return keyRestrictionBindings; } @Override public ColumnValueBindingList getOptimisticLockBindings() { return optimisticLockBindings; } @Override public void addNonKeyRestriction(ColumnValueBinding valueBinding) { optimisticLockBindings.addRestriction( valueBinding ); } @Override public void addKeyRestrictionBinding(SelectableMapping selectableMapping) { keyRestrictionBindings.addRestriction( selectableMapping ); } @Override public void addNullOptimisticLockRestriction(SelectableMapping column) { optimisticLockBindings.addNullRestriction( column ); } @Override public void addOptimisticLockRestriction(SelectableMapping selectableMapping) { 
optimisticLockBindings.addRestriction( selectableMapping ); } @Override public void setWhere(String fragment) { throw new UnsupportedOperationException(); } @Override public void addWhereFragment(String fragment) { throw new UnsupportedOperationException(); } }
AbstractRestrictedTableMutationBuilder
java
elastic__elasticsearch
server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java
{ "start": 2001, "end": 74051 }
class ____ extends MapperServiceTestCase { public void testPreflightUpdateDoesNotChangeMapping() throws Throwable { final MapperService mapperService = createMapperService(mapping(b -> {})); merge(mapperService, MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT, mapping(b -> createMappingSpecifyingNumberOfFields(b, 1))); assertThat("field was not created by preflight check", mapperService.fieldType("field0"), nullValue()); merge( mapperService, randomFrom(MergeReason.MAPPING_UPDATE, MergeReason.MAPPING_AUTO_UPDATE), mapping(b -> createMappingSpecifyingNumberOfFields(b, 1)) ); assertThat("field was not created by mapping update", mapperService.fieldType("field0"), notNullValue()); } public void testMappingLookup() throws IOException { MapperService service = createMapperService(mapping(b -> {})); MappingLookup oldLookup = service.mappingLookup(); assertThat(oldLookup.fieldTypesLookup().get("cat"), nullValue()); merge(service, mapping(b -> b.startObject("cat").field("type", "keyword").endObject())); MappingLookup newLookup = service.mappingLookup(); assertThat(newLookup.fieldTypesLookup().get("cat"), not(nullValue())); assertThat(oldLookup.fieldTypesLookup().get("cat"), nullValue()); } /** * Test that we can have at least the number of fields in new mappings that are defined by "index.mapping.total_fields.limit". * Any additional field should trigger an IllegalArgumentException. 
*/ public void testTotalFieldsLimit() throws Throwable { int totalFieldsLimit = randomIntBetween(1, 10); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldsLimit) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); MapperService mapperService = createMapperService( settings, mapping(b -> createMappingSpecifyingNumberOfFields(b, totalFieldsLimit)) ); // adding one more field should trigger exception IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> b.startObject("newfield").field("type", "long").endObject())) ); assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [" + totalFieldsLimit + "] has been exceeded")); // adding one more runtime field should trigger exception e = expectThrows( IllegalArgumentException.class, () -> merge(mapperService, runtimeMapping(b -> b.startObject("newfield").field("type", "long").endObject())) ); assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [" + totalFieldsLimit + "] has been exceeded")); } private void createMappingSpecifyingNumberOfFields(XContentBuilder b, int numberOfFields) throws IOException { for (int i = 0; i < numberOfFields; i++) { b.startObject("field" + i); b.field("type", randomFrom("long", "integer", "date", "keyword", "text")); b.endObject(); } } public void testMappingDepthExceedsLimit() throws Throwable { Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), 1).build(); MapperService mapperService = createMapperService(settings, mapping(b -> {})); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping((b -> { b.startObject("object1"); b.field("type", "object"); b.endObject(); })))); assertThat(e.getMessage(), containsString("Limit of mapping depth [1] has been exceeded")); } public void 
testPartitionedConstraints() throws IOException { // partitioned index must have routing Settings settings = Settings.builder().put("index.number_of_shards", 4).put("index.routing_partition_size", 2).build(); Exception e = expectThrows(IllegalArgumentException.class, () -> createMapperService(settings, mapping(b -> {}))); assertThat(e.getMessage(), containsString("must have routing")); // valid partitioned index createMapperService(settings, topMapping(b -> b.startObject("_routing").field("required", true).endObject())); } public void testIndexSortWithNestedFields() throws IOException { IndexVersion oldVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.INDEX_SORTING_ON_NESTED); IllegalArgumentException invalidNestedException = expectThrows( IllegalArgumentException.class, () -> createMapperService(oldVersion, settings(oldVersion).put("index.sort.field", "foo").build(), () -> true, mapping(b -> { b.startObject("nested_field").field("type", "nested").endObject(); b.startObject("foo").field("type", "keyword").endObject(); })) ); Settings settings = settings(IndexVersions.INDEX_SORTING_ON_NESTED).put("index.sort.field", "foo").build(); DocumentMapper mapper = createMapperService(settings, mapping(b -> { b.startObject("nested_field").field("type", "nested").endObject(); b.startObject("foo").field("type", "keyword").endObject(); })).documentMapper(); List<LuceneDocument> docs = mapper.parse(source(b -> { b.field("name", "foo"); b.startObject("nested_field").field("foo", "bar").endObject(); })).docs(); assertEquals(2, docs.size()); assertEquals(docs.get(1), docs.get(0).getParent()); MapperService mapperService = createMapperService( settings, mapping(b -> b.startObject("foo").field("type", "keyword").endObject()) ); merge(mapperService, mapping(b -> { b.startObject("nested_field"); b.field("type", "nested"); b.endObject(); })); Settings settings2 = Settings.builder().put("index.sort.field", "foo.bar").build(); invalidNestedException = 
expectThrows(IllegalArgumentException.class, () -> createMapperService(settings2, mapping(b -> { b.startObject("foo"); { b.field("type", "nested"); b.startObject("properties"); { b.startObject("bar").field("type", "keyword").endObject(); } b.endObject(); } b.endObject(); }))); assertEquals("cannot apply index sort to field [foo.bar] under nested object [foo]", invalidNestedException.getMessage()); } public void testFieldAliasWithMismatchedNestedScope() throws Throwable { MapperService mapperService = createMapperService(mapping(b -> { b.startObject("nested"); { b.field("type", "nested"); b.startObject("properties"); { b.startObject("field").field("type", "text").endObject(); } b.endObject(); } b.endObject(); })); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> { b.startObject("alias"); { b.field("type", "alias"); b.field("path", "nested.field"); } b.endObject(); }))); assertThat(e.getMessage(), containsString("Invalid [path] value [nested.field] for field alias [alias]")); } public void testTotalFieldsLimitWithFieldAlias() throws Throwable { int numberOfFieldsIncludingAlias = 2; Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), numberOfFieldsIncludingAlias) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); createMapperService(settings, mapping(b -> { b.startObject("alias").field("type", "alias").field("path", "field").endObject(); b.startObject("field").field("type", "text").endObject(); })); // Set the total fields limit to the number of non-alias fields, to verify that adding // a field alias pushes the mapping over the limit. 
// NOTE(review): line breaks in this region were lost in this copy (trailing '//' comments swallow
// following code); code preserved byte-for-byte — restore formatting from version control.
// L697: tail of testTotalFieldsLimitWithFieldAlias (alias pushes mapping over the limit);
// testFieldNameLengthLimit and most of testObjectNameLengthLimit: field/object names longer than
// index.mapping.field_name_length_limit are rejected with a descriptive message.
int numberOfNonAliasFields = 1; Settings errorSettings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), numberOfNonAliasFields) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createMapperService(errorSettings, mapping(b -> { b.startObject("alias").field("type", "alias").field("path", "field").endObject(); b.startObject("field").field("type", "text").endObject(); }))); assertEquals("Limit of total fields [" + numberOfNonAliasFields + "] has been exceeded", e.getMessage()); } public void testFieldNameLengthLimit() throws Throwable { int maxFieldNameLength = randomIntBetween(25, 30); String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) .build(); MapperService mapperService = createMapperService(settings, fieldMapping(b -> b.field("type", "text"))); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> b.startObject(testString).field("type", "text").endObject())) ); assertEquals("Field name [" + testString + "] is longer than the limit of [" + maxFieldNameLength + "] characters", e.getMessage()); } public void testObjectNameLengthLimit() throws Throwable { int maxFieldNameLength = randomIntBetween(25, 30); String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) .build(); MapperService mapperService = createMapperService(settings, mapping(b -> {})); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> b.startObject(testString).field("type", "object").endObject())) );
// L698: tail of testObjectNameLengthLimit; testAliasFieldNameLengthLimit (same limit applies to
// alias names); testMappingRecoverySkipFieldNameLengthLimit (MergeReason.MAPPING_RECOVERY bypasses
// the name-length check so existing mappings always recover); start of testIsMetadataField.
assertEquals("Field name [" + testString + "] is longer than the limit of [" + maxFieldNameLength + "] characters", e.getMessage()); } public void testAliasFieldNameLengthLimit() throws Throwable { int maxFieldNameLength = randomIntBetween(25, 30); String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) .build(); MapperService mapperService = createMapperService(settings, mapping(b -> {})); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> { b.startObject(testString).field("type", "alias").field("path", "field").endObject(); b.startObject("field").field("type", "text").endObject(); }))); assertEquals("Field name [" + testString + "] is longer than the limit of [" + maxFieldNameLength + "] characters", e.getMessage()); } public void testMappingRecoverySkipFieldNameLengthLimit() throws Throwable { int maxFieldNameLength = randomIntBetween(25, 30); String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) .build(); MapperService mapperService = createMapperService(settings, mapping(b -> {})); CompressedXContent mapping = new CompressedXContent( BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() .startObject("_doc") .startObject("properties") .startObject(testString) .field("type", "text") .endObject() .endObject() .endObject() .endObject() ) ); DocumentMapper documentMapper = mapperService.merge("_doc", mapping, MergeReason.MAPPING_RECOVERY); assertEquals(testString, documentMapper.mappers().getMapper(testString).leafName()); } public void testIsMetadataField() throws IOException { IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random());
// NOTE(review): line breaks in this region were lost in this copy ('//' comments swallow following
// code); code preserved byte-for-byte — restore formatting from version control.
// L699: body of testIsMetadataField — for every IndexMode, built-in metadata fields are reported
// as metadata except: _nested_path before 8.0, and _tsid/_ts_routing_hash outside TIME_SERIES mode;
// then the start of testMappingUpdateChecks.
CheckedFunction<IndexMode, MapperService, IOException> initMapperService = (indexMode) -> { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, version) .put(IndexSettings.MODE.getKey(), indexMode); if (indexMode == IndexMode.TIME_SERIES) { settingsBuilder.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo"); } return createMapperService(settingsBuilder.build(), mapping(b -> {})); }; Consumer<MapperService> assertMapperService = (mapperService) -> { assertFalse(mapperService.isMetadataField(randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetadataFields()) { if (NestedPathFieldMapper.NAME.equals(builtIn) && version.before(IndexVersions.V_8_0_0)) { continue; // Nested field does not exist in the 7x line } boolean isTimeSeriesField = builtIn.equals("_tsid") || builtIn.equals("_ts_routing_hash"); boolean isTimeSeriesMode = mapperService.getIndexSettings().getMode().equals(IndexMode.TIME_SERIES); if (isTimeSeriesField && isTimeSeriesMode == false) { assertFalse( "Expected " + builtIn + " to not be a metadata field for version " + version + " and index mode " + mapperService.getIndexSettings().getMode(), mapperService.isMetadataField(builtIn) ); } else { assertTrue( "Expected " + builtIn + " to be a metadata field for version " + version + " and index mode " + mapperService.getIndexSettings().getMode(), mapperService.isMetadataField(builtIn) ); } } }; for (IndexMode indexMode : IndexMode.values()) { MapperService mapperService = initMapperService.apply(indexMode); assertMapperService.accept(mapperService); } } public void testMappingUpdateChecks() throws IOException { MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "text"))); { IndexMetadata.Builder builder = new IndexMetadata.Builder("test"); builder.settings(indexSettings(IndexVersion.current(), 1, 0)); // Text fields are not stored by default, so an incoming update that is identical but // just has
// L700: tail of testMappingUpdateChecks (assertNoUpdateRequired accepts an equivalent mapping but
// throws IllegalStateException on a real change); testEagerGlobalOrdinals (only fields with
// eager_global_ordinals=true are returned); most of testMultiFieldChecks (multi-field detection,
// including runtime fields shadowing sub-fields).
`stored:false` should not require an update builder.putMapping(""" {"properties":{"field":{"type":"text","store":"false"}}}"""); assertTrue(mapperService.assertNoUpdateRequired(builder.build())); } { IndexMetadata.Builder builder = new IndexMetadata.Builder("test"); builder.settings(indexSettings(IndexVersion.current(), 1, 0)); // However, an update that really does need a rebuild will throw an exception builder.putMapping(""" {"properties":{"field":{"type":"text","store":"true"}}}"""); Exception e = expectThrows(IllegalStateException.class, () -> mapperService.assertNoUpdateRequired(builder.build())); assertThat(e.getMessage(), containsString("expected current mapping [")); assertThat(e.getMessage(), containsString("to be the same as new mapping")); } } public void testEagerGlobalOrdinals() throws IOException { MapperService mapperService = createMapperService(mapping(b -> { b.startObject("eager1").field("type", "keyword").field("eager_global_ordinals", true).endObject(); b.startObject("lazy1").field("type", "keyword").field("eager_global_ordinals", false).endObject(); b.startObject("eager2").field("type", "keyword").field("eager_global_ordinals", true).endObject(); b.startObject("lazy2").field("type", "long").endObject(); })); List<String> eagerFieldNames = StreamSupport.stream(mapperService.getEagerGlobalOrdinalsFields().spliterator(), false) .map(MappedFieldType::name) .toList(); assertThat(eagerFieldNames, containsInAnyOrder("eager1", "eager2")); } public void testMultiFieldChecks() throws IOException { MapperService mapperService = createMapperService(""" { "_doc" : { "properties" : { "field1" : { "type" : "keyword", "fields" : { "subfield1" : { "type" : "long" }, "subfield2" : { "type" : "text" } } }, "object.field2" : { "type" : "keyword" } }, "runtime" : { "object.subfield1" : { "type" : "keyword" }, "field1.subfield2" : { "type" : "keyword" } } } } """); assertFalse(mapperService.isMultiField("non_existent_field"));
// NOTE(review): line breaks in this region were lost in this copy (text-block contents are
// flattened onto one line, which would alter the literal string values); code preserved
// byte-for-byte — restore formatting from version control.
// L701: tail of testMultiFieldChecks, then the explanatory comment opening
// testMergeObjectSubfieldWhileParsing (duplicate field definitions in object vs. dotted notation
// are merged; regression test for elastic/elasticsearch#88573 where merged leaves lost their
// path prefix).
assertFalse(mapperService.isMultiField("field1")); assertTrue(mapperService.isMultiField("field1.subfield1")); // not a multifield, because it's shadowed by a runtime field assertFalse(mapperService.isMultiField("field1.subfield2")); assertFalse(mapperService.isMultiField("object.field2")); assertFalse(mapperService.isMultiField("object.subfield1")); } public void testMergeObjectSubfieldWhileParsing() throws IOException { /* If we are parsing mappings that hold the definition of the same field twice, the two are merged together. This can happen when mappings have the same field specified using the object notation as well as the dot notation, as well as when applying index templates, in which case the two definitions may come from separate index templates that end up in the same map (through XContentHelper#mergeDefaults, see MetadataCreateIndexService#parseV1Mappings). We had a bug (https://github.com/elastic/elasticsearch/issues/88573) triggered by this scenario that caused the merged leaf fields to get the wrong path (missing the first portion).
// L702: body of testMergeObjectSubfieldWhileParsing (merged mapping keeps full path
// "obj.sub.string" and indexes documents without a dynamic update); testBulkMerge (merging a list
// of mappings equals merging them sequentially); start of testMergeSubobjectsFalseOrder.
*/ MapperService mapperService = createMapperService(""" { "_doc": { "properties": { "obj": { "properties": { "sub": { "properties": { "string": { "type": "keyword" } } } } }, "obj.sub.string" : { "type" : "keyword" } } } } """); assertNotNull(mapperService.mappingLookup().getMapper("obj.sub.string")); MappedFieldType fieldType = mapperService.mappingLookup().getFieldType("obj.sub.string"); assertNotNull(fieldType); assertEquals(""" { "_doc" : { "properties" : { "obj" : { "properties" : { "sub" : { "properties" : { "string" : { "type" : "keyword" } } } } } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); // check that with the resulting mappings a new document has the previously merged field indexed properly ParsedDocument parsedDocument = mapperService.documentMapper().parse(source(""" { "obj.sub.string" : "value" }""")); assertNull(parsedDocument.dynamicMappingsUpdate()); List<IndexableField> fields = parsedDocument.rootDoc().getFields("obj.sub.string"); assertEquals(1, fields.size()); } public void testBulkMerge() throws IOException { final MapperService mapperService = createMapperService(mapping(b -> {})); CompressedXContent mapping1 = createTestMapping1(); CompressedXContent mapping2 = createTestMapping2(); mapperService.merge("_doc", mapping1, MergeReason.INDEX_TEMPLATE); DocumentMapper sequentiallyMergedMapper = mapperService.merge("_doc", mapping2, MergeReason.INDEX_TEMPLATE); DocumentMapper bulkMergedMapper = mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertEquals(sequentiallyMergedMapper.mappingSource(), bulkMergedMapper.mappingSource()); } public void testMergeSubobjectsFalseOrder() throws IOException { final MapperService mapperService = createMapperService(mapping(b -> {})); CompressedXContent mapping1 = createTestMapping1(); CompressedXContent mapping2 = createTestMapping2(); DocumentMapper subobjectsFirst = mapperService.merge("_doc", List.of(mapping1, mapping2),
// NOTE(review): line breaks in this region were lost in this copy (text-block contents are
// flattened onto one line, which would alter the literal string values); code preserved
// byte-for-byte — restore formatting from version control.
// L703: tail of testMergeSubobjectsFalseOrder (merge result is order-independent); private
// helpers createTestMapping1/createTestMapping2 (subobjects:false roots with "parent" /
// "parent.subfield" text fields); start of testSubobjectsDisabledNotAtRoot.
MergeReason.INDEX_TEMPLATE); DocumentMapper subobjectsLast = mapperService.merge("_doc", List.of(mapping2, mapping1), MergeReason.INDEX_TEMPLATE); assertEquals(subobjectsFirst.mappingSource(), subobjectsLast.mappingSource()); } private static CompressedXContent createTestMapping1() throws IOException { CompressedXContent mapping1; try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { mapping1 = new CompressedXContent( BytesReference.bytes( xContentBuilder.startObject() .startObject("_doc") .field("subobjects", false) .startObject("properties") .startObject("parent") .field("type", "text") .endObject() .endObject() .endObject() .endObject() ) ); } return mapping1; } private static CompressedXContent createTestMapping2() throws IOException { CompressedXContent mapping2; try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { mapping2 = new CompressedXContent( BytesReference.bytes( xContentBuilder.startObject() .startObject("_doc") .field("subobjects", false) .startObject("properties") .startObject("parent.subfield") .field("type", "text") .endObject() .endObject() .endObject() .endObject() ) ); } return mapping2; } public void testSubobjectsDisabledNotAtRoot() throws IOException { final MapperService mapperService = createMapperService(mapping(b -> {})); CompressedXContent mapping1; try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { mapping1 = new CompressedXContent( BytesReference.bytes( xContentBuilder.startObject() .startObject("_doc") .startObject("properties") .startObject("parent") .field("subobjects", false) .field("type", "object") .endObject() .endObject() .endObject() .endObject() ) ); } CompressedXContent mapping2; try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { mapping2 = new CompressedXContent( BytesReference.bytes( xContentBuilder.startObject() .startObject("_doc") .startObject("properties") .startObject("parent") .startObject("properties") .startObject("child.grandchild")
// L704: tail of testSubobjectsDisabledNotAtRoot (order-independent merge under a non-root
// subobjects:false object); testMergeMultipleRoots (a bare-properties mapping merges with a
// "_doc"-rooted one); testMergeMultipleRootsWithRootType (two roots where one is "_doc" is a
// MapperParsingException); start of testMergeMultipleRootsWithoutRootType.
.field("type", "text") .endObject() .endObject() .endObject() .endObject() .endObject() .endObject() ) ); } DocumentMapper subobjectsFirst = mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); DocumentMapper subobjectsLast = mapperService.merge("_doc", List.of(mapping2, mapping1), MergeReason.INDEX_TEMPLATE); assertEquals(subobjectsFirst.mappingSource(), subobjectsLast.mappingSource()); } public void testMergeMultipleRoots() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties" : { "field" : { "subobjects" : false, "type" : "object" } } } """); CompressedXContent mapping2 = new CompressedXContent(""" { "_doc" : { "_meta" : { "meta-field" : "some-info" }, "properties" : { "field" : { "properties" : { "subfield" : { "type" : "keyword" } } } } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "_meta" : { "meta-field" : "some-info" }, "properties" : { "field" : { "subobjects" : false, "properties" : { "subfield" : { "type" : "keyword" } } } } } }"""); } public void testMergeMultipleRootsWithRootType() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties" : { "field" : { "type" : "keyword" } } } """); CompressedXContent mapping2 = new CompressedXContent(""" { "_doc" : { "_meta" : { "meta-field" : "some-info" } }, "properties" : { "field" : { "subobjects" : false } } }"""); final MapperService mapperService = createMapperService(mapping(b -> {})); MapperParsingException e = expectThrows( MapperParsingException.class, () -> mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE) ); assertThat(e.getMessage(), containsString("cannot merge a map with multiple roots, one of which is [_doc]")); } public void testMergeMultipleRootsWithoutRootType() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties" : { "field" : { "type" : "keyword" } } } """); CompressedXContent mapping2 =
// L705: tail of testMergeMultipleRootsWithoutRootType (rootless mappings merge under "_doc");
// testValidMappingSubstitution (a later field definition of a different type replaces the earlier
// one wholesale); testValidMappingSubtreeSubstitution (a leaf replaces a whole object subtree);
// start of testSameTypeMerge.
new CompressedXContent(""" { "_meta" : { "meta-field" : "some-info" } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "_meta" : { "meta-field" : "some-info" }, "properties" : { "field" : { "type" : "keyword" } } } }"""); } public void testValidMappingSubstitution() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "field": { "type": "keyword", "ignore_above": 1024 } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "field": { "type": "long", "coerce": true } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "field" : { "type" : "long", "coerce" : true } } } }"""); } public void testValidMappingSubtreeSubstitution() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "field": { "type": "object", "subobjects": false, "properties": { "subfield": { "type": "keyword" } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "field": { "type": "long", "coerce": true } } }"""); final MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertEquals(""" { "_doc" : { "properties" : { "field" : { "type" : "long", "coerce" : true } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); } public void testSameTypeMerge() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "field": { "type": "keyword", "ignore_above": 256, "doc_values": false, "fields": { "text": { "type": "text" } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "field": { "type": "keyword", "ignore_above": 1024, "fields": { "other_text": { "type": "text" } } } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "field" : { "type" : "keyword",
// NOTE(review): line breaks in this region were lost in this copy (text-block contents are
// flattened onto one line, which would alter the literal string values); code preserved
// byte-for-byte — restore formatting from version control.
// L706: tail of testSameTypeMerge (same-type re-definition replaces parameters and multi-fields
// rather than merging them); testObjectAndNestedTypeSubstitution (nested -> object substitution
// keeps both subfields); testNestedContradictingProperties (later include_in_parent wins); start
// of testImplicitObjectHierarchy.
"ignore_above" : 1024, "fields" : { "other_text" : { "type" : "text" } } } } } }"""); } public void testObjectAndNestedTypeSubstitution() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties" : { "field": { "type": "nested", "include_in_parent": true, "properties": { "subfield1": { "type": "keyword" } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "field": { "type": "object", "properties": { "subfield2": { "type": "keyword" } } } } }"""); final MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertEquals(""" { "_doc" : { "properties" : { "field" : { "properties" : { "subfield1" : { "type" : "keyword" }, "subfield2" : { "type" : "keyword" } } } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); } public void testNestedContradictingProperties() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "field": { "type": "nested", "include_in_parent": false, "properties": { "subfield1": { "type": "keyword" } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "field": { "type": "nested", "include_in_parent": true, "properties": { "subfield2": { "type": "keyword" } } } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "field" : { "type" : "nested", "include_in_parent" : true, "properties" : { "subfield1" : { "type" : "keyword" }, "subfield2" : { "type" : "keyword" } } } } } }"""); } public void testImplicitObjectHierarchy() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent": { "properties": { "child.grandchild": { "type": "keyword" } } } } }"""); assertMergeEquals(List.of(mapping1), """ { "_doc" : { "properties" : { "parent" : { "properties" : { "child" : { "properties" : { "grandchild" :
// L707: tail of testImplicitObjectHierarchy (dotted names expand into an object hierarchy);
// testSubobjectsMerge (subobjects:false parent keeps dotted child names flat);
// testContradictingSubobjects (the later mapping's subobjects value wins in either order); start
// of testSubobjectsImplicitObjectsMerge.
{ "type" : "keyword" } } } } } } } }"""); } public void testSubobjectsMerge() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent": { "type": "object", "subobjects": false } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "parent": { "properties": { "child.grandchild": { "type": "keyword" } } } } }"""); final MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "parent" : { "subobjects" : false, "properties" : { "child.grandchild" : { "type" : "keyword" } } } } } }"""); } public void testContradictingSubobjects() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent": { "type": "object", "subobjects": false, "properties": { "child.grandchild": { "type": "text" } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "parent": { "type": "object", "subobjects": true, "properties": { "child.grandchild": { "type": "long" } } } } }"""); MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertEquals(""" { "_doc" : { "properties" : { "parent" : { "subobjects" : true, "properties" : { "child" : { "properties" : { "grandchild" : { "type" : "long" } } } } } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping2, mapping1), MergeReason.INDEX_TEMPLATE); assertMergeEquals(List.of(mapping2, mapping1), """ { "_doc" : { "properties" : { "parent" : { "subobjects" : false, "properties" : { "child.grandchild" : { "type" : "text" } } } } } }"""); } public void testSubobjectsImplicitObjectsMerge() throws
// L708: tail of testSubobjectsImplicitObjectsMerge (dotted top-level names fold under a
// subobjects:false parent); testMultipleTypeMerges (object -> nested child merge unions
// grandchildren, later types win); start of testPropertiesFieldSingleChildMerge (a field literally
// named "properties" is handled correctly).
IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent": { "type": "object", "subobjects": false } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "parent.child.grandchild": { "type": "keyword" } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "parent" : { "subobjects" : false, "properties" : { "child.grandchild" : { "type" : "keyword" } } } } } }"""); } public void testMultipleTypeMerges() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties" : { "parent": { "type": "object", "properties": { "child": { "type": "object", "properties": { "grandchild1": { "type": "keyword" }, "grandchild2": { "type": "date" } } } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties" : { "parent": { "type": "object", "properties": { "child": { "type": "nested", "properties": { "grandchild1": { "type": "text" }, "grandchild3": { "type": "text" } } } } } } }"""); final MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertEquals(""" { "_doc" : { "properties" : { "parent" : { "properties" : { "child" : { "type" : "nested", "properties" : { "grandchild1" : { "type" : "text" }, "grandchild2" : { "type" : "date" }, "grandchild3" : { "type" : "text" } } } } } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); } public void testPropertiesFieldSingleChildMerge() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "properties": { "type": "object", "properties": { "child": { "type": "object", "dynamic": true, "properties": { "grandchild": { "type": "keyword" } } } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "properties": { "properties": { "child": { "type": "long", "coerce": true } } }
// NOTE(review): line breaks in this region were lost in this copy (text-block contents are
// flattened onto one line, which would alter the literal string values); code preserved
// byte-for-byte — restore formatting from version control.
// L709: tail of testPropertiesFieldSingleChildMerge (both merge orders verified: later definition
// substitutes object<->leaf under a field named "properties"); start of
// testPropertiesFieldMultiChildMerge.
} }"""); MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertEquals(""" { "_doc" : { "properties" : { "properties" : { "properties" : { "child" : { "type" : "long", "coerce" : true } } } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); Mapper propertiesMapper = mapperService.documentMapper().mapping().getRoot().getMapper("properties"); assertThat(propertiesMapper, instanceOf(ObjectMapper.class)); Mapper childMapper = ((ObjectMapper) propertiesMapper).getMapper("child"); assertThat(childMapper, instanceOf(FieldMapper.class)); assertEquals("long", childMapper.typeName()); // Now checking the opposite merge mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping2, mapping1), MergeReason.INDEX_TEMPLATE); assertEquals(""" { "_doc" : { "properties" : { "properties" : { "properties" : { "child" : { "dynamic" : "true", "properties" : { "grandchild" : { "type" : "keyword" } } } } } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); propertiesMapper = mapperService.documentMapper().mapping().getRoot().getMapper("properties"); assertThat(propertiesMapper, instanceOf(ObjectMapper.class)); childMapper = ((ObjectMapper) propertiesMapper).getMapper("child"); assertThat(childMapper, instanceOf(ObjectMapper.class)); Mapper grandchildMapper = ((ObjectMapper) childMapper).getMapper("grandchild"); assertThat(grandchildMapper, instanceOf(FieldMapper.class)); assertEquals("keyword", grandchildMapper.typeName()); } public void testPropertiesFieldMultiChildMerge() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "properties": { "properties": { "child1": { "type": "text", "fields": { "keyword": { "type": "keyword" } } }, "child2": { "type": "text" }, "child3": { "properties": { "grandchild": { "type": "text" } } } } } } }""");
// L710: tail of testPropertiesFieldMultiChildMerge (per-child merge semantics and field counts
// under a "properties"-named object); start of testMergeUntilLimit (fields beyond
// index.mapping.total_fields.limit are dropped when ignore_dynamic_beyond_limit is set).
CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "properties": { "properties": { "child2": { "type": "integer" }, "child3": { "properties": { "grandchild": { "type": "long" } } } } } } }"""); MapperService mapperService = createMapperService(mapping(b -> {})); mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE); assertEquals(""" { "_doc" : { "properties" : { "properties" : { "properties" : { "child1" : { "type" : "text", "fields" : { "keyword" : { "type" : "keyword" } } }, "child2" : { "type" : "integer" }, "child3" : { "properties" : { "grandchild" : { "type" : "long" } } } } } } } }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); Mapper propertiesMapper = mapperService.documentMapper().mapping().getRoot().getMapper("properties"); assertThat(propertiesMapper, instanceOf(ObjectMapper.class)); Mapper childMapper = ((ObjectMapper) propertiesMapper).getMapper("child1"); assertThat(childMapper, instanceOf(FieldMapper.class)); assertEquals("text", childMapper.typeName()); assertEquals(2, childMapper.getTotalFieldsCount()); childMapper = ((ObjectMapper) propertiesMapper).getMapper("child2"); assertThat(childMapper, instanceOf(FieldMapper.class)); assertEquals("integer", childMapper.typeName()); assertEquals(1, childMapper.getTotalFieldsCount()); childMapper = ((ObjectMapper) propertiesMapper).getMapper("child3"); assertThat(childMapper, instanceOf(ObjectMapper.class)); Mapper grandchildMapper = ((ObjectMapper) childMapper).getMapper("grandchild"); assertEquals("long", grandchildMapper.typeName()); } public void testMergeUntilLimit() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent.child1": { "type": "keyword" } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "parent.child2": { "type": "keyword" } } }"""); Settings settings = Settings.builder()
// L711: tail of testMergeUntilLimit; testMergeUntilLimitMixedObjectAndDottedNotation (exactly one
// of the two notations for the same parent survives at the limit — order nondeterministic, hence
// the XOR assertion); start of testUpdateMappingWhenAtLimit.
.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 2) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); final MapperService mapperService = createMapperService(settings, mapping(b -> {})); DocumentMapper mapper = mapperService.merge("_doc", mapping1, MergeReason.MAPPING_AUTO_UPDATE); mapper = mapperService.merge("_doc", mapping2, MergeReason.MAPPING_AUTO_UPDATE); assertNotNull(mapper.mappers().getMapper("parent.child1")); assertNull(mapper.mappers().getMapper("parent.child2")); } public void testMergeUntilLimitMixedObjectAndDottedNotation() throws IOException { CompressedXContent mapping = new CompressedXContent(""" { "properties": { "parent": { "properties": { "child1": { "type": "keyword" } } }, "parent.child2": { "type": "keyword" } } }"""); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 2) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); final MapperService mapperService = createMapperService(settings, mapping(b -> {})); DocumentMapper mapper = mapperService.merge("_doc", mapping, MergeReason.MAPPING_AUTO_UPDATE); assertEquals(0, mapper.mappers().remainingFieldsUntilLimit(2)); assertNotNull(mapper.mappers().objectMappers().get("parent")); // the order is not deterministic, but we expect one to be null and the other to be non-null assertTrue(mapper.mappers().getMapper("parent.child1") == null ^ mapper.mappers().getMapper("parent.child2") == null); } public void testUpdateMappingWhenAtLimit() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent.child1": { "type": "boolean" } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "parent.child1": { "type": "boolean", "ignore_malformed": true } } }"""); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 2)
// NOTE(review): line breaks in this region were lost in this copy ('//' comments swallow following
// code and text blocks are flattened); code preserved byte-for-byte — restore formatting from
// version control. The final test method is truncated at the end of this excerpt.
// L712: tail of testUpdateMappingWhenAtLimit (updating an existing field is allowed even at the
// limit); most of testMultiFieldsUpdate (existing multi-field parameters may be updated, new
// multi-fields beyond the limit are ignored).
.put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); final MapperService mapperService = createMapperService(settings, mapping(b -> {})); DocumentMapper mapper = mapperService.merge("_doc", mapping1, MergeReason.MAPPING_AUTO_UPDATE); mapper = mapperService.merge("_doc", mapping2, MergeReason.MAPPING_AUTO_UPDATE); assertNotNull(mapper.mappers().getMapper("parent.child1")); assertTrue(((BooleanFieldMapper) mapper.mappers().getMapper("parent.child1")).ignoreMalformed()); } public void testMultiFieldsUpdate() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "text_field": { "type": "text", "fields": { "multi_field1": { "type": "boolean" } } } } }"""); // changes a mapping parameter for multi_field1 and adds another multi field which is supposed to be ignored CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "text_field": { "type": "text", "fields": { "multi_field1": { "type": "boolean", "ignore_malformed": true }, "multi_field2": { "type": "keyword" } } } } }"""); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 2) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); final MapperService mapperService = createMapperService(settings, mapping(b -> {})); DocumentMapper mapper = mapperService.merge("_doc", mapping1, MergeReason.MAPPING_AUTO_UPDATE); mapper = mapperService.merge("_doc", mapping2, MergeReason.MAPPING_AUTO_UPDATE); assertNotNull(mapper.mappers().getMapper("text_field")); FieldMapper.MultiFields multiFields = ((TextFieldMapper) mapper.mappers().getMapper("text_field")).multiFields(); Map<String, FieldMapper> multiFieldMap = StreamSupport.stream(multiFields.spliterator(), false) .collect(Collectors.toMap(FieldMapper::fullPath, Function.identity())); assertThat(multiFieldMap.keySet(), contains("text_field.multi_field1"));
// L713: tail of testMultiFieldsUpdate; testMultiFieldExceedsLimit (a field whose multi-fields
// exceed the limit is dropped entirely while a cheaper sibling is kept);
// testMergeUntilLimitInitialMappingExceedsLimit (XOR: exactly one field survives); start of
// testMergeUntilLimitCapacityOnlyForParent.
assertTrue(multiFieldMap.get("text_field.multi_field1").ignoreMalformed()); } public void testMultiFieldExceedsLimit() throws IOException { CompressedXContent mapping = new CompressedXContent(""" { "properties": { "multi_field": { "type": "text", "fields": { "multi_field1": { "type": "boolean" } } }, "keyword_field": { "type": "keyword" } } }"""); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); final MapperService mapperService = createMapperService(settings, mapping(b -> {})); DocumentMapper mapper = mapperService.merge("_doc", mapping, MergeReason.MAPPING_AUTO_UPDATE); assertNull(mapper.mappers().getMapper("multi_field")); assertNotNull(mapper.mappers().getMapper("keyword_field")); } public void testMergeUntilLimitInitialMappingExceedsLimit() throws IOException { CompressedXContent mapping = new CompressedXContent(""" { "properties": { "field1": { "type": "keyword" }, "field2": { "type": "keyword" } } }"""); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1) .put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); final MapperService mapperService = createMapperService(settings, mapping(b -> {})); DocumentMapper mapper = mapperService.merge("_doc", mapping, MergeReason.MAPPING_AUTO_UPDATE); // the order is not deterministic, but we expect one to be null and the other to be non-null assertTrue(mapper.mappers().getMapper("field1") == null ^ mapper.mappers().getMapper("field2") == null); } public void testMergeUntilLimitCapacityOnlyForParent() throws IOException { CompressedXContent mapping = new CompressedXContent(""" { "properties": { "parent.child": { "type": "keyword" } } }"""); Settings settings = Settings.builder() .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1)
// L714: tail of testMergeUntilLimitCapacityOnlyForParent (parent object is kept, child dropped at
// the limit); testAutoFlattenObjectsSubobjectsTopLevelMerge and testAutoFlattenObjectsSubobjectsMerge
// (objects under subobjects:false are auto-flattened to dotted names, in either merge order); start
// of testAutoFlattenObjectsSubobjectsMergeConflictingMappingParameter.
.put(INDEX_MAPPING_IGNORE_DYNAMIC_BEYOND_LIMIT_SETTING.getKey(), true) .build(); final MapperService mapperService = createMapperService(settings, mapping(b -> {})); DocumentMapper mapper = mapperService.merge("_doc", mapping, MergeReason.MAPPING_AUTO_UPDATE); assertNotNull(mapper.mappers().objectMappers().get("parent")); assertNull(mapper.mappers().getMapper("parent.child")); } public void testAutoFlattenObjectsSubobjectsTopLevelMerge() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "subobjects": false }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "parent": { "properties": { "child": { "dynamic": true, "properties": { "grandchild": { "type": "keyword" } } } } } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "subobjects" : false, "properties" : { "parent.child.grandchild" : { "type" : "keyword" } } } }"""); } public void testAutoFlattenObjectsSubobjectsMerge() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties" : { "parent" : { "properties" : { "child" : { "type": "object" } } } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties" : { "parent" : { "subobjects" : false, "properties" : { "child" : { "properties" : { "grandchild" : { "type" : "keyword" } } } } } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "parent" : { "subobjects" : false, "properties" : { "child.grandchild" : { "type" : "keyword" } } } } } }"""); assertMergeEquals(List.of(mapping2, mapping1), """ { "_doc" : { "properties" : { "parent" : { "subobjects" : false, "properties" : { "child.grandchild" : { "type" : "keyword" } } } } } }"""); } public void testAutoFlattenObjectsSubobjectsMergeConflictingMappingParameter() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "subobjects": false }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": {
// L715: tail of the conflicting-parameter test (auto-flattening fails when a child object's
// [dynamic] conflicts with the parent context) and the truncated start of
// testAutoFlattenObjectsSubobjectsMergeConflictingMappingParameterRoot (cut off mid-string in
// this excerpt).
"parent": { "dynamic": "false", "properties": { "child": { "properties": { "grandchild": { "type": "keyword" } } } } } } }"""); final MapperService mapperService = createMapperService(mapping(b -> {})); MapperParsingException e = expectThrows( MapperParsingException.class, () -> mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE) ); assertThat( e.getMessage(), containsString( "Failed to parse mapping: Object mapper [parent] was found in a context where subobjects is set to false. " + "Auto-flattening [parent] failed because the value of [dynamic] (FALSE) is not compatible " + "with the value from its parent context (TRUE)" ) ); } public void testAutoFlattenObjectsSubobjectsMergeConflictingMappingParameterRoot() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "subobjects": false, "dynamic": false }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "subobjects": false, "properties": { "parent": { "dynamic": "true", "properties": { "child": { "properties": { "grandchild": { "type": "keyword" } } } } } } }"""); final MapperService mapperService = createMapperService(mapping(b -> {})); MapperParsingException e = expectThrows( MapperParsingException.class, () -> mapperService.merge("_doc", List.of(mapping1, mapping2), MergeReason.INDEX_TEMPLATE) ); assertThat( e.getMessage(), containsString( "Failed to parse mapping: Object mapper [parent] was found in a context where subobjects is set to false.
" + "Auto-flattening [parent] failed because the value of [dynamic] (TRUE) is not compatible " + "with the value from its parent context (FALSE)" ) ); } public void testAutoFlattenObjectsSubobjectsMergeNonConflictingMappingParameter() throws IOException { CompressedXContent mapping = new CompressedXContent(""" { "dynamic": false, "properties": { "parent": { "dynamic": true, "enabled": false, "subobjects": false, "properties": { "child": { "properties": { "grandchild": { "type": "keyword" } } } } } } }"""); assertMergeEquals(List.of(mapping), """ { "_doc" : { "dynamic" : "false", "properties" : { "parent" : { "dynamic" : "true", "enabled" : false, "subobjects" : false, "properties" : { "child.grandchild" : { "type" : "keyword" } } } } } }"""); } public void testExpandDottedNotationToObjectMappers() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent.child": { "type": "keyword" } } }"""); CompressedXContent mapping2 = new CompressedXContent("{}"); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "parent" : { "properties" : { "child" : { "type" : "keyword" } } } } } }"""); } public void testMergeDottedAndNestedNotation() throws IOException { CompressedXContent mapping1 = new CompressedXContent(""" { "properties": { "parent.child": { "type": "keyword" } } }"""); CompressedXContent mapping2 = new CompressedXContent(""" { "properties": { "parent" : { "properties" : { "child" : { "type" : "integer" } } } } }"""); assertMergeEquals(List.of(mapping1, mapping2), """ { "_doc" : { "properties" : { "parent" : { "properties" : { "child" : { "type" : "integer" } } } } } }"""); assertMergeEquals(List.of(mapping2, mapping1), """ { "_doc" : { "properties" : { "parent" : { "properties" : { "child" : { "type" : "keyword" } } } } } }"""); } public void testDottedAndNestedNotationInSameMapping() throws IOException { CompressedXContent mapping = new CompressedXContent(""" { "properties": { "parent.child": 
{ "type": "keyword" }, "parent" : { "properties" : { "child" : { "type" : "integer" } } } } }"""); assertMergeEquals(List.of(mapping), """ { "_doc" : { "properties" : { "parent" : { "properties" : { "child" : { "type" : "integer" } } } } } }"""); } private void assertMergeEquals(List<CompressedXContent> mappingSources, String expected) throws IOException { final MapperService mapperServiceBulk = createMapperService(mapping(b -> {})); // simulates multiple component templates being merged in a composable index template mapperServiceBulk.merge("_doc", mappingSources, MergeReason.INDEX_TEMPLATE); assertEquals(expected, Strings.toString(mapperServiceBulk.documentMapper().mapping(), true, true)); MapperService mapperServiceSequential = createMapperService(mapping(b -> {})); // simulates a series of mapping updates mappingSources.forEach(m -> mapperServiceSequential.merge("_doc", m, MergeReason.INDEX_TEMPLATE)); assertEquals(expected, Strings.toString(mapperServiceSequential.documentMapper().mapping(), true, true)); } }
MapperServiceTests
java
apache__flink
flink-runtime/src/main/java/org/apache/flink/runtime/state/memory/NonPersistentMetadataCheckpointStorageLocation.java
{ "start": 3565, "end": 5091 }
class ____ extends CheckpointMetadataOutputStream { private final ByteArrayOutputStreamWithPos os = new ByteArrayOutputStreamWithPos(); private boolean closed; @Override public void write(int b) throws IOException { os.write(b); } @Override public void write(byte[] b, int off, int len) throws IOException { os.write(b, off, len); } @Override public void flush() throws IOException { os.flush(); } @Override public long getPos() throws IOException { return os.getPosition(); } @Override public void sync() throws IOException {} @Override public CompletedCheckpointStorageLocation closeAndFinalizeCheckpoint() throws IOException { synchronized (this) { if (!closed) { closed = true; byte[] bytes = os.toByteArray(); ByteStreamStateHandle handle = new ByteStreamStateHandle(UUID.randomUUID().toString(), bytes); return new NonPersistentCompletedCheckpointStorageLocation(handle); } else { throw new IOException("Already closed"); } } } @Override public void close() { if (!closed) { closed = true; os.reset(); } } } }
MetadataOutputStream