language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/ConditionEvaluator.java
|
{
"start": 2470,
"end": 5440
}
|
class ____ be {@link ConfigurationPhase#PARSE_CONFIGURATION})
* @param metadata the meta data
* @return if the item should be skipped
*/
public boolean shouldSkip(AnnotatedTypeMetadata metadata) {
return shouldSkip(metadata, null);
}
/**
* Determine if an item should be skipped based on {@code @Conditional} annotations.
* @param metadata the meta data
* @param phase the phase of the call
* @return if the item should be skipped
*/
public boolean shouldSkip(@Nullable AnnotatedTypeMetadata metadata, @Nullable ConfigurationPhase phase) {
if (metadata == null || !metadata.isAnnotated(Conditional.class.getName())) {
return false;
}
if (phase == null) {
if (metadata instanceof AnnotationMetadata annotationMetadata &&
ConfigurationClassUtils.isConfigurationCandidate(annotationMetadata)) {
return shouldSkip(metadata, ConfigurationPhase.PARSE_CONFIGURATION);
}
return shouldSkip(metadata, ConfigurationPhase.REGISTER_BEAN);
}
List<Condition> conditions = collectConditions(metadata);
for (Condition condition : conditions) {
ConfigurationPhase requiredPhase = null;
if (condition instanceof ConfigurationCondition configurationCondition) {
requiredPhase = configurationCondition.getConfigurationPhase();
}
if ((requiredPhase == null || requiredPhase == phase) && !condition.matches(this.context, metadata)) {
return true;
}
}
return false;
}
/**
* Return the {@linkplain Condition conditions} that should be applied when
* considering the given annotated type.
* @param metadata the metadata of the annotated type
* @return the ordered list of conditions for that type
*/
List<Condition> collectConditions(@Nullable AnnotatedTypeMetadata metadata) {
if (metadata == null || !metadata.isAnnotated(Conditional.class.getName())) {
return Collections.emptyList();
}
List<Condition> conditions = new ArrayList<>();
for (String[] conditionClasses : getConditionClasses(metadata)) {
for (String conditionClass : conditionClasses) {
Condition condition = getCondition(conditionClass, this.context.getClassLoader());
conditions.add(condition);
}
}
AnnotationAwareOrderComparator.sort(conditions);
return conditions;
}
@SuppressWarnings("unchecked")
private List<String[]> getConditionClasses(AnnotatedTypeMetadata metadata) {
MultiValueMap<String, @Nullable Object> attributes = metadata.getAllAnnotationAttributes(Conditional.class.getName(), true);
Object values = (attributes != null ? attributes.get("value") : null);
return (List<String[]>) (values != null ? values : Collections.emptyList());
}
private Condition getCondition(String conditionClassName, @Nullable ClassLoader classloader) {
Class<?> conditionClass = ClassUtils.resolveClassName(conditionClassName, classloader);
return (Condition) BeanUtils.instantiateClass(conditionClass);
}
/**
* Implementation of a {@link ConditionContext}.
*/
private static
|
will
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
|
{
"start": 7889,
"end": 8078
}
|
enum ____ in ascending chronological order.
* Latest one should be added last in the list.
* When upgrading the version for whole driver, update the getCurrentVersion;
*/
public
|
list
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/jdk8/FlowableStageSubscriber.java
|
{
"start": 1029,
"end": 1205
}
|
class ____ extends CompletableFuture and provides basic infrastructure
* to notify watchers upon upstream signals.
* @param <T> the element type
* @since 3.0.0
*/
abstract
|
that
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
|
{
"start": 100637,
"end": 102188
}
|
class ____ extends FSEditLogOp {
DelegationTokenIdentifier token;
CancelDelegationTokenOp() {
super(OP_CANCEL_DELEGATION_TOKEN);
}
static CancelDelegationTokenOp getInstance(OpInstanceCache cache) {
return cache.get(OP_CANCEL_DELEGATION_TOKEN);
}
@Override
void resetSubFields() {
token = null;
}
CancelDelegationTokenOp setDelegationTokenIdentifier(
DelegationTokenIdentifier token) {
this.token = token;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
token.write(out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.token = new DelegationTokenIdentifier();
this.token.readFields(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("CancelDelegationTokenOp [token=")
.append(token)
.append(", opCode=")
.append(opCode)
.append(", txid=")
.append(txid)
.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSEditLogOp.delegationTokenToXml(contentHandler, token);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.token = delegationTokenFromXml(st.getChildren(
"DELEGATION_TOKEN_IDENTIFIER").get(0));
}
}
static
|
CancelDelegationTokenOp
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRPCMultipleDestinationMountTableResolver.java
|
{
"start": 2935,
"end": 8808
}
|
class ____ extends
TestRouterRPCMultipleDestinationMountTableResolver {
public static final Logger LOG =
LoggerFactory.getLogger(TestRouterAsyncRPCMultipleDestinationMountTableResolver.class);
@BeforeAll
public static void setUp() throws Exception {
// Build and start a federated cluster.
cluster = new StateStoreDFSCluster(false, 3,
MultipleDestinationMountTableResolver.class);
Configuration routerConf = new RouterConfigBuilder()
.stateStore()
.admin()
.quota()
.rpc()
.build();
routerConf.setBoolean(DFS_ROUTER_ASYNC_RPC_ENABLE_KEY, true);
Configuration hdfsConf = new Configuration(false);
hdfsConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster.addRouterOverrides(routerConf);
cluster.addNamenodeOverrides(hdfsConf);
cluster.startCluster();
cluster.startRouters();
cluster.waitClusterUp();
routerContext = cluster.getRandomRouter();
resolver =
(MountTableResolver) routerContext.getRouter().getSubclusterResolver();
nnFs0 = (DistributedFileSystem) cluster
.getNamenode(cluster.getNameservices().get(0), null).getFileSystem();
nnFs1 = (DistributedFileSystem) cluster
.getNamenode(cluster.getNameservices().get(1), null).getFileSystem();
nnFs2 = (DistributedFileSystem) cluster
.getNamenode(cluster.getNameservices().get(2), null).getFileSystem();
routerFs = (DistributedFileSystem) routerContext.getFileSystem();
rpcServer =routerContext.getRouter().getRpcServer();
}
@Test
public void testLocalResolverGetDatanodesSubcluster() throws IOException {
String testPath = "/testLocalResolverGetDatanodesSubcluster";
Path path = new Path(testPath);
Map<String, String> destMap = new HashMap<>();
destMap.put("ns0", testPath);
destMap.put("ns1", testPath);
nnFs0.mkdirs(path);
nnFs1.mkdirs(path);
MountTable addEntry =
MountTable.newInstance(testPath, destMap);
addEntry.setQuota(new RouterQuotaUsage.Builder().build());
addEntry.setDestOrder(DestinationOrder.LOCAL);
assertTrue(addMountTable(addEntry));
Map<String, String> datanodesSubcluster = null;
try {
MultipleDestinationMountTableResolver resolver =
(MultipleDestinationMountTableResolver) routerContext.getRouter().getSubclusterResolver();
LocalResolver localResolver =
(LocalResolver) resolver.getOrderedResolver(DestinationOrder.LOCAL);
datanodesSubcluster = localResolver.getDatanodesSubcluster();
} catch (Exception e) {
LOG.info("Exception occurs when testLocalResolverGetDatanodesSubcluster.", e);
} finally {
RouterClient client = routerContext.getAdminClient();
MountTableManager mountTableManager = client.getMountTableManager();
RemoveMountTableEntryRequest req2 =
RemoveMountTableEntryRequest.newInstance(testPath);
mountTableManager.removeMountTableEntry(req2);
nnFs0.delete(new Path(testPath), true);
nnFs1.delete(new Path(testPath), true);
}
assertNotNull(datanodesSubcluster);
assertFalse(datanodesSubcluster.isEmpty());
}
@Override
@Test
public void testInvokeAtAvailableNs() throws IOException {
// Create a mount point with multiple destinations.
Path path = new Path("/testInvokeAtAvailableNs");
Map<String, String> destMap = new HashMap<>();
destMap.put("ns0", "/testInvokeAtAvailableNs");
destMap.put("ns1", "/testInvokeAtAvailableNs");
nnFs0.mkdirs(path);
nnFs1.mkdirs(path);
MountTable addEntry =
MountTable.newInstance("/testInvokeAtAvailableNs", destMap);
addEntry.setQuota(new RouterQuotaUsage.Builder().build());
addEntry.setDestOrder(DestinationOrder.RANDOM);
addEntry.setFaultTolerant(true);
assertTrue(addMountTable(addEntry));
// Make one subcluster unavailable.
MiniDFSCluster dfsCluster = cluster.getCluster();
dfsCluster.shutdownNameNode(0);
dfsCluster.shutdownNameNode(1);
try {
// Verify that #invokeAtAvailableNs works by calling #getServerDefaults.
RemoteMethod method = new RemoteMethod("getServerDefaults");
FsServerDefaults serverDefaults = null;
rpcServer.invokeAtAvailableNsAsync(method, FsServerDefaults.class);
try {
serverDefaults = syncReturn(FsServerDefaults.class);
} catch (Exception e) {
throw new RuntimeException(e);
}
assertNotNull(serverDefaults);
} finally {
dfsCluster.restartNameNode(0);
dfsCluster.restartNameNode(1);
}
}
@Override
@Test
public void testIsMultiDestDir() throws Exception {
RouterClientProtocol client =
routerContext.getRouter().getRpcServer().getClientProtocolModule();
setupOrderMountPath(DestinationOrder.HASH_ALL);
// Should be true only for directory and false for all other cases.
client.isMultiDestDirectory("/mount/dir");
assertTrue(syncReturn(boolean.class));
client.isMultiDestDirectory("/mount/nodir");
assertFalse(syncReturn(boolean.class));
client.isMultiDestDirectory("/mount/dir/file");
assertFalse(syncReturn(boolean.class));
routerFs.createSymlink(new Path("/mount/dir/file"),
new Path("/mount/dir/link"), true);
client.isMultiDestDirectory("/mount/dir/link");
assertFalse(syncReturn(boolean.class));
routerFs.createSymlink(new Path("/mount/dir/dir"),
new Path("/mount/dir/linkDir"), true);
client.isMultiDestDirectory("/mount/dir/linkDir");
assertFalse(syncReturn(boolean.class));
resetTestEnvironment();
// Test single directory destination. Should be false for the directory.
setupOrderMountPath(DestinationOrder.HASH);
client.isMultiDestDirectory("/mount/dir");
assertFalse(syncReturn(boolean.class));
}
}
|
TestRouterAsyncRPCMultipleDestinationMountTableResolver
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/appender/AsyncAppender.java
|
{
"start": 13327,
"end": 17145
}
|
class ____<B extends Builder<B>> extends AbstractFilterable.Builder<B>
implements org.apache.logging.log4j.core.util.Builder<AsyncAppender> {
@PluginElement("AppenderRef")
@Required(message = "No appender references provided to AsyncAppender")
private AppenderRef[] appenderRefs;
@PluginBuilderAttribute
@PluginAliases("error-ref")
private String errorRef;
@PluginBuilderAttribute
private boolean blocking = true;
@PluginBuilderAttribute
private long shutdownTimeout = 0L;
@PluginBuilderAttribute
private int bufferSize = DEFAULT_QUEUE_SIZE;
@PluginBuilderAttribute
@Required(message = "No name provided for AsyncAppender")
private String name;
@PluginBuilderAttribute
private boolean includeLocation = false;
@PluginConfiguration
private Configuration configuration;
@PluginBuilderAttribute
private boolean ignoreExceptions = true;
@PluginElement(BlockingQueueFactory.ELEMENT_TYPE)
private BlockingQueueFactory<LogEvent> blockingQueueFactory = new ArrayBlockingQueueFactory<>();
public Builder setAppenderRefs(final AppenderRef[] appenderRefs) {
this.appenderRefs = appenderRefs;
return this;
}
public Builder setErrorRef(final String errorRef) {
this.errorRef = errorRef;
return this;
}
public Builder setBlocking(final boolean blocking) {
this.blocking = blocking;
return this;
}
public Builder setShutdownTimeout(final long shutdownTimeout) {
this.shutdownTimeout = shutdownTimeout;
return this;
}
public Builder setBufferSize(final int bufferSize) {
this.bufferSize = bufferSize;
return this;
}
public Builder setName(final String name) {
this.name = name;
return this;
}
public Builder setIncludeLocation(final boolean includeLocation) {
this.includeLocation = includeLocation;
return this;
}
public Builder setConfiguration(final Configuration configuration) {
this.configuration = configuration;
return this;
}
public Builder setIgnoreExceptions(final boolean ignoreExceptions) {
this.ignoreExceptions = ignoreExceptions;
return this;
}
public Builder setBlockingQueueFactory(final BlockingQueueFactory<LogEvent> blockingQueueFactory) {
this.blockingQueueFactory = blockingQueueFactory;
return this;
}
@Override
public AsyncAppender build() {
return new AsyncAppender(
name,
getFilter(),
appenderRefs,
errorRef,
bufferSize,
blocking,
ignoreExceptions,
shutdownTimeout,
configuration,
includeLocation,
blockingQueueFactory,
getPropertyArray());
}
}
/**
* Returns the names of the appenders that this asyncAppender delegates to as an array of Strings.
*
* @return the names of the sink appenders
*/
public String[] getAppenderRefStrings() {
final String[] result = new String[appenderRefs.length];
for (int i = 0; i < result.length; i++) {
result[i] = appenderRefs[i].getRef();
}
return result;
}
/**
* Returns {@code true} if this AsyncAppender will take a snapshot of the stack with every log event to determine
* the
|
Builder
|
java
|
apache__kafka
|
server/src/main/java/org/apache/kafka/network/SocketServerConfigs.java
|
{
"start": 1883,
"end": 5628
}
|
class ____ {
public static final String LISTENER_SECURITY_PROTOCOL_MAP_CONFIG = "listener.security.protocol.map";
public static final String LISTENER_SECURITY_PROTOCOL_MAP_DEFAULT = Arrays.stream(SecurityProtocol.values())
.collect(Collectors.toMap(ListenerName::forSecurityProtocol, sp -> sp))
.entrySet()
.stream()
.map(entry -> entry.getKey().value() + ":" + entry.getValue().name())
.collect(Collectors.joining(","));
public static final String LISTENER_SECURITY_PROTOCOL_MAP_DOC = "Map between listener names and security protocols. This must be defined for " +
"the same security protocol to be usable in more than one port or IP. For example, internal and " +
"external traffic can be separated even if SSL is required for both. Concretely, the user could define listeners " +
"with names INTERNAL and EXTERNAL and this property as: <code>INTERNAL:SSL,EXTERNAL:SSL</code>. As shown, key and value are " +
"separated by a colon and map entries are separated by commas. Each listener name should only appear once in the map. " +
"Different security (SSL and SASL) settings can be configured for each listener by adding a normalised " +
"prefix (the listener name is lowercased) to the config name. For example, to set a different keystore for the " +
"INTERNAL listener, a config with name <code>listener.name.internal.ssl.keystore.location</code> would be set. " +
"If the config for the listener name is not set, the config will fallback to the generic config (i.e. <code>ssl.keystore.location</code>). " +
"Note that in KRaft a default mapping from the listener names defined by <code>controller.listener.names</code> to PLAINTEXT " +
"is assumed if no explicit mapping is provided and no other security protocol is in use.";
public static final String LISTENERS_CONFIG = "listeners";
public static final String LISTENERS_DEFAULT = "PLAINTEXT://:9092";
public static final String LISTENERS_DOC = String.format("Listener List - Comma-separated list of URIs we will listen on and the listener names." +
" If the listener name is not a security protocol, <code>%s</code> must also be set.%n" +
" Listener names and port numbers must be unique unless one listener is an IPv4 address and the other listener is an IPv6 address (for the same port).%n" +
" Specify hostname as 0.0.0.0 to bind to all interfaces.%n" +
" Leave hostname empty to bind to default interface.%n" +
" Examples of legal listener lists:%n" +
" <code>PLAINTEXT://myhost:9092,SSL://:9091</code>%n" +
" <code>CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093</code>%n" +
" <code>PLAINTEXT://127.0.0.1:9092,SSL://[::1]:9092</code>%n", LISTENER_SECURITY_PROTOCOL_MAP_CONFIG);
public static final String ADVERTISED_LISTENERS_CONFIG = "advertised.listeners";
public static final String ADVERTISED_LISTENERS_DOC = String.format("Specifies the listener addresses that the Kafka brokers will advertise to clients and other brokers." +
" The config is useful where the actual listener configuration <code>%s</code> does not represent the addresses that clients should" +
" use to connect, such as in cloud environments." +
" The addresses are published to and managed by the controller, the brokers pull these data from the controller as needed." +
" In IaaS environments, this may need to be different from the
|
SocketServerConfigs
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_for_maiksagill.java
|
{
"start": 224,
"end": 609
}
|
class ____ extends TestCase {
public void test_for_maiksagill() throws Exception {
String resource = "json/maiksagill.json";
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream(resource);
String text = IOUtils.toString(is);
JSON.parseObject(text, WareHouseInfo[].class);
}
}
|
Bug_for_maiksagill
|
java
|
quarkusio__quarkus
|
integration-tests/micrometer-opentelemetry/src/main/java/io/quarkus/micrometer/opentelemetry/services/TraceData.java
|
{
"start": 62,
"end": 109
}
|
class ____ {
public String message;
}
|
TraceData
|
java
|
apache__camel
|
components/camel-nitrite/src/main/java/org/apache/camel/component/nitrite/operation/common/ImportDatabaseOperation.java
|
{
"start": 1258,
"end": 1697
}
|
class ____ extends AbstractNitriteOperation implements CommonOperation {
public ImportDatabaseOperation() {
}
@Override
protected void execute(Exchange exchange, NitriteEndpoint endpoint) throws Exception {
InputStream stream = new ByteArrayInputStream(exchange.getMessage().getBody(byte[].class));
Importer.of(endpoint.getNitriteDatabase())
.importFrom(stream);
}
}
|
ImportDatabaseOperation
|
java
|
spring-projects__spring-framework
|
spring-context-support/src/test/java/org/springframework/cache/jcache/JCacheEhCacheAnnotationTests.java
|
{
"start": 3641,
"end": 4880
}
|
class ____ implements CachingConfigurer {
@Autowired
CachingProvider cachingProvider;
@Override
@Bean
public org.springframework.cache.CacheManager cacheManager() {
JCacheCacheManager cm = new JCacheCacheManager(jCacheManager());
cm.setTransactionAware(true);
return cm;
}
@Bean
public CacheManager jCacheManager() {
CacheManager cacheManager = this.cachingProvider.getCacheManager();
MutableConfiguration<Object, Object> mutableConfiguration = new MutableConfiguration<>();
mutableConfiguration.setStoreByValue(false); // otherwise value has to be Serializable
cacheManager.createCache("testCache", mutableConfiguration);
cacheManager.createCache("primary", mutableConfiguration);
cacheManager.createCache("secondary", mutableConfiguration);
return cacheManager;
}
@Bean
public CacheableService<?> service() {
return new DefaultCacheableService();
}
@Bean
public CacheableService<?> classService() {
return new AnnotatedClassCacheableService();
}
@Override
@Bean
public KeyGenerator keyGenerator() {
return new SimpleKeyGenerator();
}
@Bean
public KeyGenerator customKeyGenerator() {
return new SomeCustomKeyGenerator();
}
}
}
|
EnableCachingConfig
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java
|
{
"start": 10888,
"end": 12795
}
|
class ____ extends AbstractPagedOutputView {
private final ArrayList<MemorySegment> pages;
private final MemorySegmentSource memSource;
private final int sizeBits;
private final int sizeMask;
private int currentPageNumber;
private int segmentNumberOffset;
private WriteView(
ArrayList<MemorySegment> pages,
MemorySegmentSource memSource,
int pageSize,
int pageSizeBits) {
super(pages.get(0), pageSize, 0);
this.pages = pages;
this.memSource = memSource;
this.sizeBits = pageSizeBits;
this.sizeMask = pageSize - 1;
this.segmentNumberOffset = 0;
}
@Override
protected MemorySegment nextSegment(MemorySegment current, int bytesUsed)
throws IOException {
MemorySegment next = this.memSource.nextSegment();
if (next == null) {
throw new EOFException();
}
this.pages.add(next);
this.currentPageNumber++;
return next;
}
private long getCurrentPointer() {
return (((long) this.currentPageNumber) << this.sizeBits)
+ getCurrentPositionInSegment();
}
private int resetTo(long pointer) {
final int pageNum = (int) (pointer >>> this.sizeBits);
final int offset = (int) (pointer & this.sizeMask);
this.currentPageNumber = pageNum;
int posInArray = pageNum - this.segmentNumberOffset;
seekOutput(this.pages.get(posInArray), offset);
return posInArray;
}
@SuppressWarnings("unused")
public void setSegmentNumberOffset(int offset) {
this.segmentNumberOffset = offset;
}
}
private static final
|
WriteView
|
java
|
apache__camel
|
components/camel-undertow/src/test/java/org/apache/camel/component/undertow/rest/RestUndertowProducerThrowExceptionErrorTest.java
|
{
"start": 1222,
"end": 3059
}
|
class ____ extends BaseUndertowTest {
@Test
public void testUndertowProducerOk() {
String out = fluentTemplate.withHeader("id", "123").to("direct:start").request(String.class);
assertEquals("123;Donald Duck", out);
}
@Test
public void testUndertowProducerFail() {
Exchange out = fluentTemplate.withHeader("id", "777").to("direct:start").request(Exchange.class);
assertNotNull(out);
assertFalse(out.isFailed(), "Should not have thrown exception");
assertEquals(500, out.getOut().getHeader(Exchange.HTTP_RESPONSE_CODE));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// configure to use localhost with the given port
restConfiguration().component("undertow").host("localhost").port(getPort())
.endpointProperty("throwExceptionOnFailure", "false");
from("direct:start")
.to("rest:get:users/{id}/basic");
// use the rest DSL to define the rest services
rest("/users/")
.get("{id}/basic")
.to("direct:basic");
from("direct:basic")
.to("mock:input")
.process(exchange -> {
String id = exchange.getIn().getHeader("id", String.class);
if ("777".equals(id)) {
throw new IllegalArgumentException("Bad id number");
}
exchange.getMessage().setBody(id + ";Donald Duck");
});
}
};
}
}
|
RestUndertowProducerThrowExceptionErrorTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/model/source/spi/PluralAttributeElementSourceManyToAny.java
|
{
"start": 332,
"end": 441
}
|
interface ____
extends PluralAttributeElementSource, AnyMappingSource {
}
|
PluralAttributeElementSourceManyToAny
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/ingest/AbstractProcessor.java
|
{
"start": 754,
"end": 2883
}
|
class ____ implements Processor {
protected final String tag;
protected final String description;
protected AbstractProcessor(String tag, String description) {
this.tag = tag;
this.description = description;
}
@Override
public String getTag() {
return tag;
}
@Override
public String getDescription() {
return description;
}
/**
* Helper method to be used by processors that need to catch and log Throwables.
* <p>
* If trace logging is enabled, then we log the provided message and the full stacktrace
* On the other hand if trace logging isn't enabled, then we log the provided message and the message from the Throwable (but not a
* stacktrace).
* <p>
* Regardless of the logging level, we throw an ElasticsearchException that has the context in its message
*
* @param message A message to be logged and to be included in the message of the returned ElasticsearchException
* @param throwable The Throwable that has been caught
* @return A new ElasticsearchException whose message includes the passed-in message and the message from the passed-in Throwable. It
* will not however wrap the given Throwable.
*/
protected ElasticsearchException logAndBuildException(String message, Throwable throwable) {
String cause = throwable.getClass().getName();
if (throwable.getMessage() != null) {
cause += ": " + throwable.getMessage();
}
String longMessage = message + ": " + cause;
// This method will only be called in exceptional situations, so the cost of looking up the logger won't be bad:
Logger logger = LogManager.getLogger(getClass());
if (logger.isTraceEnabled()) {
logger.trace(message, throwable);
} else {
logger.warn(longMessage);
}
// We don't want to wrap the Throwable here because it is probably not one of the exceptions that ElasticsearchException can
// serialize:
return new ElasticsearchException(longMessage);
}
}
|
AbstractProcessor
|
java
|
apache__camel
|
components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FtpRecursiveDepthIT.java
|
{
"start": 1040,
"end": 2802
}
|
class ____ extends FtpServerTestSupport {
protected String getFtpUrl() {
return "ftp://admin@localhost:{{ftp.server.port}}/depth?password=admin&recursive=true";
}
@Test
public void testDepth() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceivedInAnyOrder("a2", "b2");
template.sendBodyAndHeader("ftp://admin@localhost:{{ftp.server.port}}/depth?password=admin", "a", Exchange.FILE_NAME,
"a.txt");
template.sendBodyAndHeader("ftp://admin@localhost:{{ftp.server.port}}/depth?password=admin", "b", Exchange.FILE_NAME,
"b.txt");
template.sendBodyAndHeader("ftp://admin@localhost:{{ftp.server.port}}/depth/foo?password=admin", "a2",
Exchange.FILE_NAME,
"a2.txt");
template.sendBodyAndHeader("ftp://admin@localhost:{{ftp.server.port}}/depth/foo/bar?password=admin", "a3",
Exchange.FILE_NAME, "a.txt");
template.sendBodyAndHeader("ftp://admin@localhost:{{ftp.server.port}}/depth/bar?password=admin", "b2",
Exchange.FILE_NAME,
"b2.txt");
template.sendBodyAndHeader("ftp://admin@localhost:{{ftp.server.port}}/depth/bar/foo?password=admin", "b3",
Exchange.FILE_NAME, "b.txt");
// only expect 2 of the 6 sent, those at depth 2
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl() + "&minDepth=2&maxDepth=2").convertBodyTo(String.class).to("mock:result");
}
};
}
}
|
FtpRecursiveDepthIT
|
java
|
hibernate__hibernate-orm
|
hibernate-scan-jandex/src/main/java/org/hibernate/archive/scan/internal/StandardScannerFactory.java
|
{
"start": 323,
"end": 601
}
|
class ____ implements ScannerFactory {
@Override
public Scanner getScanner(ArchiveDescriptorFactory archiveDescriptorFactory) {
return archiveDescriptorFactory == null
? new StandardScanner()
: new StandardScanner( archiveDescriptorFactory );
}
}
|
StandardScannerFactory
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/test/benchmark/encode/ListBoolean1000Encode.java
|
{
"start": 179,
"end": 638
}
|
class ____ extends BenchmarkCase {
private Object object;
public ListBoolean1000Encode(){
super("BooleanArray1000Encode");
boolean[] array = new boolean[1000];
for (int i = 0; i < array.length; ++i) {
array[i] = (i % 2 == 0);
}
this.object = Arrays.asList(array);
}
@Override
public void execute(Codec codec) throws Exception {
codec.encode(object);
}
}
|
ListBoolean1000Encode
|
java
|
spring-projects__spring-security
|
web/src/test/java/org/springframework/security/web/authentication/SavedRequestAwareAuthenticationSuccessHandlerTests.java
|
{
"start": 1307,
"end": 2796
}
|
class ____ {
@Test
public void defaultUrlMuststartWithSlashOrHttpScheme() {
SavedRequestAwareAuthenticationSuccessHandler handler = new SavedRequestAwareAuthenticationSuccessHandler();
handler.setDefaultTargetUrl("/acceptableRelativeUrl");
handler.setDefaultTargetUrl("https://some.site.org/index.html");
handler.setDefaultTargetUrl("https://some.site.org/index.html");
assertThatIllegalArgumentException().isThrownBy(() -> handler.setDefaultTargetUrl("missingSlash"));
}
@Test
public void onAuthenticationSuccessHasSavedRequest() throws Exception {
String redirectUrl = "http://localhost/appcontext/page";
RedirectStrategy redirectStrategy = mock(RedirectStrategy.class);
RequestCache requestCache = mock(RequestCache.class);
SavedRequest savedRequest = mock(SavedRequest.class);
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
given(savedRequest.getRedirectUrl()).willReturn(redirectUrl);
given(requestCache.getRequest(request, response)).willReturn(savedRequest);
SavedRequestAwareAuthenticationSuccessHandler handler = new SavedRequestAwareAuthenticationSuccessHandler();
handler.setRequestCache(requestCache);
handler.setRedirectStrategy(redirectStrategy);
handler.onAuthenticationSuccess(request, response, mock(Authentication.class));
verify(redirectStrategy).sendRedirect(request, response, redirectUrl);
}
}
|
SavedRequestAwareAuthenticationSuccessHandlerTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/CrossClusterApiKeySignatureManagerTests.java
|
{
"start": 969,
"end": 14949
}
|
class ____ extends ESTestCase {
private ThreadPool threadPool;
private Settings.Builder settingsBuilder;
@Override
public void setUp() throws Exception {
super.setUp();
threadPool = new TestThreadPool(getTestName());
settingsBuilder = Settings.builder()
.put("path.home", createTempDir())
.put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
}
public void testSignAndVerifyPKCS12orBCFKS() throws GeneralSecurityException {
var builder = Settings.builder()
.put("cluster.remote.my_remote.signing.keystore.alias", "wholelottakey")
.put("path.home", createTempDir())
.put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
addStorePathToBuilder("my_remote", "signing", "secretpassword", "secretpassword", builder);
addStorePathToBuilder("truststore", "changeit", "secretpassword", builder);
var manager = new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()));
var signature = manager.signerForClusterAlias("my_remote").sign("a_header");
assertTrue(manager.verifier().verify(signature, "a_header"));
}
public void testSignAndVerifyDifferentPayloadFailsPKCS12orBCFKS() throws GeneralSecurityException {
var builder = Settings.builder()
.put("cluster.remote.my_remote.signing.keystore.alias", "wholelottakey")
.put("path.home", createTempDir())
.put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
addStorePathToBuilder("my_remote", "signing", "secretpassword", "secretpassword", builder);
addStorePathToBuilder("truststore", "changeit", "secretpassword", builder);
var manager = new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()));
var signature = manager.signerForClusterAlias("my_remote").sign("a_header");
assertFalse(manager.verifier().verify(signature, "another_header"));
}
public void testSignAndVerifyRSAorEC() throws GeneralSecurityException {
var builder = Settings.builder()
.put("path.home", createTempDir())
.put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
addCertSigningTestCerts("my_remote", builder);
var manager = new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()));
var signature = manager.signerForClusterAlias("my_remote").sign("a_header");
assertTrue(manager.verifier().verify(signature, "a_header"));
}
public void testSignAndVerifyDifferentPayloadFailsRSAorEC() throws GeneralSecurityException {
var builder = Settings.builder()
.put("path.home", createTempDir())
.put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
addCertSigningTestCerts("my_remote", builder);
var manager = new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()));
var signature = manager.signerForClusterAlias("my_remote").sign("a_header");
assertFalse(manager.verifier().verify(signature, "another_header"));
}
    /**
     * Splicing the signature bytes produced by a different key onto another signature's
     * certificate chain must fail verification: the signature value must match the key
     * bound to the presented certificates.
     */
    public void testSignAndVerifyWrongKeyRSAorEC() throws GeneralSecurityException {
        var builder = Settings.builder()
            .put("cluster.remote.my_remote2.signing.keystore.alias", "ainttalkinboutkeys")
            .put("path.home", createTempDir())
            .put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
        // my_remote2 signs with a keystore-based key; my_remote1 with PEM RSA material (forceRSA = true)
        addStorePathToBuilder("my_remote2", "signing", "secretpassword", "secretpassword", builder);
        addCertSigningTestCerts("my_remote1", builder, true);
        var manager = new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()));
        var verifier = manager.verifier();
        var signature1 = manager.signerForClusterAlias("my_remote1").sign("a_header");
        // Sanity check: the untampered signature verifies
        assertTrue(verifier.verify(signature1, "a_header"));
        // Signature generated with different key and cert chain
        var signature2 = manager.signerForClusterAlias("my_remote2").sign("a_header");
        // Replace only key
        assertFalse(
            verifier.verify(
                new X509CertificateSignature(signature1.certificates(), signature1.algorithm(), signature2.signature()),
                "a_header"
            )
        );
    }
    /**
     * Flipping a single bit of the raw signature bytes must cause verification to fail,
     * proving the signature value itself is cryptographically checked.
     */
    public void testSignAndVerifyManipulatedSignatureStringRSAorEC() throws GeneralSecurityException {
        var builder = Settings.builder()
            .put("path.home", createTempDir())
            .put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
        addCertSigningTestCerts("my_remote", builder);
        var manager = new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()));
        var signature = manager.signerForClusterAlias("my_remote").sign("a_header");
        var bytes = signature.signature().array();
        // Corrupt the signature by flipping the lowest bit of its last byte
        bytes[bytes.length - 1] ^= 0x01;
        // Keep the original certificate chain and algorithm; only the signature bytes differ
        var manipulatedSignature = new X509CertificateSignature(signature.certificates(), signature.algorithm(), new BytesArray(bytes));
        assertFalse(manager.verifier().verify(manipulatedSignature, "a_header"));
    }
public void testLoadKeystoreMissingFile() {
var builder = Settings.builder()
.put("cluster.remote.my_remote.signing.keystore.path", "not_a_valid_path")
.put("path.home", createTempDir())
.put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
var exception = assertThrows(
IllegalStateException.class,
() -> new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()))
);
assertThat(exception.getMessage(), equalTo("Failed to load signing config for cluster [my_remote]"));
}
    /**
     * Pointing the signing truststore at a non-existent path must fail manager
     * construction with an error about the trust configuration.
     */
    public void testLoadTruststoreMissingFile() {
        var builder = Settings.builder()
            .put("cluster.remote.signing.truststore.path", "not_a_valid_path")
            .put("path.home", createTempDir())
            .put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
        var exception = assertThrows(
            IllegalStateException.class,
            () -> new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()))
        );
        // The truststore key is not scoped to a cluster alias, hence the cluster-less message
        assertThat(exception.getMessage(), equalTo("Failed to load trust config"));
    }
    /**
     * Loading a signing keystore without an explicit {@code ...signing.keystore.alias}
     * setting must fail when the key alias cannot be resolved unambiguously
     * (the "signing" test store presumably holds several aliases — implied by the test name).
     */
    public void testLoadSeveralAliasesWithoutAliasSettingKeystore() {
        var builder = Settings.builder()
            .put("path.home", createTempDir())
            .put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
        // Note: deliberately no "cluster.remote.my_remote.signing.keystore.alias" setting
        addStorePathToBuilder("my_remote", "signing", "secretpassword", "secretpassword", builder);
        var exception = assertThrows(
            IllegalStateException.class,
            () -> new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()))
        );
        assertThat(exception.getMessage(), equalTo("Failed to load signing config for cluster [my_remote]"));
    }
public void testSignAndVerifyIntermediateCertInChain() throws Exception {
var ca = getDataPath("/org/elasticsearch/xpack/security/signature/root.crt");
var builder = settingsBuilder.put("cluster.remote.signing.certificate_authorities", ca);
addStorePathToBuilder("my_remote", "signing_with_intermediate", "password123password", "password123password", builder);
var environment = TestEnvironment.newEnvironment(builder.build());
var manager = new CrossClusterApiKeySignatureManager(environment);
var signer = manager.signerForClusterAlias("my_remote");
var verifier = manager.verifier();
var signature = signer.sign("test");
assertThat(signature.certificates(), arrayWithSize(2));
assertTrue(verifier.verify(signature, "test"));
}
    /**
     * When the signer's keystore lacks the intermediate certificate, only the leaf is
     * attached to the signature, so chain building up to the trusted root must fail
     * during verification.
     */
    public void testSignAndVerifyFailsIntermediateCertMissing() {
        var ca = getDataPath("/org/elasticsearch/xpack/security/signature/root.crt");
        var builder = settingsBuilder.put("cluster.remote.signing.certificate_authorities", ca);
        addStorePathToBuilder("my_remote", "signing_no_intermediate", "password123password", "password123password", builder);
        var environment = TestEnvironment.newEnvironment(builder.build());
        var manager = new CrossClusterApiKeySignatureManager(environment);
        var signer = manager.signerForClusterAlias("my_remote");
        var verifier = manager.verifier();
        var signature = signer.sign("test");
        // Only the leaf certificate is present — the chain to root.crt cannot be completed
        assertThat(signature.certificates(), arrayWithSize(1));
        var exception = assertThrows(GeneralSecurityException.class, () -> verifier.verify(signature, "test"));
        assertThat(
            exception.getMessage(),
            containsString(
                // FIPS and non-FIPS JVMs word the PKIX chain-building failure differently
                inFipsJvm() ? "Unable to construct a valid chain" : "unable to find valid certification path to requested target"
            )
        );
    }
    /**
     * Verification must reject a signature whose signing certificate has expired, even
     * when that same (expired) certificate is configured as a trusted CA.
     */
    public void testSignAndVerifyExpiredCertFails() {
        var builder = Settings.builder()
            .put("path.home", createTempDir())
            .put(Node.NODE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(3, 8));
        // The expired certificate serves as both the trust anchor and the signing certificate
        builder.put(
            "cluster.remote.signing.certificate_authorities",
            getDataPath("/org/elasticsearch/xpack/security/signature/expired_cert.crt")
        )
            .put(
                "cluster.remote.my_remote.signing.certificate",
                getDataPath("/org/elasticsearch/xpack/security/signature/expired_cert.crt")
            )
            .put("cluster.remote.my_remote.signing.key", getDataPath("/org/elasticsearch/xpack/security/signature/expired_key.key"));
        var manager = new CrossClusterApiKeySignatureManager(TestEnvironment.newEnvironment(builder.build()));
        var signature = manager.signerForClusterAlias("my_remote").sign("a_header");
        var verifier = manager.verifier();
        var exception = assertThrows(CertificateException.class, () -> verifier.verify(signature, "test"));
        // FIPS and non-FIPS JVMs word the certificate-expiry error differently
        assertThat(exception.getMessage(), containsString(inFipsJvm() ? "certificate expired on" : "NotAfter"));
    }
    /**
     * Configures the signing truststore settings ({@code cluster.remote.signing.truststore.*})
     * to point at the named test-resource store, using BCFKS on FIPS JVMs and PKCS12
     * otherwise, and registers the matching secure password.
     *
     * @param storeName    base name of the store resource under the signature test data directory
     * @param password     truststore password used on non-FIPS JVMs
     * @param passwordFips truststore password used on FIPS JVMs
     * @param builder      settings builder to mutate
     */
    private void addStorePathToBuilder(String storeName, String password, String passwordFips, Settings.Builder builder) {
        String storeType = inFipsJvm() ? "BCFKS" : "PKCS12";
        // NOTE(review): non-FIPS fixtures use a ".jks" file extension while being declared
        // PKCS12 — presumably the test resources are PKCS12 files with a .jks name; confirm.
        String extension = inFipsJvm() ? ".bcfks" : ".jks";
        String keystorePassword = inFipsJvm() ? passwordFips : password;
        // Lazily install MockSecureSettings so repeated helper calls share one instance
        if (builder.getSecureSettings() == null) {
            builder.setSecureSettings(new MockSecureSettings());
        }
        MockSecureSettings secureSettings = (MockSecureSettings) builder.getSecureSettings();
        secureSettings.setString("cluster.remote.signing.truststore.secure_password", keystorePassword);
        builder.put("cluster.remote.signing.truststore.type", storeType)
            .put(
                "cluster.remote.signing.truststore.path",
                getDataPath("/org/elasticsearch/xpack/security/signature/" + storeName + extension)
            );
    }
    /**
     * Configures the per-cluster signing keystore settings
     * ({@code cluster.remote.<alias>.signing.keystore.*}) to point at the named
     * test-resource store, using BCFKS on FIPS JVMs and PKCS12 otherwise, and registers
     * the matching secure password.
     *
     * @param remoteCluster cluster alias the keystore settings are scoped to
     * @param storeName     base name of the store resource under the signature test data directory
     * @param password      keystore password used on non-FIPS JVMs
     * @param passwordFips  keystore password used on FIPS JVMs
     * @param builder       settings builder to mutate
     */
    private void addStorePathToBuilder(
        String remoteCluster,
        String storeName,
        String password,
        String passwordFips,
        Settings.Builder builder
    ) {
        String storeType = inFipsJvm() ? "BCFKS" : "PKCS12";
        // NOTE(review): non-FIPS fixtures use a ".jks" extension despite the PKCS12 type — confirm fixtures
        String extension = inFipsJvm() ? ".bcfks" : ".jks";
        String keystorePassword = inFipsJvm() ? passwordFips : password;
        // Lazily install MockSecureSettings so repeated helper calls share one instance
        if (builder.getSecureSettings() == null) {
            builder.setSecureSettings(new MockSecureSettings());
        }
        MockSecureSettings secureSettings = (MockSecureSettings) builder.getSecureSettings();
        secureSettings.setString("cluster.remote." + remoteCluster + ".signing.keystore.secure_password", keystorePassword);
        builder.put("cluster.remote." + remoteCluster + ".signing.keystore.type", storeType)
            .put(
                "cluster.remote." + remoteCluster + ".signing.keystore.path",
                getDataPath("/org/elasticsearch/xpack/security/signature/" + storeName + extension)
            );
    }
    /**
     * Convenience overload of {@link #addCertSigningTestCerts(String, Settings.Builder, boolean)}
     * that does not force RSA key material.
     */
    private void addCertSigningTestCerts(String remoteCluster, Settings.Builder builder) {
        addCertSigningTestCerts(remoteCluster, builder, false);
    }
    /**
     * Adds PEM-based signing settings for the given cluster alias: an EC certificate/key
     * pair (with a secure key passphrase) on non-FIPS JVMs, or an RSA pair trusted via
     * {@code root.crt} on FIPS JVMs or when {@code forceRSA} is set.
     *
     * @param remoteCluster cluster alias the signing settings are scoped to
     * @param builder       settings builder to mutate
     * @param forceRSA      when true, use the RSA material even on non-FIPS JVMs
     */
    private void addCertSigningTestCerts(String remoteCluster, Settings.Builder builder, boolean forceRSA) {
        String caPath;
        String certPath;
        String keyPath;
        if (inFipsJvm() == false && forceRSA == false) {
            // EC branch: key is passphrase-protected, so register the secure passphrase
            if (builder.getSecureSettings() == null) {
                builder.setSecureSettings(new MockSecureSettings());
            }
            MockSecureSettings secureSettings = (MockSecureSettings) builder.getSecureSettings();
            secureSettings.setString("cluster.remote." + remoteCluster + ".signing.secure_key_passphrase", "marshall");
            // The EC certificate is self-signed: it doubles as its own trust anchor
            caPath = "/org/elasticsearch/xpack/security/signature/signing_ec.crt";
            certPath = "/org/elasticsearch/xpack/security/signature/signing_ec.crt";
            keyPath = "/org/elasticsearch/xpack/security/signature/signing_ec.key";
        } else {
            caPath = "/org/elasticsearch/xpack/security/signature/root.crt";
            certPath = "/org/elasticsearch/xpack/security/signature/signing_rsa.crt";
            keyPath = "/org/elasticsearch/xpack/security/signature/signing_rsa.key";
        }
        builder.put("cluster.remote.signing.certificate_authorities", getDataPath(caPath))
            .put("cluster.remote." + remoteCluster + ".signing.certificate", getDataPath(certPath))
            .put("cluster.remote." + remoteCluster + ".signing.key", getDataPath(keyPath));
    }
    /** Terminates the test's thread pool after each test to avoid leaking threads. */
    @After
    public void tearDownThreadPool() {
        terminate(threadPool);
    }
}
|
CrossClusterApiKeySignatureManagerTests
|
java
|
apache__rocketmq
|
proxy/src/main/java/org/apache/rocketmq/proxy/grpc/pipeline/AuthorizationPipeline.java
|
{
"start": 1675,
"end": 2999
}
|
class ____ implements RequestPipeline {
private static final Logger LOGGER = LoggerFactory.getLogger(LoggerName.PROXY_LOGGER_NAME);
private final AuthConfig authConfig;
private final AuthorizationEvaluator authorizationEvaluator;
public AuthorizationPipeline(AuthConfig authConfig, MessagingProcessor messagingProcessor) {
this.authConfig = authConfig;
this.authorizationEvaluator = AuthorizationFactory.getEvaluator(authConfig, messagingProcessor::getMetadataService);
}
@Override
public void execute(ProxyContext context, Metadata headers, GeneratedMessageV3 request) {
if (!authConfig.isAuthorizationEnabled()) {
return;
}
try {
List<AuthorizationContext> contexts = newContexts(context, headers, request);
authorizationEvaluator.evaluate(contexts);
} catch (AuthorizationException | AuthenticationException ex) {
throw ex;
} catch (Throwable ex) {
LOGGER.error("authorize failed, request:{}", request, ex);
throw ex;
}
}
protected List<AuthorizationContext> newContexts(ProxyContext context, Metadata headers, GeneratedMessageV3 request) {
return AuthorizationFactory.newContexts(authConfig, headers, request);
}
}
|
AuthorizationPipeline
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/factory/cache/CachedResponse.java
|
{
"start": 3437,
"end": 4544
}
|
class ____ {
private final HttpStatusCode statusCode;
private final HttpHeaders headers = new HttpHeaders();
private final List<ByteBuffer> body = new ArrayList<>();
private @Nullable Instant timestamp;
public Builder(HttpStatusCode statusCode) {
this.statusCode = statusCode;
}
public Builder header(String name, String value) {
this.headers.add(name, value);
return this;
}
public Builder headers(HttpHeaders headers) {
this.headers.addAll(headers);
return this;
}
public Builder timestamp(Instant timestamp) {
this.timestamp = timestamp;
return this;
}
public Builder timestamp(Date timestamp) {
this.timestamp = timestamp.toInstant();
return this;
}
public Builder body(String data) {
return appendToBody(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8)));
}
public Builder appendToBody(ByteBuffer byteBuffer) {
this.body.add(byteBuffer);
return this;
}
public CachedResponse build() {
return new CachedResponse(statusCode, headers, body, timestamp == null ? new Date() : Date.from(timestamp));
}
}
}
|
Builder
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/DeferredImportSelector.java
|
{
"start": 1955,
"end": 2069
}
|
interface ____ {
/**
* Process the {@link AnnotationMetadata} of the importing @{@link Configuration}
*
|
Group
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java
|
{
"start": 794,
"end": 3241
}
|
class ____ {
public static final Setting<Boolean> AUTODETECT_PROCESS = Setting.boolSetting(
"xpack.ml.autodetect_process",
true,
Setting.Property.NodeScope
);
public static final Setting<ByteSizeValue> MAX_MODEL_MEMORY_LIMIT = Setting.memorySizeSetting(
"xpack.ml.max_model_memory_limit",
ByteSizeValue.ZERO,
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
public static final Setting<Integer> MAX_LAZY_ML_NODES = Setting.intSetting(
"xpack.ml.max_lazy_ml_nodes",
0,
0,
Setting.Property.OperatorDynamic,
Setting.Property.NodeScope
);
/**
* This boolean value indicates if `max_machine_memory_percent` should be ignored and an automatic calculation is used instead.
*
* This calculation takes into account total node size and the size of the JVM on that node.
*
* If the calculation fails, we fall back to `max_machine_memory_percent`.
*/
public static final Setting<Boolean> USE_AUTO_MACHINE_MEMORY_PERCENT = Setting.boolSetting(
"xpack.ml.use_auto_machine_memory_percent",
false,
Setting.Property.OperatorDynamic,
Setting.Property.NodeScope
);
public static final TimeValue STATE_PERSIST_RESTORE_TIMEOUT = TimeValue.timeValueMinutes(30);
public static final String ML_FEATURE_FAMILY = "machine-learning";
public static final LicensedFeature.Momentary ML_API_FEATURE = LicensedFeature.momentary(
ML_FEATURE_FAMILY,
"api",
License.OperationMode.PLATINUM
);
// This is the last version when we changed the ML job snapshot format.
public static final MlConfigVersion MIN_SUPPORTED_SNAPSHOT_VERSION = MlConfigVersion.V_8_3_0;
private MachineLearningField() {}
public static String valuesToId(String... values) {
String combined = Arrays.stream(values).filter(Objects::nonNull).collect(Collectors.joining());
byte[] bytes = combined.getBytes(StandardCharsets.UTF_8);
MurmurHash3.Hash128 hash = MurmurHash3.hash128(bytes, 0, bytes.length, 0, new MurmurHash3.Hash128());
byte[] hashedBytes = new byte[16];
System.arraycopy(Numbers.longToBytes(hash.h1), 0, hashedBytes, 0, 8);
System.arraycopy(Numbers.longToBytes(hash.h2), 0, hashedBytes, 8, 8);
return new BigInteger(hashedBytes) + "_" + combined.length();
}
}
|
MachineLearningField
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/CriteriaBulkUpdateQuotedInheritanceTest.java
|
{
"start": 3609,
"end": 3857
}
|
class ____ {
@Id
private Long id;
@Column
private String name;
public UserEntity() {
}
public UserEntity(Long id, String name) {
this.id = id;
this.name = name;
}
}
@Entity( name = "PatientEntity" )
public static
|
UserEntity
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/OpenTelemetrySuppressNonAppUriManagementInterfaceTest.java
|
{
"start": 680,
"end": 2869
}
|
class ____ {
@RegisterExtension
final static QuarkusDevModeTest TEST = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addClasses(HelloResource.class, TestSpanExporter.class, TestSpanExporterProvider.class)
.addAsResource(new StringAsset(TestSpanExporterProvider.class.getCanonicalName()),
"META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider")
.add(new StringAsset(
"""
quarkus.otel.traces.exporter=test-span-exporter
quarkus.otel.metrics.exporter=none
quarkus.otel.bsp.export.timeout=1s
quarkus.otel.bsp.schedule.delay=50
quarkus.management.enabled=true
quarkus.management.port=9001
"""),
"application.properties"));
@Test
void test() {
// Must not be traced
RestAssured.given()
.get("http://localhost:9001/q/health/")
.then()
.statusCode(200);
RestAssured.given()
.get("/q/dev-ui/")
.then()
.statusCode(200);
RestAssured.given()
.get("/q/dev-ui/icon/font-awesome.js")
.then()
.statusCode(200);
// Valid trace
RestAssured.given()
.get("/hello")
.then()
.statusCode(200);
// Get span names
List<String> spans = Arrays.asList(
RestAssured.given()
.get("/hello/spans")
.then()
.statusCode(200)
.extract().body()
.asString()
.split(";"));
assertThat(spans).containsExactly("GET /hello");
}
@Path("/hello")
public static
|
OpenTelemetrySuppressNonAppUriManagementInterfaceTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/license/ExpirationCallback.java
|
{
"start": 862,
"end": 1559
}
|
class ____ extends ExpirationCallback {
/**
* Callback schedule prior to license expiry
*
* @param min latest relative time to execute before license expiry
* @param max earliest relative time to execute before license expiry
* @param frequency interval between execution
*/
Pre(TimeValue min, TimeValue max, TimeValue frequency) {
super(Orientation.PRE, min, max, frequency);
}
}
/**
* Callback that is triggered every <code>frequency</code> when
* current time is between <code>min</code> and <code>max</code>
* after license expiry.
*/
public abstract static
|
Pre
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/component/validator/ValidatorIncludeRelativeRouteTest.java
|
{
"start": 1037,
"end": 2526
}
|
class ____ extends ValidatorIncludeRouteTest {
@Override
@Test
public void testValidMessage() throws Exception {
validEndpoint.expectedMessageCount(1);
finallyEndpoint.expectedMessageCount(1);
String body
= "<p:person user=\"james\" xmlns:p=\"org.person\" xmlns:h=\"org.health.check.person\" xmlns:c=\"org.health.check.common\">\n"
+ " <p:firstName>James</p:firstName>\n" + " <p:lastName>Strachan</p:lastName>\n"
+ " <p:city>London</p:city>\n" + " <h:health>\n"
+ " <h:lastCheck>2011-12-23</h:lastCheck>\n" + " <h:status>OK</h:status>\n"
+ " <c:commonElement>" + " <c:element1/>"
+ " <c:element2/>" + " </c:commonElement>" + " </h:health>\n" + "</p:person>";
template.sendBody("direct:start", body);
MockEndpoint.assertIsSatisfied(validEndpoint, invalidEndpoint, finallyEndpoint);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").doTry().to("validator:org/apache/camel/component/validator/xsds/person.xsd")
.to("mock:valid").doCatch(ValidationException.class)
.to("mock:invalid").doFinally().to("mock:finally").end();
}
};
}
}
|
ValidatorIncludeRelativeRouteTest
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/CaffeineLoadCacheEndpointBuilderFactory.java
|
{
"start": 1469,
"end": 1615
}
|
interface ____ {
/**
* Builder for endpoint for the Caffeine LoadCache component.
*/
public
|
CaffeineLoadCacheEndpointBuilderFactory
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/connector/upserttest/sink/ImmutableByteArrayWrapperTest.java
|
{
"start": 1178,
"end": 1973
}
|
class ____ {
@Test
void testConstructorCopy() {
byte[] array = "immutability of constructor".getBytes();
byte[] clonedArray = new ImmutableByteArrayWrapper(array).bytes;
assertCopyIsReferenceFree(array, clonedArray);
}
@Test
void testGetterCopy() {
byte[] array = "immutability of getter".getBytes();
byte[] clonedArray = new ImmutableByteArrayWrapper(array).array();
assertCopyIsReferenceFree(array, clonedArray);
}
private static void assertCopyIsReferenceFree(byte[] original, byte[] clone) {
assertThat(clone).isNotSameAs(original);
assertThat(clone).isEqualTo(original);
Arrays.fill(original, (byte) 0);
assertThat(clone).isNotEqualTo(original);
}
}
|
ImmutableByteArrayWrapperTest
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/MissingBindingValidationTest.java
|
{
"start": 82608,
"end": 82933
}
|
interface ____ {",
" Child child();",
"}");
Source child =
CompilerTests.javaSource(
"test.Child",
"package test;",
"",
"import dagger.Subcomponent;",
"",
"@Subcomponent(modules = ChildModule.class)",
"
|
Parent
|
java
|
playframework__playframework
|
core/play-integration-test/src/test/java/play/routing/AbstractRoutingDslTest.java
|
{
"start": 1090,
"end": 24983
}
|
class ____ {
abstract Application application();
abstract RoutingDsl routingDsl();
private Router router(Function<RoutingDsl, Router> function) {
return function.apply(routingDsl());
}
@Test
public void shouldProvideJavaRequestToActionWithoutParameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request")
.routingTo(
request ->
request.header("X-Test").map(Results::ok).orElse(Results.notFound()))
.build());
String result =
makeRequest(router, "GET", "/with-request", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value");
}
@Test
public void shouldProvideJavaRequestToActionWithSingleParameter() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request/:p1")
.routingTo(
(request, number) ->
request
.header("X-Test")
.map(header -> Results.ok(header + " - " + number))
.orElse(Results.notFound()))
.build());
String result =
makeRequest(router, "GET", "/with-request/10", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value - 10");
}
@Test
public void shouldProvideJavaRequestToActionWith2Parameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request/:p1/:p2")
.routingTo(
(request, n1, n2) ->
request
.header("X-Test")
.map(header -> Results.ok(header + " - " + n1 + " - " + n2))
.orElse(Results.notFound()))
.build());
String result =
makeRequest(
router, "GET", "/with-request/10/20", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value - 10 - 20");
}
@Test
public void shouldProvideJavaRequestToActionWith3Parameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request/:p1/:p2/:p3")
.routingTo(
(request, n1, n2, n3) ->
request
.header("X-Test")
.map(
header ->
Results.ok(header + " - " + n1 + " - " + n2 + " - " + n3))
.orElse(Results.notFound()))
.build());
String result =
makeRequest(
router, "GET", "/with-request/10/20/30", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value - 10 - 20 - 30");
}
@Test
public void shouldProvideJavaRequestToAsyncActionWithoutParameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request")
.routingAsync(
request ->
CompletableFuture.completedFuture(
request
.header("X-Test")
.map(Results::ok)
.orElse(Results.notFound())))
.build());
String result =
makeRequest(router, "GET", "/with-request", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value");
}
@Test
public void shouldProvideJavaRequestToAsyncActionWithSingleParameter() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request/:p1")
.routingAsync(
(request, number) ->
CompletableFuture.completedFuture(
request
.header("X-Test")
.map(header -> Results.ok(header + " - " + number))
.orElse(Results.notFound())))
.build());
String result =
makeRequest(router, "GET", "/with-request/10", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value - 10");
}
@Test
public void shouldProvideJavaRequestToAsyncActionWith2Parameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request/:p1/:p2")
.routingAsync(
(request, n1, n2) ->
CompletableFuture.completedFuture(
request
.header("X-Test")
.map(header -> Results.ok(header + " - " + n1 + " - " + n2))
.orElse(Results.notFound())))
.build());
String result =
makeRequest(
router, "GET", "/with-request/10/20", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value - 10 - 20");
}
@Test
public void shouldProvideJavaRequestToAsyncActionWith3Parameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/with-request/:p1/:p2/:p3")
.routingAsync(
(request, n1, n2, n3) ->
CompletableFuture.completedFuture(
request
.header("X-Test")
.map(
header ->
Results.ok(
header + " - " + n1 + " - " + n2 + " - " + n3))
.orElse(Results.notFound())))
.build());
String result =
makeRequest(
router, "GET", "/with-request/10/20/30", rb -> rb.header("X-Test", "Header value"));
assertThat(result).isEqualTo("Header value - 10 - 20 - 30");
}
@Test
public void shouldPreserveRequestBodyAsText() {
Router router =
router(
routingDsl ->
routingDsl
.POST("/with-body")
.routingTo(request -> Results.ok(request.body().asText()))
.build());
String result = makeRequest(router, "POST", "/with-body", rb -> rb.bodyText("The Body"));
assertThat(result).isEqualTo("The Body");
}
@Test
public void shouldPreserveRequestBodyAsJson() {
Router router =
router(
routingDsl ->
routingDsl
.POST("/with-body")
.routingTo(request -> Results.ok(request.body().asJson()))
.build());
String result =
makeRequest(
router,
"POST",
"/with-body",
requestBuilder -> requestBuilder.bodyJson(Json.parse("{ \"a\": \"b\" }")));
assertThat(result).isEqualTo("{\"a\":\"b\"}");
}
@Test
public void shouldPreserveRequestBodyAsXml() {
Router router =
router(
routingDsl ->
routingDsl
.POST("/with-body")
.routingTo(request -> ok(XML.toBytes(request.body().asXml()).utf8String()))
.build());
String result =
makeRequest(
router,
"POST",
"/with-body",
requestBuilder ->
requestBuilder.bodyXml(
XML.fromString("<?xml version=\"1.0\" encoding=\"UTF-8\"?><a>b</a>")));
assertThat(result).isEqualTo("<?xml version=\"1.0\" encoding=\"UTF-8\"?><a>b</a>");
}
@Test
public void shouldPreserveRequestBodyAsRawBuffer() {
Router router =
router(
routingDsl ->
routingDsl
.POST("/with-body")
.routingTo(request -> ok(request.body().asRaw().asBytes().utf8String()))
.build());
String result =
makeRequest(
router,
"POST",
"/with-body",
requestBuilder -> requestBuilder.bodyRaw(ByteString.fromString("The Raw Body")));
assertThat(result).isEqualTo("The Raw Body");
}
@Test
public void shouldAcceptMultipartFormData() throws IOException {
Router router =
router(
routingDsl ->
routingDsl
.POST("/with-body")
.routingTo(
request -> {
Http.MultipartFormData<Object> data =
request.body().asMultipartFormData();
Files.TemporaryFile ref =
(Files.TemporaryFile) data.getFile("document").getRef();
try {
String contents = java.nio.file.Files.readString(ref.path());
return ok(
"author: "
+ data.asFormUrlEncoded().get("author")[0]
+ "\n"
+ "filename: "
+ data.getFile("document").getFilename()
+ "\n"
+ "contentType: "
+ data.getFile("document").getContentType()
+ "\n"
+ "contents: "
+ contents
+ "\n");
} catch (IOException e) {
return internalServerError(e.getMessage());
}
})
.build());
Files.TemporaryFile tempFile = Files.singletonTemporaryFileCreator().create("temp", "txt");
java.nio.file.Files.write(tempFile.path(), "Twas brillig and the slithy Toves...".getBytes());
String result =
makeRequest(
router,
"POST",
"/with-body",
requestBuilder ->
requestBuilder.bodyMultipart(
Map.of("author", new String[] {"Lewis Carrol"}),
List.of(
new Http.MultipartFormData.FilePart<>(
"document", "jabberwocky.txt", "text/plain", tempFile))));
assertThat(result)
.isEqualTo(
"author: Lewis Carrol\n"
+ "filename: jabberwocky.txt\n"
+ "contentType: text/plain\n"
+ "contents: Twas brillig and the slithy Toves...\n");
}
@Test
public void shouldPreserveRequestBodyAsTextWhenUsingHttpRequest() {
Router router =
router(
routingDsl ->
routingDsl.POST("/with-body").routingTo(req -> ok(req.body().asText())).build());
String result = makeRequest(router, "POST", "/with-body", rb -> rb.bodyText("The Body"));
assertThat(result).isEqualTo("The Body");
}
@Test
public void shouldPreserveRequestBodyAsJsonWhenUsingHttpRequest() {
Router router =
router(
routingDsl ->
routingDsl.POST("/with-body").routingTo(req -> ok(req.body().asJson())).build());
String result =
makeRequest(
router,
"POST",
"/with-body",
requestBuilder -> requestBuilder.bodyJson(Json.parse("{ \"a\": \"b\" }")));
assertThat(result).isEqualTo("{\"a\":\"b\"}");
}
@Test
public void shouldPreserveRequestBodyAsXmlWhenUsingHttpRequest() {
Router router =
router(
routingDsl ->
routingDsl
.POST("/with-body")
.routingTo(req -> ok(XML.toBytes(req.body().asXml()).utf8String()))
.build());
String result =
makeRequest(
router,
"POST",
"/with-body",
requestBuilder ->
requestBuilder.bodyXml(
XML.fromString("<?xml version=\"1.0\" encoding=\"UTF-8\"?><a>b</a>")));
assertThat(result).isEqualTo("<?xml version=\"1.0\" encoding=\"UTF-8\"?><a>b</a>");
}
@Test
public void shouldPreserveRequestBodyAsRawBufferWhenUsingHttpRequest() {
Router router =
router(
routingDsl ->
routingDsl
.POST("/with-body")
.routingTo(req -> ok(req.body().asRaw().asBytes().utf8String()))
.build());
String result =
makeRequest(
router,
"POST",
"/with-body",
requestBuilder -> requestBuilder.bodyRaw(ByteString.fromString("The Raw Body")));
assertThat(result).isEqualTo("The Raw Body");
}
@Test
public void noParameters() {
Router router =
router(
routingDsl ->
routingDsl.GET("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "GET", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/foo/bar"));
}
@Test
public void oneParameter() {
Router router =
router(
routingDsl ->
routingDsl.GET("/hello/:to").routingTo((req, to) -> ok("Hello " + to)).build());
assertThat(makeRequest(router, "GET", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/foo/bar"));
}
@Test
public void twoParameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/:say/:to")
.routingTo((req, say, to) -> ok(say + " " + to))
.build());
assertThat(makeRequest(router, "GET", "/Hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/foo"));
}
@Test
public void threeParameters() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/:say/:to/:extra")
.routingTo((req, say, to, extra) -> ok(say + " " + to + extra))
.build());
assertThat(makeRequest(router, "GET", "/Hello/world/!")).isEqualTo("Hello world!");
assertNull(makeRequest(router, "GET", "/foo/bar"));
}
@Test
public void noParametersAsync() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/hello/world")
.routingAsync(req -> completedFuture(ok("Hello world")))
.build());
assertThat(makeRequest(router, "GET", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/foo/bar"));
}
@Test
public void oneParameterAsync() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/hello/:to")
.routingAsync((req, to) -> completedFuture(ok("Hello " + to)))
.build());
assertThat(makeRequest(router, "GET", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/foo/bar"));
}
@Test
public void twoParametersAsync() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/:say/:to")
.routingAsync((req, say, to) -> completedFuture(ok(say + " " + to)))
.build());
assertThat(makeRequest(router, "GET", "/Hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/foo"));
}
@Test
public void threeParametersAsync() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/:say/:to/:extra")
.routingAsync(
(req, say, to, extra) -> completedFuture(ok(say + " " + to + extra)))
.build());
assertThat(makeRequest(router, "GET", "/Hello/world/!")).isEqualTo("Hello world!");
assertNull(makeRequest(router, "GET", "/foo/bar"));
}
@Test
public void get() {
Router router =
router(
routingDsl ->
routingDsl.GET("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "GET", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "POST", "/hello/world"));
}
@Test
public void head() {
Router router =
router(
routingDsl ->
routingDsl.HEAD("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "HEAD", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "POST", "/hello/world"));
}
@Test
public void post() {
Router router =
router(
routingDsl ->
routingDsl.POST("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "POST", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/hello/world"));
}
@Test
public void put() {
Router router =
router(
routingDsl ->
routingDsl.PUT("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "PUT", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "POST", "/hello/world"));
}
@Test
public void delete() {
Router router =
router(
routingDsl ->
routingDsl.DELETE("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "DELETE", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "POST", "/hello/world"));
}
@Test
public void patch() {
Router router =
router(
routingDsl ->
routingDsl.PATCH("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "PATCH", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "POST", "/hello/world"));
}
@Test
public void options() {
Router router =
router(
routingDsl ->
routingDsl.OPTIONS("/hello/world").routingTo(req -> ok("Hello world")).build());
assertThat(makeRequest(router, "OPTIONS", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "POST", "/hello/world"));
}
@Test
public void withSessionAndHeader() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/hello/world")
.routingTo(
req ->
ok("Hello world")
.addingToSession(req, "foo", "bar")
.withHeader("Foo", "Bar"))
.build());
Result result = routeAndCall(application(), router, fakeRequest("GET", "/hello/world"));
assertThat(result.session().get("foo")).isEqualTo(Optional.of("bar"));
assertThat(result.headers().get("Foo")).isEqualTo("Bar");
}
@Test
public void starMatcher() {
Router router =
router(
routingDsl ->
routingDsl.GET("/hello/*to").routingTo((req, to) -> ok("Hello " + to)).build());
assertThat(makeRequest(router, "GET", "/hello/blah/world")).isEqualTo("Hello blah/world");
assertNull(makeRequest(router, "GET", "/foo/bar"));
}
@Test
public void regexMatcher() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/hello/$to<[a-z]+>")
.routingTo((req, to) -> ok("Hello " + to))
.build());
assertThat(makeRequest(router, "GET", "/hello/world")).isEqualTo("Hello world");
assertNull(makeRequest(router, "GET", "/hello/10"));
}
@Test
public void multipleRoutes() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/hello/:to")
.routingTo((req, to) -> ok("Hello " + to))
.GET("/foo/bar")
.routingTo(req -> ok("foo bar"))
.POST("/hello/:to")
.routingTo((req, to) -> ok("Post " + to))
.GET("/*path")
.routingTo((req, path) -> ok("Path " + path))
.build());
assertThat(makeRequest(router, "GET", "/hello/world")).isEqualTo("Hello world");
assertThat(makeRequest(router, "GET", "/foo/bar")).isEqualTo("foo bar");
assertThat(makeRequest(router, "POST", "/hello/world")).isEqualTo("Post world");
assertThat(makeRequest(router, "GET", "/something/else")).isEqualTo("Path something/else");
}
@Test
public void encoding() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/simple/:to")
.routingTo((req, to) -> ok("Simple " + to))
.GET("/path/*to")
.routingTo((req, to) -> ok("Path " + to))
.GET("/regex/$to<.*>")
.routingTo((req, to) -> ok("Regex " + to))
.build());
assertThat(makeRequest(router, "GET", "/simple/dollar%24")).isEqualTo("Simple dollar$");
assertThat(makeRequest(router, "GET", "/path/dollar%24")).isEqualTo("Path dollar%24");
assertThat(makeRequest(router, "GET", "/regex/dollar%24")).isEqualTo("Regex dollar%24");
}
@Test
public void typed() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/:a/:b/:c")
.routingTo(
(Http.Request req, Integer a, Boolean b, String c) ->
ok("int " + a + " boolean " + b + " string " + c))
.build());
assertThat(makeRequest(router, "GET", "/20/true/foo"))
.isEqualTo("int 20 boolean true string foo");
}
@Test(expected = IllegalArgumentException.class)
public void wrongNumberOfParameters() {
routingDsl().GET("/:a/:b").routingTo((req, foo) -> ok(foo.toString()));
}
@Test(expected = IllegalArgumentException.class)
public void badParameterType() {
routingDsl().GET("/:a").routingTo((Http.Request req, InputStream is) -> ok());
}
@Test
public void bindError() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/:a")
.routingTo((Http.Request req, Integer a) -> ok("int " + a))
.build());
assertThat(makeRequest(router, "GET", "/foo"))
.isEqualTo("Cannot parse parameter a as Int: For input string: \"foo\"");
}
@Test
public void customPathBindable() {
Router router =
router(
routingDsl ->
routingDsl
.GET("/:a")
.routingTo((Http.Request req, MyString myString) -> ok(myString.value))
.build());
assertThat(makeRequest(router, "GET", "/foo")).isEqualTo("a:foo");
}
public static
|
AbstractRoutingDslTest
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/utils/LogicalTypeParser.java
|
{
"start": 12410,
"end": 13359
}
|
enum ____ {
CHAR,
VARCHAR,
STRING,
BOOLEAN,
BINARY,
VARBINARY,
BYTES,
DECIMAL,
NUMERIC,
DEC,
TINYINT,
SMALLINT,
INT,
INTEGER,
BIGINT,
FLOAT,
DOUBLE,
PRECISION,
DATE,
TIME,
WITH,
WITHOUT,
LOCAL,
ZONE,
TIMESTAMP,
TIMESTAMP_LTZ,
INTERVAL,
YEAR,
MONTH,
DAY,
HOUR,
MINUTE,
SECOND,
TO,
ARRAY,
MULTISET,
MAP,
ROW,
NULL,
RAW,
LEGACY,
NOT,
DESCRIPTOR,
STRUCTURED,
VARIANT
}
private static final Set<String> KEYWORDS =
Stream.of(Keyword.values())
.map(k -> k.toString().toUpperCase())
.collect(Collectors.toSet());
private static
|
Keyword
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/header/writers/CrossOriginEmbedderPolicyHeaderWriter.java
|
{
"start": 1187,
"end": 2014
}
|
class ____ implements HeaderWriter {
private static final String EMBEDDER_POLICY = "Cross-Origin-Embedder-Policy";
private @Nullable CrossOriginEmbedderPolicy policy;
/**
* Sets the {@link CrossOriginEmbedderPolicy} value to be used in the
* {@code Cross-Origin-Embedder-Policy} header
* @param embedderPolicy the {@link CrossOriginEmbedderPolicy} to use
*/
public void setPolicy(CrossOriginEmbedderPolicy embedderPolicy) {
Assert.notNull(embedderPolicy, "embedderPolicy cannot be null");
this.policy = embedderPolicy;
}
@Override
public void writeHeaders(HttpServletRequest request, HttpServletResponse response) {
if (this.policy != null && !response.containsHeader(EMBEDDER_POLICY)) {
response.addHeader(EMBEDDER_POLICY, this.policy.getPolicy());
}
}
public
|
CrossOriginEmbedderPolicyHeaderWriter
|
java
|
apache__camel
|
components/camel-openapi-java/src/main/java/org/apache/camel/openapi/OpenApiRestProducerFactory.java
|
{
"start": 1560,
"end": 8075
}
|
class ____ implements RestProducerFactory {
private static final Logger LOG = LoggerFactory.getLogger(OpenApiRestProducerFactory.class);
@Override
public Producer createProducer(
CamelContext camelContext, String host,
String verb, String basePath, String uriTemplate, String queryParameters,
String consumes, String produces, RestConfiguration configuration, Map<String, Object> parameters)
throws Exception {
String apiDoc = (String) parameters.get("apiDoc");
// load json model
if (apiDoc == null) {
throw new IllegalArgumentException("OpenApi api-doc must be configured using the apiDoc option");
}
String path = uriTemplate != null ? uriTemplate : basePath;
// path must start with a leading slash
if (!path.startsWith("/")) {
path = "/" + path;
}
OpenAPI openApi = loadOpenApiModel(apiDoc);
Operation operation = getOpenApiOperation(openApi, verb, path);
if (operation == null) {
throw new IllegalArgumentException("OpenApi api-doc does not contain operation for " + verb + ":" + path);
}
// validate if we have the query parameters also
if (queryParameters != null) {
for (Parameter param : operation.getParameters()) {
if ("query".equals(param.getIn()) && Boolean.TRUE.equals(param.getRequired())) {
// check if we have the required query parameter defined
String key = param.getName();
String token = key + "=";
boolean hasQuery = queryParameters.contains(token);
if (!hasQuery) {
throw new IllegalArgumentException(
"OpenApi api-doc does not contain query parameter " + key + " for " + verb + ":" + path);
}
}
}
}
String componentName = (String) parameters.get("componentName");
return createHttpProducer(camelContext, openApi, operation, host, verb, path, queryParameters,
produces, consumes, componentName, parameters);
}
OpenAPI loadOpenApiModel(String apiDoc) throws Exception {
final OpenAPIV3Parser openApiParser = new OpenAPIV3Parser();
final SwaggerParseResult openApi = openApiParser.readLocation(apiDoc, null, null);
if (openApi != null && openApi.getOpenAPI() != null) {
return openApi.getOpenAPI();
}
// In theory there should be a message in the parse result but it has disappeared...
throw new IllegalArgumentException(
"The given OpenApi specification could not be loaded from `" + apiDoc + "`.");
}
private Operation getOpenApiOperation(OpenAPI openApi, String verb, String path) {
// path may include base path so skip that
String basePath = RestOpenApiSupport.getBasePathFromOasDocument(openApi);
if (basePath != null && path.startsWith(basePath)) {
path = path.substring(basePath.length());
}
PathItem modelPath = openApi.getPaths().get(path);
if (modelPath == null) {
return null;
}
// get,put,post,head,delete,patch,options
Operation op = null;
PathItem.HttpMethod method = PathItem.HttpMethod.valueOf(verb.toUpperCase());
if (method != null) {
return modelPath.readOperationsMap().get(method);
}
return op;
}
private Producer createHttpProducer(
CamelContext camelContext, OpenAPI openApi, Operation operation,
String host, String verb, String path, String queryParameters,
String consumes, String produces,
String componentName, Map<String, Object> parameters)
throws Exception {
LOG.debug("Using OpenApi operation: {} with {} {}", operation, verb, path);
RestProducerFactory factory = (RestProducerFactory) parameters.remove("restProducerFactory");
if (factory != null) {
LOG.debug("Using RestProducerFactory: {}", factory);
if (produces == null) {
StringJoiner producesBuilder = new StringJoiner(",");
if (operation.getResponses() != null) {
for (ApiResponse response : operation.getResponses().values()) {
if (response.getContent() != null) {
for (String mediaType : response.getContent().keySet()) {
producesBuilder.add(mediaType);
}
}
}
}
produces = producesBuilder.length() == 0 ? null : producesBuilder.toString();
}
if (consumes == null) {
StringJoiner consumesBuilder = new StringJoiner(",");
if (operation.getRequestBody() != null && operation.getRequestBody().getContent() != null) {
for (String mediaType : operation.getRequestBody().getContent().keySet()) {
consumesBuilder.add(mediaType);
}
}
consumes = consumesBuilder.length() == 0 ? null : consumesBuilder.toString();
}
String basePath;
String uriTemplate;
if (host == null) {
//if no explicit host has been configured then use host and base path from the openApi api-doc
host = RestOpenApiSupport.getHostFromOasDocument(openApi);
basePath = RestOpenApiSupport.getBasePathFromOasDocument(openApi);
uriTemplate = path;
} else {
// path includes also uri template
basePath = path;
uriTemplate = null;
}
RestConfiguration config = CamelContextHelper.getRestConfiguration(camelContext, null, componentName);
Producer answer = factory.createProducer(camelContext, host, verb, basePath, uriTemplate, queryParameters, consumes,
produces,
config, parameters);
CamelContextAware.trySetCamelContext(answer, camelContext);
return answer;
} else {
throw new IllegalStateException("Cannot find RestProducerFactory in Registry or as a Component to use");
}
}
}
|
OpenApiRestProducerFactory
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/NullArgumentForNonNullParameterTest.java
|
{
"start": 5362,
"end": 5934
}
|
class ____ {
void consume(String s) {}
void foo() {
// BUG: Diagnostic contains:
consume(null);
}
}
""")
.doTest();
}
@Test
public void positiveNullMarkedPackageInfo() {
aggressiveHelper
.addSourceLines(
"p/package-info.java",
"""
@org.jspecify.annotations.NullMarked
package p;
""")
.addSourceLines(
"p/Foo.java",
"""
package p;
|
Foo
|
java
|
google__guava
|
android/guava/src/com/google/common/graph/Graphs.java
|
{
"start": 6258,
"end": 9676
}
|
interface ____ not support parallel
// edges, so this traversal would require reusing the undirected AB edge.
return false;
}
/**
* Returns the transitive closure of {@code graph}. The transitive closure of a graph {@code G} is
* a graph {@code T} that is a supergraph of {@code G}, augmented by, for each pair of nodes A and
* B, an edge connecting node A to node B if there is a sequence of edges in {@code G} starting at
* A and ending at B.
*
* <p>{@code strategy} defines the circumstances under which self-loops will be added to the
* transitive closure graph.
*
* <p>This is a "snapshot" based on the current topology of {@code graph}, rather than a live view
* of the transitive closure of {@code graph}. In other words, the returned {@link Graph} will not
* be updated after modifications to {@code graph}.
*
* @since NEXT
*/
// TODO(b/31438252): Consider optimizing for undirected graphs.
public static <N> ImmutableGraph<N> transitiveClosure(
Graph<N> graph, TransitiveClosureSelfLoopStrategy strategy) {
ImmutableGraph.Builder<N> transitiveClosure =
GraphBuilder.from(graph).allowsSelfLoops(true).<N>immutable();
for (N node : graph.nodes()) {
// add each node explicitly to include isolated nodes
transitiveClosure.addNode(node);
for (N reachableNode : getReachableNodes(graph, node, strategy)) {
transitiveClosure.putEdge(node, reachableNode);
}
}
return transitiveClosure.build();
}
/**
* Equivalent to {@code transitiveClosure(graph, ADD_SELF_LOOPS_ALWAYS)}. Callers should look at
* the different strategy options that the new method supports rather than simply migrating to the
* new method with the existing behavior; we believe that most callers will want to use the {@code
* ADD_SELF_LOOPS_FOR_CYCLES} strategy.
*
* @since 33.1.0 (present with return type {@code Graph} since 20.0)
* @deprecated Use {@link #transitiveClosure(Graph, TransitiveClosureSelfLoopStrategy)} instead.
*/
@SuppressWarnings("InlineMeSuggester") // We expect most users to want to change behavior.
@Deprecated
public static <N> ImmutableGraph<N> transitiveClosure(Graph<N> graph) {
return transitiveClosure(graph, ADD_SELF_LOOPS_ALWAYS);
}
/**
* Returns the nodes reachable from {@code node} in {@code graph}, according to the given {@code
* strategy}.
*/
private static <N> Iterable<N> getReachableNodes(
Graph<N> graph, N node, TransitiveClosureSelfLoopStrategy strategy) {
Traverser<N> traverser = Traverser.forGraph(graph);
switch (strategy) {
case ADD_SELF_LOOPS_ALWAYS: // always include 'node'
return traverser.breadthFirst(node);
case ADD_SELF_LOOPS_FOR_CYCLES: // include 'node' iff there's an incident cycle
// note that if 'node' has a self-loop, it will appear in its successors
return traverser.breadthFirst(graph.successors(node));
}
throw new IllegalArgumentException("Unrecognized strategy: " + strategy);
}
/**
* A strategy for adding self-loops to {@linkplain #transitiveClosure(Graph,
* TransitiveClosureSelfLoopStrategy) the transitive closure graph}. All strategies preserve
* self-loops that are present in the original graph.
*
* <p>The strategies differ based on how they define "cycle incident to a node".
*
* @since NEXT
*/
public
|
does
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/FileBufferReaderITCase.java
|
{
"start": 6731,
"end": 7691
}
|
class ____ extends AbstractInvokable {
/**
* Create an Invokable task and set its environment.
*
* @param environment The environment assigned to this invokable.
*/
public TestSourceInvokable(Environment environment) {
super(environment);
}
@Override
public void invoke() throws Exception {
final RecordWriter<ByteArrayType> writer =
new RecordWriterBuilder<ByteArrayType>().build(getEnvironment().getWriter(0));
final ByteArrayType bytes = new ByteArrayType(dataSource);
int counter = 0;
while (counter++ < numRecords) {
writer.emit(bytes);
writer.flushAll();
}
}
}
/**
* Basic sink {@link AbstractInvokable} which verifies the sent elements from the {@link
* TestSourceInvokable}.
*/
public static final
|
TestSourceInvokable
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/ext/TestSubtypesExternalPropertyMissingProperty.java
|
{
"start": 1035,
"end": 1102
}
|
class ____ requires the property to be present.
*/
static
|
that
|
java
|
apache__spark
|
common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/SslExternalShuffleSecuritySuite.java
|
{
"start": 982,
"end": 1539
}
|
class ____ extends ExternalShuffleSecuritySuite {
@Override
protected TransportConf createTransportConf(boolean encrypt) {
if (encrypt) {
return new TransportConf(
"shuffle",
SslSampleConfigs.createDefaultConfigProviderForRpcNamespaceWithAdditionalEntries(
Map.of("spark.authenticate.enableSaslEncryption", "true")
)
);
} else {
return new TransportConf(
"shuffle",
SslSampleConfigs.createDefaultConfigProviderForRpcNamespace()
);
}
}
}
|
SslExternalShuffleSecuritySuite
|
java
|
playframework__playframework
|
core/play/src/main/java/play/Logger.java
|
{
"start": 1271,
"end": 1826
}
|
class ____ {
/**
* @deprecated Deprecated as of 2.7.0. Create an instance of {@link ALogger} via {@link
* #of(String)} / {@link #of(Class)} and use the same-named method. Or use SLF4J directly.
*/
@Deprecated private static final ALogger logger = of("application");
/**
* Obtain a logger instance.
*
* @param name name of the logger
* @return a logger
*/
public static ALogger of(String name) {
return new ALogger(play.api.Logger.apply(name));
}
/**
* Obtain a logger instance.
*
* @param clazz a
|
Logger
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequestBuilder.java
|
{
"start": 940,
"end": 2419
}
|
class ____ extends ActionRequestBuilder<SqlTranslateRequest, SqlTranslateResponse> {
public SqlTranslateRequestBuilder(ElasticsearchClient client) {
this(
client,
null,
null,
emptyMap(),
emptyList(),
Protocol.TIME_ZONE,
Protocol.FETCH_SIZE,
Protocol.REQUEST_TIMEOUT,
Protocol.PAGE_TIMEOUT,
new RequestInfo(Mode.PLAIN)
);
}
public SqlTranslateRequestBuilder(
ElasticsearchClient client,
String query,
QueryBuilder filter,
Map<String, Object> runtimeMappings,
List<SqlTypedParamValue> params,
ZoneId zoneId,
int fetchSize,
TimeValue requestTimeout,
TimeValue pageTimeout,
RequestInfo requestInfo
) {
super(
client,
SqlTranslateAction.INSTANCE,
new SqlTranslateRequest(query, params, filter, runtimeMappings, zoneId, fetchSize, requestTimeout, pageTimeout, requestInfo)
);
}
public SqlTranslateRequestBuilder query(String query) {
request.query(query);
return this;
}
public SqlTranslateRequestBuilder zoneId(ZoneId zoneId) {
request.zoneId(zoneId);
return this;
}
public SqlTranslateRequestBuilder projectRouting(String projectRouting) {
request.projectRouting(projectRouting);
return this;
}
}
|
SqlTranslateRequestBuilder
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/SqlEndpointBuilderFactory.java
|
{
"start": 60762,
"end": 62246
}
|
class ____ a
* default constructor to create instance with. d) If the query resulted
* in more than one rows, it throws an non-unique result exception.
* StreamList streams the result of the query using an Iterator. This
* can be used with the Splitter EIP in streaming mode to process the
* ResultSet in streaming fashion.
*
* The option is a:
* <code>org.apache.camel.component.sql.SqlOutputType</code> type.
*
* Default: SelectList
* Group: common
*
* @param outputType the value to set
* @return the dsl builder
*/
default SqlEndpointProducerBuilder outputType(org.apache.camel.component.sql.SqlOutputType outputType) {
doSetProperty("outputType", outputType);
return this;
}
/**
* Make the output of consumer or producer to SelectList as List of Map,
* or SelectOne as single Java object in the following way: a) If the
* query has only single column, then that JDBC Column object is
* returned. (such as SELECT COUNT( ) FROM PROJECT will return a Long
* object. b) If the query has more than one column, then it will return
* a Map of that result. c) If the outputClass is set, then it will
* convert the query result into an Java bean object by calling all the
* setters that match the column names. It will assume your
|
has
|
java
|
netty__netty
|
testsuite-native-image-client-runtime-init/src/main/java/io/netty/testsuite/svm/client/NativeClientWithNettyInitAtRuntime.java
|
{
"start": 821,
"end": 1177
}
|
class ____ {
/**
* Main entry point (not instantiable)
*/
private NativeClientWithNettyInitAtRuntime() {
}
public static void main(String[] args) {
System.out.println(NetUtil.LOCALHOST4);
System.out.println(NetUtil.LOCALHOST6);
System.out.println(NetUtil.LOCALHOST);
}
}
|
NativeClientWithNettyInitAtRuntime
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableTakeWhile.java
|
{
"start": 1397,
"end": 3254
}
|
class ____<T> implements FlowableSubscriber<T>, Subscription {
final Subscriber<? super T> downstream;
final Predicate<? super T> predicate;
Subscription upstream;
boolean done;
TakeWhileSubscriber(Subscriber<? super T> actual, Predicate<? super T> predicate) {
this.downstream = actual;
this.predicate = predicate;
}
@Override
public void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
if (done) {
return;
}
boolean b;
try {
b = predicate.test(t);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
upstream.cancel();
onError(e);
return;
}
if (!b) {
done = true;
upstream.cancel();
downstream.onComplete();
return;
}
downstream.onNext(t);
}
@Override
public void onError(Throwable t) {
if (done) {
RxJavaPlugins.onError(t);
return;
}
done = true;
downstream.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
downstream.onComplete();
}
@Override
public void request(long n) {
upstream.request(n);
}
@Override
public void cancel() {
upstream.cancel();
}
}
}
|
TakeWhileSubscriber
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLCaseExpr.java
|
{
"start": 906,
"end": 3168
}
|
class ____ extends SQLExprImpl implements SQLReplaceable, Serializable {
private static final long serialVersionUID = 1L;
private final List<Item> items = new ArrayList<Item>();
private SQLExpr valueExpr;
private SQLExpr elseExpr;
public SQLCaseExpr() {
}
public SQLExpr getValueExpr() {
return this.valueExpr;
}
public void setValueExpr(SQLExpr valueExpr) {
if (valueExpr != null) {
valueExpr.setParent(this);
}
this.valueExpr = valueExpr;
}
public SQLExpr getElseExpr() {
return this.elseExpr;
}
public void setElseExpr(SQLExpr elseExpr) {
if (elseExpr != null) {
elseExpr.setParent(this);
}
this.elseExpr = elseExpr;
}
public List<Item> getItems() {
return this.items;
}
public void addItem(Item item) {
if (item != null) {
item.setParent(this);
this.items.add(item);
}
}
public void addItem(SQLExpr condition, SQLExpr value) {
this.addItem(new Item(condition, value));
}
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
if (valueExpr != null) {
valueExpr.accept(visitor);
}
for (Item item : this.items) {
if (item != null) {
item.accept(visitor);
}
}
if (elseExpr != null) {
elseExpr.accept(visitor);
}
}
visitor.endVisit(this);
}
@Override
public List getChildren() {
List<SQLObject> children = new ArrayList<SQLObject>();
if (valueExpr != null) {
children.add(this.valueExpr);
}
children.addAll(this.items);
if (elseExpr != null) {
children.add(this.elseExpr);
}
return children;
}
@Override
public boolean replace(SQLExpr expr, SQLExpr target) {
if (valueExpr == expr) {
setValueExpr(target);
return true;
}
if (elseExpr == expr) {
setElseExpr(target);
return true;
}
return false;
}
public static
|
SQLCaseExpr
|
java
|
playframework__playframework
|
web/play-java-forms/src/test/java/play/data/Subtask.java
|
{
"start": 439,
"end": 855
}
|
class ____ {
@Constraints.Min(10)
public Long id;
@Constraints.Required public String name;
public Boolean done = true;
@Constraints.Required
@DateTime(pattern = "dd/MM/yyyy")
public Date dueDate;
public Date endDate;
@I18Constraint(value = "patterns.zip")
public String zip;
@AnotherI18NConstraint(value = "patterns.zip")
public String anotherZip;
public List<String> emails;
}
|
Subtask
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlObjectHandler.java
|
{
"start": 20693,
"end": 21062
}
|
class ____ {
final String samlMessage;
final boolean hasSignature;
final String relayState;
ParsedQueryString(String samlMessage, boolean hasSignature, String relayState) {
this.samlMessage = samlMessage;
this.hasSignature = hasSignature;
this.relayState = relayState;
}
}
}
|
ParsedQueryString
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierTelemetryPlugin.java
|
{
"start": 2678,
"end": 3764
}
|
class ____ extends TransportXPackInfoAction {
@Inject
public DataTiersTransportXPackInfoAction(
TransportService transportService,
ActionFilters actionFilters,
LicenseService licenseService,
NodeClient client
) {
super(transportService, actionFilters, licenseService, client);
}
@Override
protected List<ActionType<XPackInfoFeatureResponse>> infoActions() {
return Collections.singletonList(XPackInfoFeatureAction.DATA_TIERS);
}
}
public DataTierTelemetryPlugin(final Settings settings, final Path configPath) {
super(settings, configPath);
}
@Override
protected Class<? extends TransportAction<XPackUsageRequest, XPackUsageResponse>> getUsageAction() {
return DataTiersTransportXPackUsageAction.class;
}
@Override
protected Class<? extends TransportAction<XPackInfoRequest, XPackInfoResponse>> getInfoAction() {
return DataTiersTransportXPackInfoAction.class;
}
}
|
DataTiersTransportXPackInfoAction
|
java
|
junit-team__junit5
|
junit-jupiter-api/src/main/java/org/junit/jupiter/api/extension/ClassTemplateInvocationContext.java
|
{
"start": 940,
"end": 1129
}
|
interface ____ {
/**
* Get the display name for this invocation.
*
* <p>The supplied {@code invocationIndex} is incremented by the framework
* with each
|
ClassTemplateInvocationContext
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/core/publisher/MonoDoFinallyTest.java
|
{
"start": 1459,
"end": 5498
}
|
class ____ implements Consumer<SignalType> {
volatile SignalType signalType;
volatile int calls;
@BeforeEach
public void before() {
signalType = null;
calls = 0;
}
@Override
public void accept(SignalType signalType) {
this.signalType = signalType;
this.calls++;
}
@Test
public void normalJust() {
StepVerifier.create(Mono.just(1).hide().doFinally(this))
.expectNoFusionSupport()
.expectNext(1)
.expectComplete()
.verify();
assertThat(calls).isEqualTo(1);
assertThat(signalType).isEqualTo(SignalType.ON_COMPLETE);
}
@Test
public void normalEmpty() {
StepVerifier.create(Mono.empty().doFinally(this))
.expectNoFusionSupport()
.expectComplete()
.verify();
assertThat(calls).isEqualTo(1);
assertThat(signalType).isEqualTo(SignalType.ON_COMPLETE);
}
@Test
public void normalError() {
StepVerifier.create(Mono.error(new IllegalArgumentException()).doFinally(this))
.expectNoFusionSupport()
.expectError(IllegalArgumentException.class)
.verify();
assertThat(calls).isEqualTo(1);
assertThat(signalType).isEqualTo(SignalType.ON_ERROR);
}
@Test
public void normalCancel() {
AtomicBoolean cancelCheck = new AtomicBoolean(false);
StepVerifier.create(Mono.just(1).hide()
.doOnCancel(() -> cancelCheck.set(true))
.doFinally(this))
.expectNoFusionSupport()
.expectNext(1)
.thenCancel()
.verify();
assertThat(calls).as("expected doFinally to be invoked exactly once").isEqualTo(1);
assertThat(signalType).isEqualTo(SignalType.CANCEL);
assertThat(cancelCheck.get()).as("expected tested mono to be cancelled").isTrue();
}
@Test
public void normalJustConditional() {
StepVerifier.create(Mono.just(1)
.hide()
.doFinally(this)
.filter(i -> true))
.expectNoFusionSupport()
.expectNext(1)
.expectComplete()
.verify();
assertThat(calls).isEqualTo(1);
assertThat(signalType).isEqualTo(SignalType.ON_COMPLETE);
}
@Test
public void nullCallback() {
assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> {
Mono.just(1).doFinally(null);
});
}
@Test
public void callbackThrows() {
try {
StepVerifier.create(Mono.just(1)
.doFinally(signal -> {
throw new IllegalStateException();
}))
.expectNext(1)
.expectComplete()
.verify();
}
catch (Throwable e) {
Throwable _e = Exceptions.unwrap(e);
assertThat(_e).isNotSameAs(e);
assertThat(_e).isInstanceOf(IllegalStateException.class);
}
}
@Test
public void callbackThrowsConditional() {
try {
StepVerifier.create(Mono.just(1)
.doFinally(signal -> {
throw new IllegalStateException();
})
.filter(i -> true))
.expectNext(1)
.expectComplete()
.verify();
}
catch (Throwable e) {
Throwable _e = Exceptions.unwrap(e);
assertThat(_e).isNotSameAs(e);
assertThat(_e).isInstanceOf(IllegalStateException.class);
}
}
@Test
public void severalInARowExecutedInReverseOrder() {
Queue<String> finallyOrder = new ConcurrentLinkedDeque<>();
Flux.just("b")
.hide()
.doFinally(s -> finallyOrder.offer("FIRST"))
.doFinally(s -> finallyOrder.offer("SECOND"))
.blockLast();
Assertions.assertThat(finallyOrder)
.containsExactly("SECOND", "FIRST");
}
@Test
public void scanOperator(){
MonoDoFinally<String> test = new MonoDoFinally<>(Mono.just("foo"), this);
Assertions.assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
}
|
MonoDoFinallyTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/AbstractStringPattern.java
|
{
"start": 601,
"end": 1761
}
|
class ____ implements StringPattern {
private Automaton automaton;
public final Automaton createAutomaton(boolean ignoreCase) {
try {
return doCreateAutomaton(ignoreCase);
} catch (TooComplexToDeterminizeException e) {
throw new IllegalArgumentException("Pattern was too complex to determinize", e);
}
}
protected abstract Automaton doCreateAutomaton(boolean ignoreCase);
private Automaton automaton() {
if (automaton == null) {
automaton = createAutomaton(false);
}
return automaton;
}
@Override
public boolean matchesAll() {
return Operations.isTotal(automaton());
}
@Override
public String exactMatch() {
Automaton a = automaton();
if (a.getNumStates() == 0) { // workaround for https://github.com/elastic/elasticsearch/pull/128887
return null; // Empty automaton has no matches
}
IntsRef singleton = Operations.getSingleton(a);
return singleton != null ? UnicodeUtil.newString(singleton.ints, singleton.offset, singleton.length) : null;
}
}
|
AbstractStringPattern
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/masterreplica/ConnectionsUnitTests.java
|
{
"start": 784,
"end": 1397
}
|
class ____ {
@Mock
private StatefulRedisConnection<String, String> connection1;
@BeforeEach
void before() {
when(connection1.closeAsync()).thenReturn(CompletableFuture.completedFuture(null));
}
@Test
void shouldCloseConnectionCompletingAfterCloseSignal() {
Connections connections = new Connections(5, Collections.emptyList());
connections.closeAsync();
verifyNoInteractions(connection1);
connections.onAccept(Tuples.of(RedisURI.create("localhost", 6379), connection1));
verify(connection1).closeAsync();
}
}
|
ConnectionsUnitTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java
|
{
"start": 1086,
"end": 3036
}
|
class ____ extends AbstractRateAggregator {
public HistogramRateAggregator(
String name,
ValuesSourceConfig valuesSourceConfig,
Rounding.DateTimeUnit rateUnit,
RateMode rateMode,
AggregationContext context,
Aggregator parent,
Map<String, Object> metadata
) throws IOException {
super(name, valuesSourceConfig, rateUnit, rateMode, context, parent, metadata);
}
@Override
public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, final LeafBucketCollector sub) throws IOException {
final CompensatedSum kahanSummation = new CompensatedSum(0, 0);
final HistogramValues values = ((HistogramValuesSource.Histogram) valuesSource).getHistogramValues(aggCtx.getLeafReaderContext());
return new LeafBucketCollectorBase(sub, values) {
@Override
public void collect(int doc, long bucket) throws IOException {
if (values.advanceExact(doc)) {
sums = bigArrays().grow(sums, bucket + 1);
compensations = bigArrays().grow(compensations, bucket + 1);
final HistogramValue sketch = values.histogram();
while (sketch.next()) {
double sum = sums.get(bucket);
double compensation = compensations.get(bucket);
kahanSummation.reset(sum, compensation);
final double value = switch (rateMode) {
case SUM -> sketch.value();
case VALUE_COUNT -> sketch.count();
};
kahanSummation.add(value);
compensations.set(bucket, kahanSummation.delta());
sums.set(bucket, kahanSummation.value());
}
}
}
};
}
}
|
HistogramRateAggregator
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/launcher/tagexpression/TokenTests.java
|
{
"start": 475,
"end": 1528
}
|
class ____ {
@Test
void startIndexOfTokenString() {
assertThat(new Token(0, "!").strippedTokenStartIndex()).isEqualTo(0);
assertThat(new Token(0, " !").strippedTokenStartIndex()).isEqualTo(2);
assertThat(new Token(7, "!").strippedTokenStartIndex()).isEqualTo(7);
}
@Test
void endIndexExclusive() {
assertThat(new Token(0, "!").endIndexExclusive()).isEqualTo(1);
assertThat(new Token(0, " !").endIndexExclusive()).isEqualTo(3);
assertThat(new Token(7, "!").endIndexExclusive()).isEqualTo(8);
}
@Test
void lastCharacterIndex() {
assertThat(new Token(0, "!").lastCharacterIndex()).isEqualTo(0);
assertThat(new Token(0, " !").lastCharacterIndex()).isEqualTo(2);
assertThat(new Token(7, "!").lastCharacterIndex()).isEqualTo(7);
}
@Test
void concatenateTwoTokens() {
var tokens = new Tokenizer().tokenize(" ! foo");
var one = tokens.get(0);
var two = tokens.get(1);
var joined = one.concatenate(two);
assertThat(joined.rawString()).isEqualTo(" ! foo");
assertThat(joined.startIndex()).isEqualTo(0);
}
}
|
TokenTests
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/config/AbstractRepositoryConfigTests.java
|
{
"start": 1387,
"end": 2370
}
|
class ____ {
@Autowired(required = false) UserRepository userRepository;
@Autowired(required = false) RoleRepository roleRepository;
@Autowired(required = false) AuditableUserRepository auditableUserRepository;
@Autowired JpaMetamodelMappingContext mappingContext;
/**
* Asserts that context creation detects 3 repository beans.
*/
@Test
void testContextCreation() {
assertNotNull(userRepository);
assertNotNull(roleRepository);
assertNotNull(auditableUserRepository);
}
@Test // DATAJPA-330
void repositoriesHaveExceptionTranslationApplied() {
JpaRepositoriesRegistrarIntegrationTests.assertExceptionTranslationActive(userRepository);
JpaRepositoriesRegistrarIntegrationTests.assertExceptionTranslationActive(roleRepository);
JpaRepositoriesRegistrarIntegrationTests.assertExceptionTranslationActive(auditableUserRepository);
}
@Test // DATAJPA-484
void exposesJpaMappingContext() {
assertNotNull(mappingContext);
}
}
|
AbstractRepositoryConfigTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/ParentCacheTest.java
|
{
"start": 2748,
"end": 3140
}
|
class ____ {
@Parent
public ParentEntity parent;
public String field;
public ChildEmbeddable() {
}
public ChildEmbeddable(String field) {
this.field = field;
}
public ParentEntity getParent() {
return parent;
}
public void setParent(ParentEntity parent) {
this.parent = parent;
}
}
@Entity( name = "ParentEntity" )
@Cacheable
public static
|
ChildEmbeddable
|
java
|
junit-team__junit5
|
junit-platform-commons/src/main/java/org/junit/platform/commons/logging/LoggerFactory.java
|
{
"start": 842,
"end": 1079
}
|
class ____ {
private LoggerFactory() {
/* no-op */
}
private static final Set<LogRecordListener> listeners = ConcurrentHashMap.newKeySet();
/**
* Get a {@link Logger} for the specified class.
*
* @param clazz the
|
LoggerFactory
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/SseEmitter.java
|
{
"start": 5327,
"end": 7377
}
|
class ____ implements SseEventBuilder {
private final Set<DataWithMediaType> dataToSend = new LinkedHashSet<>(4);
private @Nullable StringBuilder sb;
private boolean hasName;
@Override
public SseEventBuilder id(String id) {
append("id:").append(id).append('\n');
return this;
}
@Override
public SseEventBuilder name(String name) {
this.hasName = true;
append("event:").append(name).append('\n');
return this;
}
@Override
public SseEventBuilder reconnectTime(long reconnectTimeMillis) {
append("retry:").append(String.valueOf(reconnectTimeMillis)).append('\n');
return this;
}
@Override
public SseEventBuilder comment(String comment) {
append(':').append(comment).append('\n');
return this;
}
@Override
public SseEventBuilder data(Object object) {
return data(object, null);
}
@Override
public SseEventBuilder data(Object object, @Nullable MediaType mediaType) {
if (object instanceof ModelAndView mav && !this.hasName && mav.getViewName() != null) {
name(mav.getViewName());
}
append("data:");
saveAppendedText();
if (object instanceof String text) {
object = StringUtils.replace(text, "\n", "\ndata:");
}
this.dataToSend.add(new DataWithMediaType(object, mediaType));
append('\n');
return this;
}
SseEventBuilderImpl append(String text) {
if (this.sb == null) {
this.sb = new StringBuilder();
}
this.sb.append(text);
return this;
}
SseEventBuilderImpl append(char ch) {
if (this.sb == null) {
this.sb = new StringBuilder();
}
this.sb.append(ch);
return this;
}
@Override
public Set<DataWithMediaType> build() {
if (!StringUtils.hasLength(this.sb) && this.dataToSend.isEmpty()) {
return Collections.emptySet();
}
append('\n');
saveAppendedText();
return this.dataToSend;
}
private void saveAppendedText() {
if (this.sb != null) {
this.dataToSend.add(new DataWithMediaType(this.sb.toString(), TEXT_PLAIN));
this.sb = null;
}
}
}
}
|
SseEventBuilderImpl
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
|
{
"start": 2011,
"end": 2077
}
|
class ____ {@link org.apache.hadoop.fs.shell.Count}
*
*/
public
|
for
|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/request/target/SimpleTarget.java
|
{
"start": 3238,
"end": 3953
}
|
class ____ possible.
*
* @see <a href="http://bumptech.github.io/glide/doc/targets.html">Glide's Target docs page</a>
* @param <Z> The type of resource that this target will receive.
* @deprecated Use {@link CustomViewTarget} if loading the content into a view, the download API if
* in the background
* (http://bumptech.github.io/glide/doc/getting-started.html#background-threads), or a {@link
* CustomTarget} for any specialized use-cases. Using {@link SimpleTarget} or {@link BaseTarget}
* is unsafe if the user does not implement {@link #onLoadCleared}, resulting in recycled
* bitmaps being referenced from the UI and hard to root-cause crashes.
*/
@Deprecated
public abstract
|
whenver
|
java
|
apache__camel
|
test-infra/camel-test-infra-openai-mock/src/test/java/org/apache/camel/test/infra/openai/mock/OpenAIMockMultipleToolsTest.java
|
{
"start": 1219,
"end": 5356
}
|
class ____ {
@RegisterExtension
OpenAIMock openAIMock = new OpenAIMock().builder()
.when("What is the weather in london?")
.invokeTool("FindsTheLatitudeAndLongitudeOfAGivenCity")
.withParam("name", "London")
.andThenInvokeTool("ForecastsTheWeatherForTheGivenLatitudeAndLongitude")
.withParam("latitude", "51.50758961965397")
.withParam("longitude", "-0.13388057363742217")
.build();
@Test
void testInvokeToolAndThenInvokeTool() throws Exception {
HttpClient client = HttpClient.newHttpClient();
ObjectMapper objectMapper = new ObjectMapper();
// First request: User asks for weather in London
HttpRequest request1 = HttpRequest.newBuilder()
.uri(URI.create(openAIMock.getBaseUrl() + "/v1/chat/completions"))
.header("Content-Type", "application/json")
.POST(HttpRequest.BodyPublishers
.ofString("{\"messages\": [{\"role\": \"user\", \"content\": \"What is the weather in london?\"}]}"))
.build();
HttpResponse<String> response1 = client.send(request1, HttpResponse.BodyHandlers.ofString());
String responseBody1 = response1.body();
JsonNode responseJson1 = objectMapper.readTree(responseBody1);
JsonNode choice1 = responseJson1.path("choices").get(0);
JsonNode message1 = choice1.path("message");
// Assert first tool call
Assertions.assertEquals("assistant", message1.path("role").asText());
JsonNode toolCalls1 = message1.path("tool_calls");
Assertions.assertEquals(1, toolCalls1.size());
JsonNode toolCall1 = toolCalls1.get(0);
Assertions.assertEquals("FindsTheLatitudeAndLongitudeOfAGivenCity", toolCall1.path("function").path("name").asText());
Assertions.assertEquals("{\"name\":\"London\"}", toolCall1.path("function").path("arguments").asText());
String toolCallId1 = toolCall1.path("id").asText();
Assertions.assertEquals("tool_calls", choice1.path("finish_reason").asText());
// Second request: LLM provides tool output for the first tool call
String secondRequestBody = String.format(
"{\"messages\": [{\"role\": \"user\", \"content\": \"What is the weather in london?\"}, {\"role\":\"assistant\", \"tool_calls\": [{\"id\":\"%s\", \"type\":\"function\", \"function\":{\"name\":\"FindsTheLatitudeAndLongitudeOfAGivenCity\", \"arguments\":\"{\\\"name\\\":\\\"London\\\"}\"}}]}, {\"role\":\"tool\", \"tool_call_id\":\"%s\", \"content\":\"{\\\"latitude\\\": \\\"51.50758961965397\\\", \\\"longitude\\\": \\\"-0.13388057363742217\\\"}\"}]}",
toolCallId1, toolCallId1);
HttpRequest request2 = HttpRequest.newBuilder()
.uri(URI.create(openAIMock.getBaseUrl() + "/v1/chat/completions"))
.header("Content-Type", "application/json")
.POST(HttpRequest.BodyPublishers.ofString(secondRequestBody))
.build();
HttpResponse<String> response2 = client.send(request2, HttpResponse.BodyHandlers.ofString());
String responseBody2 = response2.body();
JsonNode responseJson2 = objectMapper.readTree(responseBody2);
JsonNode choice2 = responseJson2.path("choices").get(0);
JsonNode message2 = choice2.path("message");
// Assert second tool call
Assertions.assertEquals("assistant", message2.path("role").asText());
JsonNode toolCalls2 = message2.path("tool_calls");
Assertions.assertEquals(1, toolCalls2.size());
JsonNode toolCall2 = toolCalls2.get(0);
Assertions.assertEquals("ForecastsTheWeatherForTheGivenLatitudeAndLongitude",
toolCall2.path("function").path("name").asText());
Assertions.assertEquals("{\"latitude\":\"51.50758961965397\",\"longitude\":\"-0.13388057363742217\"}",
toolCall2.path("function").path("arguments").asText());
Assertions.assertEquals("tool_calls", choice2.path("finish_reason").asText());
}
}
|
OpenAIMockMultipleToolsTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/prefetch/TestBufferData.java
|
{
"start": 1514,
"end": 6295
}
|
class ____ extends AbstractHadoopTestBase {
@Test
public void testArgChecks() throws Exception {
// Should not throw.
ByteBuffer buffer = ByteBuffer.allocate(1);
BufferData data = new BufferData(1, buffer);
// Verify it throws correctly.
intercept(IllegalArgumentException.class,
"'blockNumber' must not be negative",
() -> new BufferData(-1, buffer));
intercept(IllegalArgumentException.class, "'buffer' must not be null",
() -> new BufferData(1, null));
intercept(IllegalArgumentException.class, "'actionFuture' must not be null",
() -> data.setPrefetch(null));
intercept(IllegalArgumentException.class, "'actionFuture' must not be null",
() -> data.setCaching(null));
intercept(IllegalArgumentException.class, "'states' must not be null",
() -> data.throwIfStateIncorrect((BufferData.State[]) null));
intercept(IllegalStateException.class,
"Expected buffer state to be 'READY or CACHING' but found",
() -> data.throwIfStateIncorrect(BufferData.State.READY,
BufferData.State.CACHING));
}
@Test
public void testValidStateUpdates() {
ByteBuffer buffer = ByteBuffer.allocate(1);
BufferData data = new BufferData(1, buffer);
assertEquals(BufferData.State.BLANK, data.getState());
CompletableFuture<Void> actionFuture = new CompletableFuture<>();
actionFuture.complete(null);
data.setPrefetch(actionFuture);
assertEquals(BufferData.State.PREFETCHING, data.getState());
assertNotNull(data.getActionFuture());
assertSame(actionFuture, data.getActionFuture());
CompletableFuture<Void> actionFuture2 = new CompletableFuture<>();
data.setCaching(actionFuture2);
assertEquals(BufferData.State.CACHING, data.getState());
assertNotNull(data.getActionFuture());
assertSame(actionFuture2, data.getActionFuture());
assertNotSame(actionFuture, actionFuture2);
List<BufferData.State> states = Arrays.asList(
BufferData.State.BLANK,
BufferData.State.PREFETCHING,
BufferData.State.CACHING,
BufferData.State.READY
);
BufferData data2 = new BufferData(1, buffer);
BufferData.State prevState = null;
for (BufferData.State state : states) {
if (prevState != null) {
assertEquals(prevState, data2.getState());
data2.updateState(state, prevState);
assertEquals(state, data2.getState());
}
prevState = state;
}
}
@Test
public void testInvalidStateUpdates() throws Exception {
CompletableFuture<Void> actionFuture = new CompletableFuture<>();
actionFuture.complete(null);
testInvalidStateUpdatesHelper(
(d) -> d.setPrefetch(actionFuture),
BufferData.State.BLANK,
BufferData.State.READY);
testInvalidStateUpdatesHelper(
(d) -> d.setCaching(actionFuture),
BufferData.State.PREFETCHING,
BufferData.State.READY);
}
@Test
public void testSetReady() throws Exception {
byte[] bytes1 = new byte[5];
initBytes(bytes1);
ByteBuffer buffer = ByteBuffer.allocate(10);
buffer.put(bytes1);
buffer.limit(bytes1.length);
BufferData data = new BufferData(1, buffer);
assertNotEquals(BufferData.State.READY, data.getState());
assertEquals(0, data.getChecksum());
data.setReady(BufferData.State.BLANK);
assertEquals(BufferData.State.READY, data.getState());
assertNotEquals(0, data.getChecksum());
// Verify that buffer cannot be modified once in READY state.
ExceptionAsserts.assertThrows(
ReadOnlyBufferException.class,
null,
() -> data.getBuffer().put(bytes1));
// Verify that buffer cannot be set to READY state more than once.
ExceptionAsserts.assertThrows(
IllegalStateException.class,
"Checksum cannot be changed once set",
() -> data.setReady(BufferData.State.BLANK));
// Verify that we detect post READY buffer modification.
buffer.array()[2] = (byte) 42;
ExceptionAsserts.assertThrows(
IllegalStateException.class,
"checksum changed after setReady()",
() -> data.setDone());
}
@Test
public void testChecksum() {
byte[] bytes1 = new byte[5];
byte[] bytes2 = new byte[10];
initBytes(bytes1);
initBytes(bytes2);
ByteBuffer buffer1 = ByteBuffer.wrap(bytes1);
ByteBuffer buffer2 = ByteBuffer.wrap(bytes2);
buffer2.limit(bytes1.length);
long checksum1 = BufferData.getChecksum(buffer1);
long checksum2 = BufferData.getChecksum(buffer2);
assertEquals(checksum1, checksum2);
}
private void initBytes(byte[] bytes) {
for (int i = 0; i < bytes.length; i++) {
bytes[i] = (byte) i;
}
}
@FunctionalInterface
public
|
TestBufferData
|
java
|
google__guice
|
core/src/com/google/inject/internal/CycleDetectingLock.java
|
{
"start": 693,
"end": 1265
}
|
class ____ to its size
* and complexity.
*
* @param <ID> Lock identification provided by the client, is returned unmodified to the client when
* lock cycle is detected to identify it. Only toString() needs to be implemented. Lock
* references this object internally, for the purposes of Garbage Collection you should not use
* heavy IDs. Lock is referenced by a lock factory as long as it's owned by a thread.
* @see SingletonScope
* @see com.google.inject.internal.CycleDetectingLock.CycleDetectingLockFactory
* @author timofeyb (Timothy Basanov)
*/
|
due
|
java
|
apache__flink
|
flink-table/flink-table-code-splitter/src/main/java/org/apache/flink/table/codesplit/BlockStatementSplitter.java
|
{
"start": 1683,
"end": 2922
}
|
class ____
* variable. Because of that, code must be preprocessed by {@link DeclarationRewriter} which
* converts all local variables extracted as to member variables.
*
* <p><i>Before</i>
*
* <pre><code>
* while (counter > 0) {
* int localA = a + 1000;
* System.out.println(localA);
* if (a > 0) {
* b = a * 2;
* c = b * 2;
* System.out.println(b);
* } else {
* b = a * 3;
* System.out.println(b);
* }
* counter--;
* }
*
* </code></pre>
*
* <p><i>After</i>
*
* <pre><code>
* while (counter > 0) {
* myFun_0_1(a, b, c);
* if (a > 0) {
* myFun_0_1_2(a, b, c);
* } else {
* myFun_0_1_3(a, b, c);
* }
* counter--;
* }
* </code></pre>
*
* <p>Where bodies of extracted "methods" are:
*
* <pre><code>
* myFun_0_1(int a, int b, int c) ->
* int localA = a + 1000;
* System.out.println(localA);
* </code></pre>
*
* <pre><code>
* myFun_0_1_3(int a, int b, int c) ->
* b = a * 2;
* c = b * 2;
* System.out.println(b);
* </code></pre>
*
* <pre><code>
* myFun_whileBody0_0_ifBody1(int a) ->
* b = a * 3;
* System.out.println(b);
* </code></pre>
*/
@Internal
public
|
member
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/clients/admin/ScramMechanismTest.java
|
{
"start": 939,
"end": 1517
}
|
class ____ {
@Test
public void testFromMechanismName() {
assertEquals(ScramMechanism.UNKNOWN, ScramMechanism.fromMechanismName("UNKNOWN"));
assertEquals(ScramMechanism.SCRAM_SHA_256, ScramMechanism.fromMechanismName("SCRAM-SHA-256"));
assertEquals(ScramMechanism.SCRAM_SHA_512, ScramMechanism.fromMechanismName("SCRAM-SHA-512"));
assertEquals(ScramMechanism.UNKNOWN, ScramMechanism.fromMechanismName("some string"));
assertEquals(ScramMechanism.UNKNOWN, ScramMechanism.fromMechanismName("scram-sha-256"));
}
}
|
ScramMechanismTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-pulsar/src/test/java/org/springframework/boot/pulsar/autoconfigure/Customizers.java
|
{
"start": 2539,
"end": 3625
}
|
class ____ implements AssertDelegateTarget {
private final List<C> customizers;
@SuppressWarnings("unchecked")
private CustomizersAssert(Object customizers) {
this.customizers = (customizers instanceof List) ? (List<C>) customizers : List.of((C) customizers);
}
/**
* Assert that the customize method is called in a specified order. It is expected
* that each customizer has set a unique value so the expected values can be used
* as a verify step.
* @param <V> the value type
* @param call the call the customizer makes
* @param expectedValues the expected values
*/
@SuppressWarnings("unchecked")
<V> void callsInOrder(BiConsumer<T, V> call, V... expectedValues) {
T target = mock(Customizers.this.targetClass);
BiConsumer<C, T> customizeAction = Customizers.this.customizeAction;
this.customizers.forEach((customizer) -> customizeAction.accept(customizer, target));
InOrder ordered = inOrder(target);
for (V expectedValue : expectedValues) {
call.accept(ordered.verify(target), expectedValue);
}
}
}
}
|
CustomizersAssert
|
java
|
apache__kafka
|
group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupCoordinatorShardTest.java
|
{
"start": 8470,
"end": 108761
}
|
class ____ {
@Test
public void testConsumerGroupHeartbeat() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
RequestContext context = requestContext(ApiKeys.CONSUMER_GROUP_HEARTBEAT);
ConsumerGroupHeartbeatRequestData request = new ConsumerGroupHeartbeatRequestData();
CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = new CoordinatorResult<>(
List.of(),
new ConsumerGroupHeartbeatResponseData()
);
when(groupMetadataManager.consumerGroupHeartbeat(
context,
request
)).thenReturn(result);
assertEquals(result, coordinator.consumerGroupHeartbeat(context, request));
}
@Test
public void testStreamsGroupHeartbeat() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
RequestContext context = requestContext(ApiKeys.STREAMS_GROUP_HEARTBEAT);
StreamsGroupHeartbeatRequestData request = new StreamsGroupHeartbeatRequestData();
CoordinatorResult<StreamsGroupHeartbeatResult, CoordinatorRecord> result = new CoordinatorResult<>(
List.of(),
new StreamsGroupHeartbeatResult(new StreamsGroupHeartbeatResponseData(), Map.of())
);
when(groupMetadataManager.streamsGroupHeartbeat(
context,
request
)).thenReturn(result);
assertEquals(result, coordinator.streamsGroupHeartbeat(context, request));
}
@Test
public void testCommitOffset() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
RequestContext context = requestContext(ApiKeys.OFFSET_COMMIT);
OffsetCommitRequestData request = new OffsetCommitRequestData();
CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> result = new CoordinatorResult<>(
List.of(),
new OffsetCommitResponseData()
);
when(offsetMetadataManager.commitOffset(
context,
request
)).thenReturn(result);
assertEquals(result, coordinator.commitOffset(context, request));
}
@Test
public void testCommitTransactionalOffset() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(new MockTime()),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
RequestContext context = requestContext(ApiKeys.TXN_OFFSET_COMMIT);
TxnOffsetCommitRequestData request = new TxnOffsetCommitRequestData();
CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> result = new CoordinatorResult<>(
List.of(),
new TxnOffsetCommitResponseData()
);
when(offsetMetadataManager.commitTransactionalOffset(
context,
request
)).thenReturn(result);
assertEquals(result, coordinator.commitTransactionalOffset(context, request));
}
@Test
public void testDeleteGroups() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
mock(CoordinatorMetrics.class),
mock(CoordinatorMetricsShard.class)
);
RequestContext context = requestContext(ApiKeys.DELETE_GROUPS);
List<String> groupIds = Arrays.asList("group-id-1", "group-id-2");
DeleteGroupsResponseData.DeletableGroupResultCollection expectedResultCollection = new DeleteGroupsResponseData.DeletableGroupResultCollection();
List<CoordinatorRecord> expectedRecords = new ArrayList<>();
for (String groupId : groupIds) {
expectedResultCollection.add(new DeleteGroupsResponseData.DeletableGroupResult().setGroupId(groupId));
expectedRecords.addAll(Arrays.asList(
GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(groupId, "topic-name", 0),
GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId)
));
}
CoordinatorResult<DeleteGroupsResponseData.DeletableGroupResultCollection, CoordinatorRecord> expectedResult = new CoordinatorResult<>(
expectedRecords,
expectedResultCollection
);
when(offsetMetadataManager.deleteAllOffsets(anyString(), anyList())).thenAnswer(invocation -> {
String groupId = invocation.getArgument(0);
List<CoordinatorRecord> records = invocation.getArgument(1);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(groupId, "topic-name", 0));
return 1;
});
// Mockito#when only stubs method returning non-void value, so we use Mockito#doAnswer instead.
doAnswer(invocation -> {
String groupId = invocation.getArgument(0);
List<CoordinatorRecord> records = invocation.getArgument(1);
records.add(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId));
return null;
}).when(groupMetadataManager).createGroupTombstoneRecordsAndCancelTimers(anyString(), anyList());
CoordinatorResult<DeleteGroupsResponseData.DeletableGroupResultCollection, CoordinatorRecord> coordinatorResult =
coordinator.deleteGroups(context, groupIds);
for (String groupId : groupIds) {
verify(groupMetadataManager, times(1)).validateDeleteGroup(ArgumentMatchers.eq(groupId));
verify(groupMetadataManager, times(1)).createGroupTombstoneRecordsAndCancelTimers(ArgumentMatchers.eq(groupId), anyList());
verify(offsetMetadataManager, times(1)).deleteAllOffsets(ArgumentMatchers.eq(groupId), anyList());
}
assertEquals(expectedResult, coordinatorResult);
}
@Test
public void testDeleteGroupsInvalidGroupId() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
mock(CoordinatorMetrics.class),
mock(CoordinatorMetricsShard.class)
);
RequestContext context = requestContext(ApiKeys.DELETE_GROUPS);
List<String> groupIds = Arrays.asList("group-id-1", "group-id-2", "group-id-3");
DeleteGroupsResponseData.DeletableGroupResultCollection expectedResultCollection =
new DeleteGroupsResponseData.DeletableGroupResultCollection(Arrays.asList(
new DeleteGroupsResponseData.DeletableGroupResult()
.setGroupId("group-id-1"),
new DeleteGroupsResponseData.DeletableGroupResult()
.setGroupId("group-id-2")
.setErrorCode(Errors.INVALID_GROUP_ID.code()),
new DeleteGroupsResponseData.DeletableGroupResult()
.setGroupId("group-id-3")
).iterator());
List<CoordinatorRecord> expectedRecords = Arrays.asList(
GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord("group-id-1", "topic-name", 0),
GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id-1"),
GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord("group-id-3", "topic-name", 0),
GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id-3")
);
CoordinatorResult<DeleteGroupsResponseData.DeletableGroupResultCollection, CoordinatorRecord> expectedResult = new CoordinatorResult<>(
expectedRecords,
expectedResultCollection
);
// Mockito#when only stubs method returning non-void value, so we use Mockito#doAnswer and Mockito#doThrow instead.
doThrow(Errors.INVALID_GROUP_ID.exception())
.when(groupMetadataManager).validateDeleteGroup(ArgumentMatchers.eq("group-id-2"));
doAnswer(invocation -> {
String groupId = invocation.getArgument(0);
List<CoordinatorRecord> records = invocation.getArgument(1);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(groupId, "topic-name", 0));
return null;
}).when(offsetMetadataManager).deleteAllOffsets(anyString(), anyList());
doAnswer(invocation -> {
String groupId = invocation.getArgument(0);
List<CoordinatorRecord> records = invocation.getArgument(1);
records.add(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId));
return null;
}).when(groupMetadataManager).createGroupTombstoneRecordsAndCancelTimers(anyString(), anyList());
CoordinatorResult<DeleteGroupsResponseData.DeletableGroupResultCollection, CoordinatorRecord> coordinatorResult =
coordinator.deleteGroups(context, groupIds);
for (String groupId : groupIds) {
verify(groupMetadataManager, times(1)).validateDeleteGroup(eq(groupId));
if (!groupId.equals("group-id-2")) {
verify(groupMetadataManager, times(1)).createGroupTombstoneRecordsAndCancelTimers(eq(groupId), anyList());
verify(offsetMetadataManager, times(1)).deleteAllOffsets(eq(groupId), anyList());
}
}
assertEquals(expectedResult, coordinatorResult);
}
@Test
public void testReplayOffsetCommit() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
OffsetCommitKey key = new OffsetCommitKey()
.setGroup("goo")
.setTopic("foo")
.setPartition(0);
OffsetCommitValue value = new OffsetCommitValue()
.setOffset(100L)
.setCommitTimestamp(12345L)
.setExpireTimestamp(6789L)
.setMetadata("Metadata")
.setLeaderEpoch(10);
coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
new LegacyOffsetCommitKey()
.setGroup("goo")
.setTopic("foo")
.setPartition(0),
new ApiMessageAndVersion(
new LegacyOffsetCommitValue()
.setOffset(100L)
.setCommitTimestamp(12345L)
.setMetadata("Metadata"),
(short) 0
)
));
coordinator.replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
key,
new ApiMessageAndVersion(value, (short) 0)
));
verify(offsetMetadataManager, times(1)).replay(
0L,
RecordBatch.NO_PRODUCER_ID,
new OffsetCommitKey()
.setGroup("goo")
.setTopic("foo")
.setPartition(0),
new OffsetCommitValue()
.setOffset(100L)
.setCommitTimestamp(12345L)
.setMetadata("Metadata")
);
verify(offsetMetadataManager, times(1)).replay(
1L,
RecordBatch.NO_PRODUCER_ID,
key,
value
);
}
@Test
public void testReplayTransactionalOffsetCommit() {
    // Transactional (producer id/epoch carrying) offset-commit records, both
    // legacy and current, must be forwarded to the offset metadata manager with
    // their producer ids preserved.
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        // Back the timer with the same Time instance as the shard, consistent
        // with all sibling tests (previously a stray `new MockTime()`).
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    OffsetCommitKey key = new OffsetCommitKey()
        .setGroup("goo")
        .setTopic("foo")
        .setPartition(0);
    OffsetCommitValue value = new OffsetCommitValue()
        .setOffset(100L)
        .setCommitTimestamp(12345L)
        .setExpireTimestamp(6789L)
        .setMetadata("Metadata")
        .setLeaderEpoch(10);
    // Legacy record written by producer 100.
    coordinator.replay(0L, 100L, (short) 0, CoordinatorRecord.record(
        new LegacyOffsetCommitKey()
            .setGroup("goo")
            .setTopic("foo")
            .setPartition(0),
        new ApiMessageAndVersion(
            new LegacyOffsetCommitValue()
                .setOffset(100L)
                .setCommitTimestamp(12345L)
                .setMetadata("Metadata"),
            (short) 0
        )
    ));
    // Current record written by producer 101.
    coordinator.replay(1L, 101L, (short) 1, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    // Legacy record is replayed as the converted current key/value types.
    verify(offsetMetadataManager, times(1)).replay(
        0L,
        100L,
        new OffsetCommitKey()
            .setGroup("goo")
            .setTopic("foo")
            .setPartition(0),
        new OffsetCommitValue()
            .setOffset(100L)
            .setCommitTimestamp(12345L)
            .setMetadata("Metadata")
    );
    verify(offsetMetadataManager, times(1)).replay(
        1L,
        101L,
        key,
        value
    );
}
@Test
public void testReplayOffsetCommitWithNullValue() {
    // Tombstones for both legacy and current offset-commit keys must reach the
    // offset metadata manager as a replay with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    OffsetCommitKey key = new OffsetCommitKey()
        .setGroup("goo")
        .setTopic("foo")
        .setPartition(0);
    // Legacy tombstone at offset 0; converted to the current key on replay.
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        new LegacyOffsetCommitKey()
            .setGroup("goo")
            .setTopic("foo")
            .setPartition(0)
    ));
    // Current tombstone at offset 1.
    shard.replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(offsetManager, times(1)).replay(
        0L,
        RecordBatch.NO_PRODUCER_ID,
        key,
        null
    );
    verify(offsetManager, times(1)).replay(
        1L,
        RecordBatch.NO_PRODUCER_ID,
        key,
        null
    );
}
@Test
public void testReplayConsumerGroupMetadata() {
    // A ConsumerGroupMetadata record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupMetadataKey key = new ConsumerGroupMetadataKey();
    ConsumerGroupMetadataValue value = new ConsumerGroupMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager, times(1)).replay(key, value);
}
@Test
public void testReplayConsumerGroupMetadataWithNullValue() {
    // A ConsumerGroupMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupMetadataKey key = new ConsumerGroupMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager, times(1)).replay(key, null);
}
@Test
public void testReplayConsumerGroupPartitionMetadata() {
    // A ConsumerGroupPartitionMetadata record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupPartitionMetadataKey key = new ConsumerGroupPartitionMetadataKey();
    ConsumerGroupPartitionMetadataValue value = new ConsumerGroupPartitionMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager, times(1)).replay(key, value);
}
@Test
public void testReplayConsumerGroupPartitionMetadataWithNullValue() {
    // A ConsumerGroupPartitionMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupPartitionMetadataKey key = new ConsumerGroupPartitionMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager, times(1)).replay(key, null);
}
@Test
public void testReplayConsumerGroupMemberMetadata() {
    // A ConsumerGroupMemberMetadata record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupMemberMetadataKey key = new ConsumerGroupMemberMetadataKey();
    ConsumerGroupMemberMetadataValue value = new ConsumerGroupMemberMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager, times(1)).replay(key, value);
}
@Test
public void testReplayConsumerGroupMemberMetadataWithNullValue() {
    // A ConsumerGroupMemberMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupMemberMetadataKey key = new ConsumerGroupMemberMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager, times(1)).replay(key, null);
}
@Test
public void testReplayConsumerGroupTargetAssignmentMetadata() {
    // A ConsumerGroupTargetAssignmentMetadata record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupTargetAssignmentMetadataKey key = new ConsumerGroupTargetAssignmentMetadataKey();
    ConsumerGroupTargetAssignmentMetadataValue value = new ConsumerGroupTargetAssignmentMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager, times(1)).replay(key, value);
}
@Test
public void testReplayConsumerGroupTargetAssignmentMetadataWithNullValue() {
    // A ConsumerGroupTargetAssignmentMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupTargetAssignmentMetadataKey key = new ConsumerGroupTargetAssignmentMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager, times(1)).replay(key, null);
}
@Test
public void testReplayConsumerGroupTargetAssignmentMember() {
    // A ConsumerGroupTargetAssignmentMember record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupTargetAssignmentMemberKey key = new ConsumerGroupTargetAssignmentMemberKey();
    ConsumerGroupTargetAssignmentMemberValue value = new ConsumerGroupTargetAssignmentMemberValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager, times(1)).replay(key, value);
}
@Test
public void testReplayConsumerGroupTargetAssignmentMemberKeyWithNullValue() {
    // A ConsumerGroupTargetAssignmentMember tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupTargetAssignmentMemberKey key = new ConsumerGroupTargetAssignmentMemberKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager, times(1)).replay(key, null);
}
@Test
public void testReplayConsumerGroupCurrentMemberAssignment() {
    // A ConsumerGroupCurrentMemberAssignment record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupCurrentMemberAssignmentKey key = new ConsumerGroupCurrentMemberAssignmentKey();
    ConsumerGroupCurrentMemberAssignmentValue value = new ConsumerGroupCurrentMemberAssignmentValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager, times(1)).replay(key, value);
}
@Test
public void testReplayConsumerGroupCurrentMemberAssignmentWithNullValue() {
    // A ConsumerGroupCurrentMemberAssignment tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    ConsumerGroupCurrentMemberAssignmentKey key = new ConsumerGroupCurrentMemberAssignmentKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager, times(1)).replay(key, null);
}
@Test
public void testReplayStreamsGroupMetadata() {
    // A StreamsGroupMetadata record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupMetadataKey key = new StreamsGroupMetadataKey();
    StreamsGroupMetadataValue value = new StreamsGroupMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager).replay(key, value);
}
@Test
public void testReplayStreamsGroupMetadataWithNullValue() {
    // A StreamsGroupMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupMetadataKey key = new StreamsGroupMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager).replay(key, null);
}
@Test
public void testReplayStreamsGroupTopology() {
    // A StreamsGroupTopology record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupTopologyKey key = new StreamsGroupTopologyKey();
    StreamsGroupTopologyValue value = new StreamsGroupTopologyValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager).replay(key, value);
}
@Test
public void testReplayStreamsGroupTopologyWithNullValue() {
    // A StreamsGroupTopology tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupTopologyKey key = new StreamsGroupTopologyKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager).replay(key, null);
}
@Test
public void testReplayStreamsGroupMemberMetadata() {
    // A StreamsGroupMemberMetadata record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupMemberMetadataKey key = new StreamsGroupMemberMetadataKey();
    StreamsGroupMemberMetadataValue value = new StreamsGroupMemberMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager).replay(key, value);
}
@Test
public void testReplayStreamsGroupMemberMetadataWithNullValue() {
    // A StreamsGroupMemberMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupMemberMetadataKey key = new StreamsGroupMemberMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager).replay(key, null);
}
@Test
public void testReplayStreamsGroupTargetAssignmentMetadata() {
    // A StreamsGroupTargetAssignmentMetadata record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupTargetAssignmentMetadataKey key = new StreamsGroupTargetAssignmentMetadataKey();
    StreamsGroupTargetAssignmentMetadataValue value = new StreamsGroupTargetAssignmentMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager).replay(key, value);
}
@Test
public void testReplayStreamsGroupTargetAssignmentMetadataWithNullValue() {
    // A StreamsGroupTargetAssignmentMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupTargetAssignmentMetadataKey key = new StreamsGroupTargetAssignmentMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager).replay(key, null);
}
@Test
public void testReplayStreamsGroupTargetAssignmentMember() {
    // A StreamsGroupTargetAssignmentMember record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupTargetAssignmentMemberKey key = new StreamsGroupTargetAssignmentMemberKey();
    StreamsGroupTargetAssignmentMemberValue value = new StreamsGroupTargetAssignmentMemberValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager).replay(key, value);
}
@Test
public void testReplayStreamsGroupTargetAssignmentMemberKeyWithNullValue() {
    // A StreamsGroupTargetAssignmentMember tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupTargetAssignmentMemberKey key = new StreamsGroupTargetAssignmentMemberKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager).replay(key, null);
}
@Test
public void testReplayStreamsGroupCurrentMemberAssignment() {
    // A StreamsGroupCurrentMemberAssignment record must be dispatched to the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupCurrentMemberAssignmentKey key = new StreamsGroupCurrentMemberAssignmentKey();
    StreamsGroupCurrentMemberAssignmentValue value = new StreamsGroupCurrentMemberAssignmentValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(groupManager).replay(key, value);
}
@Test
public void testReplayStreamsGroupCurrentMemberAssignmentWithNullValue() {
    // A StreamsGroupCurrentMemberAssignment tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    StreamsGroupCurrentMemberAssignmentKey key = new StreamsGroupCurrentMemberAssignmentKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager).replay(key, null);
}
@Test
public void testReplayKeyCannotBeNull() {
    // Building a record with a null key must fail with an NPE before replay dispatch.
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        mock(GroupMetadataManager.class),
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    assertThrows(NullPointerException.class, () ->
        shard.replay(
            0L,
            RecordBatch.NO_PRODUCER_ID,
            RecordBatch.NO_PRODUCER_EPOCH,
            CoordinatorRecord.record(null, null))
    );
}
@Test
public void testOnLoaded() {
    // Loading the shard must push the metadata image into the group metadata
    // manager and then signal that loading completed.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    CoordinatorMetadataImage image = CoordinatorMetadataImage.EMPTY;
    shard.onLoaded(image);
    verify(groupManager, times(1)).onNewMetadataImage(
        eq(image),
        any()
    );
    verify(groupManager, times(1)).onLoaded();
}
@Test
public void testReplayGroupMetadata() {
    // A classic GroupMetadata record (value version 4) must be dispatched to
    // the group metadata manager.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    GroupMetadataKey key = new GroupMetadataKey();
    GroupMetadataValue value = new GroupMetadataValue();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 4)
    ));
    verify(groupManager, times(1)).replay(key, value);
}
@Test
public void testReplayGroupMetadataWithNullValue() {
    // A classic GroupMetadata tombstone must be replayed with a null value.
    GroupMetadataManager groupManager = mock(GroupMetadataManager.class);
    GroupCoordinatorShard shard = new GroupCoordinatorShard(
        new LogContext(),
        groupManager,
        mock(OffsetMetadataManager.class),
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    GroupMetadataKey key = new GroupMetadataKey();
    shard.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.tombstone(
        key
    ));
    verify(groupManager, times(1)).replay(key, null);
}
@Test
public void testScheduleCleanupGroupMetadata() {
    // Verifies the lifecycle of the periodic group-metadata cleanup task:
    // scheduled on load, rescheduled after it fires, cancelled on unload.
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    Time mockTime = new MockTime();
    MockCoordinatorTimer<Void, CoordinatorRecord> timer = new MockCoordinatorTimer<>(mockTime);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        mockTime,
        timer,
        // Config with a 1000 ms offsets-retention check interval, so advancing
        // the mock clock by 1000 ms below fires the cleanup timeout.
        GroupCoordinatorConfigTest.createGroupCoordinatorConfig(4096, 1000L, 24 * 60),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    CoordinatorMetadataImage image = CoordinatorMetadataImage.EMPTY;
    // Confirm the cleanup is scheduled when the coordinator is initially loaded.
    coordinator.onLoaded(image);
    assertTrue(timer.contains(GROUP_EXPIRATION_KEY));
    // Confirm that it is rescheduled after completion.
    mockTime.sleep(1000L);
    List<MockCoordinatorTimer.ExpiredTimeout<Void, CoordinatorRecord>> tasks = timer.poll();
    assertEquals(1, tasks.size());
    assertTrue(timer.contains(GROUP_EXPIRATION_KEY));
    // Unloading the shard must cancel the pending cleanup task.
    coordinator.onUnloaded();
    assertFalse(timer.contains(GROUP_EXPIRATION_KEY));
}
@Test
public void testCleanupGroupMetadataForConsumerGroup() {
    // Verifies that cleanupGroupMetadata() expires offsets for each consumer
    // group, only attempts group deletion for groups that had all offsets
    // expired, and reschedules itself afterwards.
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    Time mockTime = new MockTime();
    MockCoordinatorTimer<Void, CoordinatorRecord> timer = new MockCoordinatorTimer<>(mockTime);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        mockTime,
        timer,
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    CoordinatorRecord offsetCommitTombstone = GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord("group-id", "topic", 0);
    CoordinatorRecord groupMetadataTombstone = GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id");
    // A single captor is reused across stubs; getValue() returns the most
    // recently captured records list so the Answers below mutate the list the
    // coordinator passed in.
    @SuppressWarnings("unchecked")
    ArgumentCaptor<List<CoordinatorRecord>> recordsCapture = ArgumentCaptor.forClass(List.class);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ConsumerGroup group1 = new ConsumerGroup(snapshotRegistry, "group-id");
    ConsumerGroup group2 = new ConsumerGroup(snapshotRegistry, "other-group-id");
    when(groupMetadataManager.groupIds()).thenReturn(Set.of("group-id", "other-group-id"));
    when(groupMetadataManager.group("group-id")).thenReturn(group1);
    when(groupMetadataManager.group("other-group-id")).thenReturn(group2);
    // "group-id": all offsets expire (returns true) and a tombstone is appended.
    when(offsetMetadataManager.cleanupExpiredOffsets(eq("group-id"), recordsCapture.capture()))
        .thenAnswer(invocation -> {
            List<CoordinatorRecord> records = recordsCapture.getValue();
            records.add(offsetCommitTombstone);
            return true;
        });
    // "other-group-id": nothing expired, so maybeDeleteGroup must not be called for it.
    when(offsetMetadataManager.cleanupExpiredOffsets("other-group-id", List.of())).thenReturn(false);
    // Group deletion for "group-id" appends the group metadata tombstone.
    doAnswer(invocation -> {
        List<CoordinatorRecord> records = recordsCapture.getValue();
        records.add(groupMetadataTombstone);
        return null;
    }).when(groupMetadataManager).maybeDeleteGroup(eq("group-id"), recordsCapture.capture());
    assertFalse(timer.contains(GROUP_EXPIRATION_KEY));
    CoordinatorResult<Void, CoordinatorRecord> result = coordinator.cleanupGroupMetadata();
    // The cleanup task reschedules itself after running.
    assertTrue(timer.contains(GROUP_EXPIRATION_KEY));
    // Offset tombstone first, then the group tombstone, in append order.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(offsetCommitTombstone, groupMetadataTombstone);
    assertEquals(expectedRecords, result.records());
    assertNull(result.response());
    assertNull(result.appendFuture());
    verify(groupMetadataManager, times(1)).groupIds();
    verify(offsetMetadataManager, times(1)).cleanupExpiredOffsets(eq("group-id"), any());
    verify(offsetMetadataManager, times(1)).cleanupExpiredOffsets(eq("other-group-id"), any());
    verify(groupMetadataManager, times(1)).maybeDeleteGroup(eq("group-id"), any());
    verify(groupMetadataManager, times(0)).maybeDeleteGroup(eq("other-group-id"), any());
}
@Test
public void testCleanupGroupMetadataForShareGroup() {
    // Share groups have no committed offsets to expire: the cleanup pass must
    // skip them entirely and produce no records.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    Time time = new MockTime();
    MockCoordinatorTimer<Void, CoordinatorRecord> coordinatorTimer = new MockCoordinatorTimer<>(time);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        time,
        coordinatorTimer,
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    ShareGroup shareGroup = new ShareGroup(new SnapshotRegistry(new LogContext()), "group-id");
    when(metadataManager.groupIds()).thenReturn(Set.of("group-id"));
    when(metadataManager.group("group-id")).thenReturn(shareGroup);

    // Running the cleanup must (re)schedule the expiration task on the timer.
    assertFalse(coordinatorTimer.contains(GROUP_EXPIRATION_KEY));
    CoordinatorResult<Void, CoordinatorRecord> result = coordinator.cleanupGroupMetadata();
    assertTrue(coordinatorTimer.contains(GROUP_EXPIRATION_KEY));

    // Nothing is written and no response/future is attached.
    assertEquals(List.of(), result.records());
    assertNull(result.response());
    assertNull(result.appendFuture());

    verify(metadataManager, times(1)).groupIds();
    verify(offsetManager, times(0)).cleanupExpiredOffsets(eq("group-id"), any());
    verify(metadataManager, times(0)).maybeDeleteGroup(eq("group-id"), any());
}
@Test
public void testScheduleGroupSizeCounter() {
    // Verifies that loading the shard schedules the periodic group-size gauge
    // update, and that the task re-schedules itself after it fires.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorConfig config = mock(GroupCoordinatorConfig.class);
    when(config.offsetsRetentionCheckIntervalMs()).thenReturn(60 * 60 * 1000L);
    MockTime mockTime = new MockTime();
    MockCoordinatorTimer<Void, CoordinatorRecord> timer = new MockCoordinatorTimer<>(mockTime);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        timer,
        config,
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    coordinator.onLoaded(CoordinatorMetadataImage.EMPTY);

    // Loading schedules the counter.
    assertEquals(
        DEFAULT_GROUP_GAUGES_UPDATE_INTERVAL_MS,
        timer.timeout(GROUP_SIZE_COUNTER_KEY).deadlineMs() - mockTime.milliseconds()
    );

    // Firing the task updates the counter...
    mockTime.sleep(DEFAULT_GROUP_GAUGES_UPDATE_INTERVAL_MS + 1);
    timer.poll();
    verify(metadataManager, times(1)).updateGroupSizeCounter();

    // ...and schedules the next run.
    assertEquals(
        DEFAULT_GROUP_GAUGES_UPDATE_INTERVAL_MS,
        timer.timeout(GROUP_SIZE_COUNTER_KEY).deadlineMs() - mockTime.milliseconds()
    );
}
@ParameterizedTest
@EnumSource(value = TransactionResult.class)
public void testReplayEndTransactionMarker(TransactionResult result) {
    // End-transaction markers must be forwarded to the offset metadata
    // manager for every possible TransactionResult.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    coordinator.replayEndTransactionMarker(100L, (short) 5, result);

    // The producer epoch is not part of the downstream call.
    verify(offsetManager, times(1)).replayEndTransactionMarker(100L, result);
}
@Test
public void testOnPartitionsDeleted() {
    // Deleting topic partitions delegates to the offset metadata manager and
    // surfaces its tombstone records in the coordinator result.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    TopicPartition deletedPartition = new TopicPartition("foo", 0);
    List<CoordinatorRecord> tombstones = List.of(
        GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord("group", "foo", 0)
    );
    when(offsetManager.onPartitionsDeleted(List.of(deletedPartition))).thenReturn(tombstones);

    CoordinatorResult<Void, CoordinatorRecord> result =
        coordinator.onPartitionsDeleted(List.of(deletedPartition));

    assertEquals(tombstones, result.records());
    assertNull(result.response());
}
@Test
public void testOnUnloaded() {
    // Unloading the shard must cancel all pending timers and propagate the
    // unload to the group metadata manager.
    Time time = new MockTime();
    MockCoordinatorTimer<Void, CoordinatorRecord> timer = new MockCoordinatorTimer<>(time);
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        time,
        timer,
        GroupCoordinatorConfigTest.createGroupCoordinatorConfig(4096, 1000L, 24 * 60),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    coordinator.onUnloaded();

    assertEquals(0, timer.size());
    verify(metadataManager, times(1)).onUnloaded();
}
@Test
public void testShareGroupHeartbeat() {
    // The shard is a thin pass-through for share-group heartbeats: the
    // metadata manager's result is returned unchanged.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    RequestContext context = requestContext(ApiKeys.SHARE_GROUP_HEARTBEAT);
    ShareGroupHeartbeatRequestData request = new ShareGroupHeartbeatRequestData();
    CoordinatorResult<Map.Entry<ShareGroupHeartbeatResponseData, Optional<InitializeShareGroupStateParameters>>, CoordinatorRecord> expected =
        new CoordinatorResult<>(
            List.of(),
            Map.entry(new ShareGroupHeartbeatResponseData(), Optional.empty())
        );
    when(metadataManager.shareGroupHeartbeat(context, request)).thenReturn(expected);

    assertEquals(expected, coordinator.shareGroupHeartbeat(context, request));
}
@Test
public void testReplayShareGroupMetadata() {
    // Replaying a ShareGroupMetadata record forwards key and value to the
    // group metadata manager.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    ShareGroupMetadataKey key = new ShareGroupMetadataKey();
    ShareGroupMetadataValue value = new ShareGroupMetadataValue();
    CoordinatorRecord record = CoordinatorRecord.record(key, new ApiMessageAndVersion(value, (short) 0));

    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, record);

    verify(metadataManager, times(1)).replay(key, value);
}
@Test
public void testReplayShareGroupMetadataWithNullValue() {
    // A ShareGroupMetadata tombstone is replayed as a null value.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    ShareGroupMetadataKey key = new ShareGroupMetadataKey();
    CoordinatorRecord tombstone = CoordinatorRecord.tombstone(key);

    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, tombstone);

    verify(metadataManager, times(1)).replay(key, null);
}
@Test
public void testReplayShareGroupMemberMetadata() {
    // Replaying a ShareGroupMemberMetadata record forwards key and value to
    // the group metadata manager.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    ShareGroupMemberMetadataKey key = new ShareGroupMemberMetadataKey();
    ShareGroupMemberMetadataValue value = new ShareGroupMemberMetadataValue();
    CoordinatorRecord record = CoordinatorRecord.record(key, new ApiMessageAndVersion(value, (short) 0));

    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, record);

    verify(metadataManager, times(1)).replay(key, value);
}
@Test
public void testReplayShareGroupMemberMetadataWithNullValue() {
    // A ShareGroupMemberMetadata tombstone is replayed as a null value.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    ShareGroupMemberMetadataKey key = new ShareGroupMemberMetadataKey();
    CoordinatorRecord tombstone = CoordinatorRecord.tombstone(key);

    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, tombstone);

    verify(metadataManager, times(1)).replay(key, null);
}
/**
 * Replaying a ConsumerGroupRegularExpression record must forward both key
 * and value to the group metadata manager.
 */
@Test
public void testReplayConsumerGroupRegularExpression() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );

    ConsumerGroupRegularExpressionKey key = new ConsumerGroupRegularExpressionKey()
        .setGroupId("group")
        .setRegularExpression("ab*");
    // List.of instead of Arrays.asList: immutable and consistent with the
    // collection factories used throughout the rest of this file.
    ConsumerGroupRegularExpressionValue value = new ConsumerGroupRegularExpressionValue()
        .setTopics(List.of("abc", "abcd"))
        .setVersion(10L)
        .setTimestamp(12345L);

    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, CoordinatorRecord.record(
        key,
        new ApiMessageAndVersion(value, (short) 0)
    ));

    verify(groupMetadataManager, times(1)).replay(key, value);
}
@Test
public void testReplayConsumerGroupRegularExpressionTombstone() {
    // A ConsumerGroupRegularExpression tombstone is replayed as a null value.
    GroupMetadataManager metadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        metadataManager,
        offsetManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );

    ConsumerGroupRegularExpressionKey key = new ConsumerGroupRegularExpressionKey()
        .setGroupId("group")
        .setRegularExpression("ab*");
    CoordinatorRecord tombstone = CoordinatorRecord.tombstone(key);

    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, tombstone);

    verify(metadataManager, times(1)).replay(key, null);
}
/**
 * Building share-partition delete requests: a valid, empty share group
 * yields a (params, NONE) entry, while an unknown group id is skipped.
 * An empty input list yields an empty result without touching the manager.
 */
@Test
public void testSharePartitionDeleteRequests() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    ShareGroup shareGroup = new ShareGroup(new SnapshotRegistry(mock(LogContext.class)), groupId);
    when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
    // Unknown group ids raise GroupIdNotFoundException and must be skipped.
    when(groupMetadataManager.shareGroup(eq("non-share-group"))).thenThrow(GroupIdNotFoundException.class);
    TopicData<PartitionIdData> topicData = new TopicData<>(Uuid.randomUuid(),
        List.of(
            PartitionFactory.newPartitionIdData(0),
            PartitionFactory.newPartitionIdData(1)
        ));
    DeleteShareGroupStateParameters params = new DeleteShareGroupStateParameters.Builder()
        .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder<PartitionIdData>()
            .setGroupId(groupId)
            .setTopicsData(List.of(topicData))
            .build())
        .build();
    when(groupMetadataManager.shareGroupBuildPartitionDeleteRequest(eq(groupId), anyList())).thenReturn(Optional.of(params));
    // Only the valid share group contributes an entry; its error is NONE.
    CoordinatorResult<Map<String, Map.Entry<DeleteShareGroupStateParameters, Errors>>, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(List.of(), Map.of(groupId, Map.entry(params, Errors.NONE)));
    assertEquals(expectedResult, coordinator.sharePartitionDeleteRequests(List.of(groupId, "non-share-group")));
    verify(groupMetadataManager, times(1)).shareGroup(eq(groupId));
    verify(groupMetadataManager, times(1)).shareGroup(eq("non-share-group"));
    verify(groupMetadataManager, times(1)).shareGroupBuildPartitionDeleteRequest(eq(groupId), anyList());
    // An empty group-id list short-circuits: no manager interaction at all.
    Mockito.reset(groupMetadataManager);
    expectedResult = new CoordinatorResult<>(List.of(), Map.of());
    assertEquals(
        expectedResult,
        coordinator.sharePartitionDeleteRequests(List.of())
    );
    verify(groupMetadataManager, times(0)).group(eq(groupId));
    verify(groupMetadataManager, times(0)).group(eq("non-share-group"));
    verify(groupMetadataManager, times(0)).shareGroupBuildPartitionDeleteRequest(eq(groupId), anyList());
}
/**
 * A non-empty share group fails validateDeleteGroup; the result then maps
 * the group to (EMPTY_PARAMS, NON_EMPTY_GROUP-equivalent error) and no
 * delete request is built for it.
 */
@Test
public void testSharePartitionDeleteRequestsNonEmptyShareGroup() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    ShareGroup shareGroup = mock(ShareGroup.class);
    // Simulate a group that still has members/state.
    doThrow(new GroupNotEmptyException("bad stuff")).when(shareGroup).validateDeleteGroup();
    when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
    CoordinatorResult<Map<String, Map.Entry<DeleteShareGroupStateParameters, Errors>>, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(
            List.of(),
            Map.of(
                groupId,
                Map.entry(DeleteShareGroupStateParameters.EMPTY_PARAMS, Errors.forException(new GroupNotEmptyException("bad stuff")))
            )
        );
    assertEquals(expectedResult, coordinator.sharePartitionDeleteRequests(List.of(groupId)));
    verify(groupMetadataManager, times(1)).shareGroup(eq(groupId));
    // Not called because of NON-EMPTY group.
    verify(groupMetadataManager, times(0)).shareGroupBuildPartitionDeleteRequest(eq(groupId), anyList());
    // An empty group-id list short-circuits: no manager interaction at all.
    Mockito.reset(groupMetadataManager);
    expectedResult = new CoordinatorResult<>(List.of(), Map.of());
    assertEquals(
        expectedResult,
        coordinator.sharePartitionDeleteRequests(List.of())
    );
    verify(groupMetadataManager, times(0)).group(eq("share-group"));
    verify(groupMetadataManager, times(0)).shareGroupBuildPartitionDeleteRequest(eq(groupId), anyList());
}
/**
 * Initiating a share-group offsets delete for an unknown group id must
 * return the GroupIdNotFoundException's error code/message and never reach
 * the eligibility computation.
 */
@Test
public void testInitiateDeleteShareGroupOffsetsGroupNotFound() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    DeleteShareGroupOffsetsRequestData requestData = new DeleteShareGroupOffsetsRequestData()
        .setGroupId(groupId)
        .setTopics(List.of(new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
            .setTopicName("topic-1")
        ));
    GroupIdNotFoundException exception = new GroupIdNotFoundException("group Id not found");
    doThrow(exception).when(groupMetadataManager).shareGroup(eq(groupId));
    // The holder carries the translated error; no records are written.
    CoordinatorResult<GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(
            List.of(),
            new GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder(Errors.forException(exception).code(), exception.getMessage())
        );
    assertEquals(expectedResult, coordinator.initiateDeleteShareGroupOffsets(groupId, requestData));
    verify(groupMetadataManager, times(1)).shareGroup(eq(groupId));
    // Not called because of Group not found.
    verify(groupMetadataManager, times(0)).sharePartitionsEligibleForOffsetDeletion(any(), any(), any(), any());
}
/**
 * Initiating a share-group offsets delete for a group that is not empty
 * must surface the GroupNotEmptyException's error code/message and never
 * reach the eligibility computation.
 */
@Test
public void testInitiateDeleteShareGroupOffsetsNonEmptyShareGroup() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    DeleteShareGroupOffsetsRequestData requestData = new DeleteShareGroupOffsetsRequestData()
        .setGroupId(groupId)
        .setTopics(List.of(new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
            .setTopicName("topic-1")
        ));
    ShareGroup shareGroup = mock(ShareGroup.class);
    GroupNotEmptyException exception = new GroupNotEmptyException("group is not empty");
    doThrow(exception).when(shareGroup).validateDeleteGroup();
    when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
    CoordinatorResult<GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(
            List.of(),
            new GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder(Errors.forException(exception).code(), exception.getMessage())
        );
    assertEquals(expectedResult, coordinator.initiateDeleteShareGroupOffsets(groupId, requestData));
    verify(groupMetadataManager, times(1)).shareGroup(eq(groupId));
    // Not called because the group is not empty.
    verify(groupMetadataManager, times(0)).sharePartitionsEligibleForOffsetDeletion(any(), any(), any(), any());
}
/**
 * When no share partitions are eligible for offset deletion, the initiate
 * call succeeds with Errors.NONE but carries only the per-topic error
 * responses populated by the eligibility computation.
 */
@Test
public void testInitiateDeleteShareGroupOffsetsEmptyResult() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    String topicName = "topic-1";
    Uuid topicId = Uuid.randomUuid();
    DeleteShareGroupOffsetsRequestData requestData = new DeleteShareGroupOffsetsRequestData()
        .setGroupId(groupId)
        .setTopics(List.of(new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
            .setTopicName(topicName)
        ));
    ShareGroup shareGroup = mock(ShareGroup.class);
    doNothing().when(shareGroup).validateDeleteGroup();
    when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
    List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> errorTopicResponseList = List.of(
        new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic()
            .setTopicName(topicName)
            .setTopicId(topicId)
            .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
            .setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message())
    );
    List<CoordinatorRecord> records = new ArrayList<>();
    // The real method mutates its error-list argument (index 2); mimic that
    // here and return an empty eligible-partitions list.
    when(groupMetadataManager.sharePartitionsEligibleForOffsetDeletion(eq(groupId), eq(requestData), any(), any()))
        .thenAnswer(invocation -> {
            List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> inputList = invocation.getArgument(2);
            inputList.addAll(errorTopicResponseList);
            return List.of();
        });
    CoordinatorResult<GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(
            records,
            new GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder(Errors.NONE.code(), null, errorTopicResponseList)
        );
    assertEquals(expectedResult, coordinator.initiateDeleteShareGroupOffsets(groupId, requestData));
    verify(groupMetadataManager, times(1)).shareGroup(eq(groupId));
    verify(groupMetadataManager, times(1)).sharePartitionsEligibleForOffsetDeletion(any(), any(), any(), any());
}
/**
 * Happy path for initiating a share-group offsets delete: both requested
 * topics are eligible, a state-partition-metadata record is written, and
 * the holder carries the persister delete-state parameters.
 */
@Test
public void testInitiateDeleteShareGroupOffsetsSuccess() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    String topicName1 = "topic-1";
    Uuid topicId1 = Uuid.randomUuid();
    String topicName2 = "topic-2";
    Uuid topicId2 = Uuid.randomUuid();
    int partition = 0;
    DeleteShareGroupOffsetsRequestData requestData = new DeleteShareGroupOffsetsRequestData()
        .setGroupId(groupId)
        .setTopics(List.of(
            new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
                .setTopicName(topicName1),
            new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
                .setTopicName(topicName2)
        ));
    ShareGroup shareGroup = mock(ShareGroup.class);
    doNothing().when(shareGroup).validateDeleteGroup();
    when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
    // The eligible partitions the mocked manager will report for both topics.
    List<DeleteShareGroupStateRequestData.DeleteStateData> deleteShareGroupStateRequestTopicsData =
        List.of(
            new DeleteShareGroupStateRequestData.DeleteStateData()
                .setTopicId(topicId1)
                .setPartitions(List.of(
                    new DeleteShareGroupStateRequestData.PartitionData()
                        .setPartition(partition)
                )),
            new DeleteShareGroupStateRequestData.DeleteStateData()
                .setTopicId(topicId2)
                .setPartitions(List.of(
                    new DeleteShareGroupStateRequestData.PartitionData()
                        .setPartition(partition)
                ))
        );
    // The record the manager is expected to append: both topics move into
    // the "deleting" map of the state partition metadata.
    List<CoordinatorRecord> expectedRecords = List.of(
        GroupCoordinatorRecordHelpers.newShareGroupStatePartitionMetadataRecord(
            groupId,
            Map.of(),
            Map.of(),
            Map.of(
                topicId1, topicName1,
                topicId2, topicName2
            )
        )
    );
    // The real method mutates its records argument (index 3); mimic that here.
    when(groupMetadataManager.sharePartitionsEligibleForOffsetDeletion(eq(groupId), eq(requestData), any(), any()))
        .thenAnswer(invocation -> {
            List<CoordinatorRecord> records = invocation.getArgument(3);
            records.addAll(expectedRecords);
            return deleteShareGroupStateRequestTopicsData;
        });
    CoordinatorResult<GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(
            expectedRecords,
            new GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder(
                Errors.NONE.code(),
                null,
                List.of(),
                DeleteShareGroupStateParameters.from(
                    new DeleteShareGroupStateRequestData()
                        .setGroupId(requestData.groupId())
                        .setTopics(deleteShareGroupStateRequestTopicsData)
                ))
        );
    assertEquals(expectedResult, coordinator.initiateDeleteShareGroupOffsets(groupId, requestData));
    verify(groupMetadataManager, times(1)).shareGroup(eq(groupId));
    verify(groupMetadataManager, times(1)).sharePartitionsEligibleForOffsetDeletion(any(), any(), any(), any());
}
/**
 * Mixed result for initiating a share-group offsets delete: topic-1 is
 * eligible and ends up in the persister request/record, while topic-2 is
 * reported back as a per-topic UNKNOWN_TOPIC_OR_PARTITION error. The call
 * still succeeds overall with Errors.NONE.
 */
@Test
public void testInitiateDeleteShareGroupOffsetsSuccessWithErrorTopics() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    String topicName1 = "topic-1";
    Uuid topicId1 = Uuid.randomUuid();
    String topicName2 = "topic-2";
    Uuid topicId2 = Uuid.randomUuid();
    int partition = 0;
    DeleteShareGroupOffsetsRequestData requestData = new DeleteShareGroupOffsetsRequestData()
        .setGroupId(groupId)
        .setTopics(List.of(
            new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
                .setTopicName(topicName1),
            new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
                .setTopicName(topicName2)
        ));
    ShareGroup shareGroup = mock(ShareGroup.class);
    doNothing().when(shareGroup).validateDeleteGroup();
    when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
    // Only topic-1 is eligible for state deletion.
    List<DeleteShareGroupStateRequestData.DeleteStateData> deleteShareGroupStateRequestTopicsData =
        List.of(
            new DeleteShareGroupStateRequestData.DeleteStateData()
                .setTopicId(topicId1)
                .setPartitions(List.of(
                    new DeleteShareGroupStateRequestData.PartitionData()
                        .setPartition(partition)
                ))
        );
    // topic-2 is surfaced back to the caller as a per-topic error.
    List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> errorTopicResponseList =
        List.of(
            new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic()
                .setTopicName(topicName2)
                .setTopicId(topicId2)
                .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
                .setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message())
        );
    // Only topic-1 appears in the deleting map of the metadata record.
    List<CoordinatorRecord> expectedRecord = List.of(
        GroupCoordinatorRecordHelpers.newShareGroupStatePartitionMetadataRecord(
            groupId,
            Map.of(),
            Map.of(),
            Map.of(
                topicId1, topicName1
            )
        )
    );
    // The real method mutates its error-list (index 2) and records (index 3)
    // arguments; mimic both side effects here.
    when(groupMetadataManager.sharePartitionsEligibleForOffsetDeletion(eq(groupId), eq(requestData), any(), any()))
        .thenAnswer(invocation -> {
            List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> inputList = invocation.getArgument(2);
            inputList.addAll(errorTopicResponseList);
            List<CoordinatorRecord> records = invocation.getArgument(3);
            records.addAll(expectedRecord);
            return deleteShareGroupStateRequestTopicsData;
        });
    CoordinatorResult<GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(
            expectedRecord,
            new GroupCoordinatorShard.DeleteShareGroupOffsetsResultHolder(
                Errors.NONE.code(),
                null,
                errorTopicResponseList,
                DeleteShareGroupStateParameters.from(
                    new DeleteShareGroupStateRequestData()
                        .setGroupId(requestData.groupId())
                        .setTopics(deleteShareGroupStateRequestTopicsData)
                ))
        );
    assertEquals(expectedResult, coordinator.initiateDeleteShareGroupOffsets(groupId, requestData));
    verify(groupMetadataManager, times(1)).shareGroup(eq(groupId));
    verify(groupMetadataManager, times(1)).sharePartitionsEligibleForOffsetDeletion(any(), any(), any(), any());
}
/**
 * Completing a share-group offsets delete: the manager's per-topic results
 * are returned in the response and its appended records (clearing the
 * deleting map) become the result's records.
 */
@Test
public void testCompleteDeleteShareGroupOffsetsSuccess() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    String groupId = "share-group";
    String topicName1 = "topic-1";
    Uuid topicId1 = Uuid.randomUuid();
    String topicName2 = "topic-2";
    Uuid topicId2 = Uuid.randomUuid();
    Map<Uuid, String> topics = Map.of(
        topicId1, topicName1,
        topicId2, topicName2
    );
    // NOTE(review): this shareGroup stubbing appears unused by
    // completeDeleteShareGroupOffsets (only completeDeleteShareGroupOffsets
    // is verified below) — confirm whether it can be removed.
    ShareGroup shareGroup = mock(ShareGroup.class);
    doNothing().when(shareGroup).validateDeleteGroup();
    when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
    List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> resultTopics = List.of(
        new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic()
            .setTopicId(topicId1)
            .setTopicName(topicName1)
            .setErrorCode(Errors.NONE.code())
            .setErrorMessage(null),
        new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic()
            .setTopicId(topicId2)
            .setTopicName(topicName2)
            .setErrorCode(Errors.NONE.code())
            .setErrorMessage(null)
    );
    // After completion the deleting map is emptied again.
    List<CoordinatorRecord> expectedRecords = List.of(
        GroupCoordinatorRecordHelpers.newShareGroupStatePartitionMetadataRecord(
            groupId,
            Map.of(),
            Map.of(),
            Map.of()
        )
    );
    // The real method mutates its records argument (index 2); mimic that here.
    when(groupMetadataManager.completeDeleteShareGroupOffsets(eq(groupId), eq(topics), any()))
        .thenAnswer(invocation -> {
            List<CoordinatorRecord> records = invocation.getArgument(2);
            records.addAll(expectedRecords);
            return resultTopics;
        });
    CoordinatorResult<DeleteShareGroupOffsetsResponseData, CoordinatorRecord> expectedResult =
        new CoordinatorResult<>(
            expectedRecords,
            new DeleteShareGroupOffsetsResponseData()
                .setResponses(resultTopics)
        );
    assertEquals(expectedResult, coordinator.completeDeleteShareGroupOffsets(groupId, topics, List.of()));
    verify(groupMetadataManager, times(1)).completeDeleteShareGroupOffsets(any(), any(), any());
}
@Test
public void testCompleteDeleteShareGroupOffsetsSuccessWithErrorTopics() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
String groupId = "share-group";
String topicName1 = "topic-1";
Uuid topicId1 = Uuid.randomUuid();
String topicName2 = "topic-2";
Uuid topicId2 = Uuid.randomUuid();
String topicName3 = "topic-3";
Uuid topicId3 = Uuid.randomUuid();
Map<Uuid, String> topics = Map.of(
topicId1, topicName1,
topicId2, topicName2
);
ShareGroup shareGroup = mock(ShareGroup.class);
doNothing().when(shareGroup).validateDeleteGroup();
when(groupMetadataManager.shareGroup(eq(groupId))).thenReturn(shareGroup);
List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> resultTopics = List.of(
new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic()
.setTopicId(topicId1)
.setTopicName(topicName1)
.setErrorCode(Errors.NONE.code())
.setErrorMessage(null),
new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic()
.setTopicId(topicId2)
.setTopicName(topicName2)
.setErrorCode(Errors.NONE.code())
.setErrorMessage(null)
);
List<CoordinatorRecord> expectedRecords = List.of(
GroupCoordinatorRecordHelpers.newShareGroupStatePartitionMetadataRecord(
groupId,
Map.of(),
Map.of(),
Map.of()
)
);
when(groupMetadataManager.completeDeleteShareGroupOffsets(eq(groupId), eq(topics), any()))
.thenAnswer(invocation -> {
List<CoordinatorRecord> records = invocation.getArgument(2);
records.addAll(expectedRecords);
return resultTopics;
});
List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> errorTopicResponseList = new ArrayList<>();
errorTopicResponseList.add(
new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic()
.setTopicId(topicId3)
.setTopicName(topicName3)
.setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
.setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message())
);
List<DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic> expectedResultTopics = new ArrayList<>(resultTopics);
expectedResultTopics.addAll(errorTopicResponseList);
CoordinatorResult<DeleteShareGroupOffsetsResponseData, CoordinatorRecord> expectedResult =
new CoordinatorResult<>(
expectedRecords,
new DeleteShareGroupOffsetsResponseData()
.setResponses(expectedResultTopics)
);
assertEquals(expectedResult, coordinator.completeDeleteShareGroupOffsets(groupId, topics, errorTopicResponseList));
verify(groupMetadataManager, times(1)).completeDeleteShareGroupOffsets(any(), any(), any());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testFetchOffsets(boolean fetchAllOffsets) {
var offsetMetadataManager = mock(OffsetMetadataManager.class);
var coordinator = new GroupCoordinatorShard(
new LogContext(),
mock(GroupMetadataManager.class),
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
mock(CoordinatorMetrics.class),
mock(CoordinatorMetricsShard.class)
);
var request = new OffsetFetchRequestData.OffsetFetchRequestGroup()
.setGroupId("foo");
if (fetchAllOffsets) {
request.setTopics(null);
} else {
request.setTopics(List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics()
.setName("foo")
.setPartitionIndexes(List.of(0))
));
}
var result = new OffsetFetchResponseData.OffsetFetchResponseGroup()
.setGroupId("foo");
if (fetchAllOffsets) {
when(offsetMetadataManager.fetchAllOffsets(
request,
Long.MAX_VALUE
)).thenReturn(result);
} else {
when(offsetMetadataManager.fetchOffsets(
request,
Long.MAX_VALUE
)).thenReturn(result);
}
assertEquals(result, coordinator.fetchOffsets(request, Long.MAX_VALUE));
}
}
|
GroupCoordinatorShardTest
|
java
|
quarkusio__quarkus
|
extensions/keycloak-admin-rest-client/deployment/src/test/java/io/quarkus/keycloak/admin/rest/client/deployment/test/KeycloakAdminClientInjectionDevServicesTest.java
|
{
"start": 2056,
"end": 2293
}
|
class ____ {
@Inject
Keycloak keycloak;
@GET
@Path("/roles")
public List<RoleRepresentation> getRoles() {
return keycloak.realm("quarkus").roles().list();
}
}
}
|
AdminResource
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/UniversalComparableAssert.java
|
{
"start": 701,
"end": 968
}
|
class ____ better compatibility than {@link ComparableAssert} and related implementations, currently limited
* due to the upper bound of {@link ComparableAssert}'s type parameters.
*
* @see Assertions#assertThatComparable(Comparable)
* @since 3.23.0
*/
public
|
offers
|
java
|
apache__flink
|
flink-filesystems/flink-s3-fs-presto/src/main/java/org/apache/flink/fs/s3presto/S3PFileSystemFactory.java
|
{
"start": 942,
"end": 1075
}
|
class ____ extends S3FileSystemFactory {
@Override
public String getScheme() {
return "s3p";
}
}
|
S3PFileSystemFactory
|
java
|
quarkusio__quarkus
|
extensions/security/deployment/src/main/java/io/quarkus/security/deployment/PermissionSecurityChecks.java
|
{
"start": 2707,
"end": 3066
}
|
interface ____ {
DotName PERMISSION_CHECKER_NAME = DotName.createSimple(PermissionChecker.class);
DotName BLOCKING = DotName.createSimple(Blocking.class);
Map<MethodInfo, SecurityCheck> getMethodSecurityChecks();
Map<DotName, SecurityCheck> getClassNameSecurityChecks();
Set<String> permissionClasses();
final
|
PermissionSecurityChecks
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/SessionManagementConfigurerTests.java
|
{
"start": 29697,
"end": 30303
}
|
class ____ {
private static SessionRegistry SESSION_REGISTRY_ONE;
private static SessionRegistry SESSION_REGISTRY_TWO;
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.sessionManagement((management) -> management
.maximumSessions(1));
return http.build();
// @formatter:on
}
@Bean
SessionRegistry sessionRegistryOne() {
return SESSION_REGISTRY_ONE;
}
@Bean
SessionRegistry sessionRegistryTwo() {
return SESSION_REGISTRY_TWO;
}
}
@Configuration
@EnableWebSecurity
static
|
SessionRegistryTwoBeansConfig
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/inference/SettingsConfigurationTests.java
|
{
"start": 1136,
"end": 5636
}
|
class ____ extends ESTestCase {
public void testToXContent() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"description": "Wow, this tooltip is useful.",
"label": "Very important field",
"required": true,
"sensitive": false,
"updatable": true,
"type": "str",
"supported_task_types": ["text_embedding", "completion", "sparse_embedding", "rerank"]
}
""");
SettingsConfiguration configuration = SettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
SettingsConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = SettingsConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToXContent_WithNumericSelectOptions() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"description": "Wow, this tooltip is useful.",
"label": "Very important field",
"required": true,
"sensitive": false,
"updatable": true,
"type": "str",
"supported_task_types": ["text_embedding"]
}
""");
SettingsConfiguration configuration = SettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
SettingsConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = SettingsConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToXContentCrawlerConfig_WithNullValue() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
"label": "nextSyncConfig",
"value": null,
"supported_task_types": ["text_embedding", "completion", "sparse_embedding", "rerank"]
}
""");
SettingsConfiguration configuration = SettingsConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
boolean humanReadable = true;
BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
SettingsConfiguration parsed;
try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
parsed = SettingsConfiguration.fromXContent(parser);
}
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
public void testToMap() {
SettingsConfiguration configField = SettingsConfigurationTestUtils.getRandomSettingsConfigurationField();
Map<String, Object> configFieldAsMap = configField.toMap();
assertThat(configFieldAsMap.get("default_value"), equalTo(configField.getDefaultValue()));
if (configField.getDescription() != null) {
assertThat(configFieldAsMap.get("description"), equalTo(configField.getDescription()));
} else {
assertFalse(configFieldAsMap.containsKey("description"));
}
assertThat(configFieldAsMap.get("label"), equalTo(configField.getLabel()));
assertThat(configFieldAsMap.get("required"), equalTo(configField.isRequired()));
assertThat(configFieldAsMap.get("sensitive"), equalTo(configField.isSensitive()));
assertThat(configFieldAsMap.get("updatable"), equalTo(configField.isUpdatable()));
if (configField.getType() != null) {
assertThat(configFieldAsMap.get("type"), equalTo(configField.getType().toString()));
} else {
assertFalse(configFieldAsMap.containsKey("type"));
}
}
}
|
SettingsConfigurationTests
|
java
|
spring-projects__spring-security
|
core/src/main/java/org/springframework/security/authorization/AuthenticatedReactiveAuthorizationManager.java
|
{
"start": 1247,
"end": 2531
}
|
class ____<T> implements ReactiveAuthorizationManager<T> {
private AuthenticationTrustResolver authTrustResolver = new AuthenticationTrustResolverImpl();
AuthenticatedReactiveAuthorizationManager() {
}
@Override
public Mono<AuthorizationResult> authorize(Mono<Authentication> authentication, T object) {
return authentication.filter(this::isNotAnonymous)
.map(this::getAuthorizationDecision)
.defaultIfEmpty(new AuthorizationDecision(false));
}
private AuthorizationResult getAuthorizationDecision(Authentication authentication) {
return new AuthorizationDecision(authentication.isAuthenticated());
}
/**
* Verify (via {@link AuthenticationTrustResolver}) that the given authentication is
* not anonymous.
* @param authentication to be checked
* @return <code>true</code> if not anonymous, otherwise <code>false</code>.
*/
private boolean isNotAnonymous(Authentication authentication) {
return !this.authTrustResolver.isAnonymous(authentication);
}
/**
* Gets an instance of {@link AuthenticatedReactiveAuthorizationManager}
* @param <T>
* @return
*/
public static <T> AuthenticatedReactiveAuthorizationManager<T> authenticated() {
return new AuthenticatedReactiveAuthorizationManager<>();
}
}
|
AuthenticatedReactiveAuthorizationManager
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/ParallelSource.java
|
{
"start": 10285,
"end": 11405
}
|
class ____<T> implements InnerProducer<T> {
final ParallelSourceMain<T> parent;
final int index;
final int length;
ParallelSourceInner(ParallelSourceMain<T> parent, int index, int length) {
this.index = index;
this.length = length;
this.parent = parent;
}
@Override
public CoreSubscriber<? super T> actual() {
return parent.subscribers[index];
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return parent;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerProducer.super.scanUnsafe(key);
}
@Override
public void request(long n) {
if (Operators.validate(n)) {
AtomicLongArray ra = parent.requests;
for (;;) {
long r = ra.get(index);
if (r == Long.MAX_VALUE) {
return;
}
long u = Operators.addCap(r, n);
if (ra.compareAndSet(index, r, u)) {
break;
}
}
if (parent.subscriberCount == length) {
parent.drain();
}
}
}
@Override
public void cancel() {
parent.cancel();
}
}
}
}
|
ParallelSourceInner
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java
|
{
"start": 3506,
"end": 3701
}
|
class ____ def and excluding array
* types </li>
*
* <li> - type (Class) - a painless type represented by a java
|
including
|
java
|
spring-projects__spring-security
|
core/src/test/java/org/springframework/security/authentication/ReactiveUserDetailsServiceAuthenticationManagerTests.java
|
{
"start": 1747,
"end": 5556
}
|
class ____ {
@Mock
ReactiveUserDetailsService repository;
@Mock
PasswordEncoder passwordEncoder;
UserDetailsRepositoryReactiveAuthenticationManager manager;
String username;
String password;
@BeforeEach
public void setup() {
this.manager = new UserDetailsRepositoryReactiveAuthenticationManager(this.repository);
this.username = "user";
this.password = "pass";
}
@Test
public void constructorNullUserDetailsService() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new UserDetailsRepositoryReactiveAuthenticationManager(null));
}
@Test
public void authenticateWhenUserNotFoundThenBadCredentials() {
given(this.repository.findByUsername(this.username)).willReturn(Mono.empty());
UsernamePasswordAuthenticationToken token = UsernamePasswordAuthenticationToken.unauthenticated(this.username,
this.password);
Mono<Authentication> authentication = this.manager.authenticate(token);
// @formatter:off
StepVerifier.create(authentication)
.expectError(BadCredentialsException.class)
.verify();
// @formatter:on
}
@Test
public void authenticateWhenPasswordNotEqualThenBadCredentials() {
// @formatter:off
UserDetails user = PasswordEncodedUser.withUsername(this.username)
.password(this.password)
.roles("USER")
.build();
// @formatter:on
given(this.repository.findByUsername(user.getUsername())).willReturn(Mono.just(user));
UsernamePasswordAuthenticationToken token = UsernamePasswordAuthenticationToken.unauthenticated(this.username,
this.password + "INVALID");
Mono<Authentication> authentication = this.manager.authenticate(token);
// @formatter:off
StepVerifier.create(authentication)
.expectError(BadCredentialsException.class)
.verify();
// @formatter:on
}
@Test
public void authenticateWhenSuccessThenSuccess() {
// @formatter:off
UserDetails user = PasswordEncodedUser.withUsername(this.username)
.password(this.password)
.roles("USER")
.build();
// @formatter:on
given(this.repository.findByUsername(user.getUsername())).willReturn(Mono.just(user));
UsernamePasswordAuthenticationToken token = UsernamePasswordAuthenticationToken.unauthenticated(this.username,
this.password);
Authentication authentication = this.manager.authenticate(token).block();
assertThat(authentication).isEqualTo(authentication);
}
@Test
public void authenticateWhenPasswordEncoderAndSuccessThenSuccess() {
this.manager.setPasswordEncoder(this.passwordEncoder);
given(this.passwordEncoder.matches(any(), any())).willReturn(true);
User user = new User(this.username, this.password, AuthorityUtils.createAuthorityList("ROLE_USER"));
given(this.repository.findByUsername(user.getUsername())).willReturn(Mono.just(user));
UsernamePasswordAuthenticationToken token = UsernamePasswordAuthenticationToken.unauthenticated(this.username,
this.password);
Authentication authentication = this.manager.authenticate(token).block();
assertThat(authentication).isEqualTo(authentication);
}
@Test
public void authenticateWhenPasswordEncoderAndFailThenFail() {
this.manager.setPasswordEncoder(this.passwordEncoder);
given(this.passwordEncoder.matches(any(), any())).willReturn(false);
User user = new User(this.username, this.password, AuthorityUtils.createAuthorityList("ROLE_USER"));
given(this.repository.findByUsername(user.getUsername())).willReturn(Mono.just(user));
UsernamePasswordAuthenticationToken token = UsernamePasswordAuthenticationToken.unauthenticated(this.username,
this.password);
Mono<Authentication> authentication = this.manager.authenticate(token);
// @formatter:off
StepVerifier.create(authentication)
.expectError(BadCredentialsException.class)
.verify();
// @formatter:on
}
}
|
ReactiveUserDetailsServiceAuthenticationManagerTests
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/recovery/SimpleRecoveryExponentialDelayRestartStrategyITBase.java
|
{
"start": 1344,
"end": 2775
}
|
class ____ extends SimpleRecoveryITCaseBase {
@ClassRule
public static final MiniClusterWithClientResource MINI_CLUSTER_RESOURCE =
new MiniClusterWithClientResource(
new MiniClusterResourceConfiguration.Builder()
.setConfiguration(getConfiguration())
.setNumberTaskManagers(2)
.setNumberSlotsPerTaskManager(2)
.build());
private static Configuration getConfiguration() {
Configuration config = new Configuration();
config.set(RestartStrategyOptions.RESTART_STRATEGY, EXPONENTIAL_DELAY.getMainValue());
config.set(
RestartStrategyOptions.RESTART_STRATEGY_EXPONENTIAL_DELAY_INITIAL_BACKOFF,
Duration.ofMillis(5));
config.set(
RestartStrategyOptions.RESTART_STRATEGY_EXPONENTIAL_DELAY_MAX_BACKOFF,
Duration.ofMillis(100));
config.set(
RestartStrategyOptions.RESTART_STRATEGY_EXPONENTIAL_DELAY_BACKOFF_MULTIPLIER, 2D);
config.set(
RestartStrategyOptions.RESTART_STRATEGY_EXPONENTIAL_DELAY_RESET_BACKOFF_THRESHOLD,
Duration.ofMillis(1000));
config.set(RestartStrategyOptions.RESTART_STRATEGY_EXPONENTIAL_DELAY_JITTER_FACTOR, 0.2);
return config;
}
}
|
SimpleRecoveryExponentialDelayRestartStrategyITBase
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ReleaseSharedCacheResourceResponsePBImpl.java
|
{
"start": 1058,
"end": 1988
}
|
class ____ extends
ReleaseSharedCacheResourceResponse {
ReleaseSharedCacheResourceResponseProto proto =
ReleaseSharedCacheResourceResponseProto.getDefaultInstance();
ReleaseSharedCacheResourceResponseProto.Builder builder = null;
boolean viaProto = false;
public ReleaseSharedCacheResourceResponsePBImpl() {
builder = ReleaseSharedCacheResourceResponseProto.newBuilder();
}
public ReleaseSharedCacheResourceResponsePBImpl(
ReleaseSharedCacheResourceResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public ReleaseSharedCacheResourceResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ReleaseSharedCacheResourceResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
|
ReleaseSharedCacheResourceResponsePBImpl
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/annotation/JsonAppend.java
|
{
"start": 1795,
"end": 3027
}
|
interface ____
{
/**
* Name of attribute of which value to serialize. Is also used as the
* name of external property to write, unless overridden by
* assigning a value for {@link #propName()}.
*/
public String value();
/**
* Name to use for serializing value of the attribute; if not defined,
* {@link #value} will be used instead.
*/
public String propName() default "";
/**
* Optional namespace to use; only relevant for data formats that use
* namespaces (like XML).
*/
public String propNamespace() default "";
/**
* When to include attribute-property. Default value indicates that
* property should only be written if specified attribute has a non-null
* value.
*/
public JsonInclude.Include include() default JsonInclude.Include.NON_NULL;
/**
* Metadata about property, similar to
* {@link com.fasterxml.jackson.annotation.JsonProperty#required()}.
*/
public boolean required() default false;
}
/**
* Definition of a single general virtual property.
*/
public @
|
Attr
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/SetStepInfoUpdateTask.java
|
{
"start": 1000,
"end": 3379
}
|
class ____ extends IndexLifecycleClusterStateUpdateTask {
private static final Logger logger = LogManager.getLogger(SetStepInfoUpdateTask.class);
private final String policy;
private final ToXContentObject stepInfo;
public SetStepInfoUpdateTask(ProjectId projectId, Index index, String policy, Step.StepKey currentStepKey, ToXContentObject stepInfo) {
super(projectId, index, currentStepKey);
this.policy = policy;
this.stepInfo = stepInfo;
}
String getPolicy() {
return policy;
}
ToXContentObject getStepInfo() {
return stepInfo;
}
@Override
protected ClusterState doExecute(ProjectState currentState) throws IOException {
IndexMetadata idxMeta = currentState.metadata().index(index);
if (idxMeta == null) {
// Index must have been since deleted, ignore it
return currentState.cluster();
}
LifecycleExecutionState lifecycleState = idxMeta.getLifecycleExecutionState();
if (policy.equals(idxMeta.getLifecyclePolicyName()) && Objects.equals(currentStepKey, Step.getCurrentStepKey(lifecycleState))) {
return currentState.updatedState(IndexLifecycleTransition.addStepInfoToProject(index, currentState.metadata(), stepInfo));
} else {
// either the policy has changed or the step is now
// not the same as when we submitted the update task. In
// either case we don't want to do anything now
return currentState.cluster();
}
}
@Override
public void handleFailure(Exception e) {
logger.warn(
() -> format("policy [%s] for index [%s] failed trying to set step info for step [%s].", policy, index, currentStepKey),
e
);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SetStepInfoUpdateTask that = (SetStepInfoUpdateTask) o;
return index.equals(that.index)
&& policy.equals(that.policy)
&& currentStepKey.equals(that.currentStepKey)
&& Objects.equals(stepInfo, that.stepInfo);
}
@Override
public int hashCode() {
return Objects.hash(index, policy, currentStepKey, stepInfo);
}
public static
|
SetStepInfoUpdateTask
|
java
|
elastic__elasticsearch
|
modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java
|
{
"start": 14403,
"end": 14458
}
|
interface ____ {}
// Gauges
private
|
OtelInstrument
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/bean/override/convention/TestBeanForMultipleNestingLevelsIntegrationTests.java
|
{
"start": 1548,
"end": 1771
}
|
class ____ {
@TestBean(name = "field1", methodName = "testField1")
String field1;
@Test
void test() {
assertThat(field0).isEqualTo("zero");
assertThat(field1).isEqualTo("one");
}
@Nested
|
NestedLevel1Tests
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
|
{
"start": 4290,
"end": 4883
}
|
class ____.
* @param allowUpdate True if update of exiting record is allowed.
* @param errorIfExists True if an error should be returned when inserting
* an existing record. Only used if allowUpdate = false.
* @return The result of the putAll operation.
* @throws IOException Throws exception if unable to query the data store.
*/
@AtMostOnce
<T extends BaseRecord> StateStoreOperationResult putAll(
List<T> records, boolean allowUpdate, boolean errorIfExists)
throws IOException;
/**
* Remove a single record.
*
* @param <T> Record
|
clazz
|
java
|
spring-projects__spring-boot
|
module/spring-boot-neo4j/src/test/java/org/springframework/boot/neo4j/docker/compose/Neo4jEnvironmentTests.java
|
{
"start": 1048,
"end": 2264
}
|
class ____ {
@Test
void whenNeo4jAuthAndPasswordAreNullThenAuthTokenIsNull() {
Neo4jEnvironment environment = new Neo4jEnvironment(Collections.emptyMap());
assertThat(environment.getAuthToken()).isNull();
}
@Test
void whenNeo4jAuthIsNoneThenAuthTokenIsNone() {
Neo4jEnvironment environment = new Neo4jEnvironment(Map.of("NEO4J_AUTH", "none"));
assertThat(environment.getAuthToken()).isEqualTo(AuthTokens.none());
}
@Test
void whenNeo4jAuthIsNeo4jSlashPasswordThenAuthTokenIsBasic() {
Neo4jEnvironment environment = new Neo4jEnvironment(Map.of("NEO4J_AUTH", "neo4j/custom-password"));
assertThat(environment.getAuthToken()).isEqualTo(AuthTokens.basic("neo4j", "custom-password"));
}
@Test
void whenNeo4jAuthIsNeitherNoneNorNeo4jSlashPasswordEnvironmentCreationThrows() {
assertThatIllegalStateException()
.isThrownBy(() -> new Neo4jEnvironment(Map.of("NEO4J_AUTH", "graphdb/custom-password")));
}
@Test
void whenNeo4jPasswordIsProvidedThenAuthTokenIsBasic() {
Neo4jEnvironment environment = new Neo4jEnvironment(Map.of("NEO4J_PASSWORD", "custom-password"));
assertThat(environment.getAuthToken()).isEqualTo(AuthTokens.basic("neo4j", "custom-password"));
}
}
|
Neo4jEnvironmentTests
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/instrument/classloading/WeavingTransformer.java
|
{
"start": 1645,
"end": 1848
}
|
class ____.
* @param classLoader the ClassLoader to build a transformer for
*/
public WeavingTransformer(@Nullable ClassLoader classLoader) {
this.classLoader = classLoader;
}
/**
* Add a
|
loader
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/metrics/TraceSetupTest.java
|
{
"start": 15790,
"end": 16128
}
|
class ____ implements TraceReporterFactory {
static final TraceReporter REPORTER = new TestTraceReporter();
@Override
public TraceReporter createTraceReporter(Properties config) {
return REPORTER;
}
}
/** Factory that always throws an error. */
public static
|
TestTraceReporterFactory
|
java
|
google__guava
|
android/guava/src/com/google/common/cache/LoadingCache.java
|
{
"start": 1776,
"end": 8517
}
|
interface ____<K, V> extends Cache<K, V>, Function<K, V> {
/**
* Returns the value associated with {@code key} in this cache, first loading that value if
* necessary. No observable state associated with this cache is modified until loading completes.
*
* <p>If another call to {@link #get} or {@link #getUnchecked} is currently loading the value for
* {@code key}, simply waits for that thread to finish and returns its loaded value. Note that
* multiple threads can concurrently load values for distinct keys.
*
* <p>Caches loaded by a {@link CacheLoader} will call {@link CacheLoader#load} to load new values
* into the cache. Newly loaded values are added to the cache using {@code
* Cache.asMap().putIfAbsent} after loading has completed; if another value was associated with
* {@code key} while the new value was loading then a removal notification will be sent for the
* new value.
*
* <p>If the cache loader associated with this cache is known not to throw checked exceptions,
* then prefer {@link #getUnchecked} over this method.
*
* @throws ExecutionException if a checked exception was thrown while loading the value. ({@code
* ExecutionException} is thrown <a
* href="https://github.com/google/guava/wiki/CachesExplained#interruption">even if
* computation was interrupted by an {@code InterruptedException}</a>.)
* @throws UncheckedExecutionException if an unchecked exception was thrown while loading the
* value
* @throws ExecutionError if an error was thrown while loading the value
*/
@CanIgnoreReturnValue // TODO(b/27479612): consider removing this?
V get(K key) throws ExecutionException;
/**
* Returns the value associated with {@code key} in this cache, first loading that value if
* necessary. No observable state associated with this cache is modified until loading completes.
* Unlike {@link #get}, this method does not throw a checked exception, and thus should only be
* used in situations where checked exceptions are not thrown by the cache loader.
*
* <p>If another call to {@link #get} or {@link #getUnchecked} is currently loading the value for
* {@code key}, simply waits for that thread to finish and returns its loaded value. Note that
* multiple threads can concurrently load values for distinct keys.
*
* <p>Caches loaded by a {@link CacheLoader} will call {@link CacheLoader#load} to load new values
* into the cache. Newly loaded values are added to the cache using {@code
* Cache.asMap().putIfAbsent} after loading has completed; if another value was associated with
* {@code key} while the new value was loading then a removal notification will be sent for the
* new value.
*
* <p><b>Warning:</b> this method silently converts checked exceptions to unchecked exceptions,
* and should not be used with cache loaders which throw checked exceptions. In such cases use
* {@link #get} instead.
*
* @throws UncheckedExecutionException if an exception was thrown while loading the value. (As
* explained in the last paragraph above, this should be an unchecked exception only.)
* @throws ExecutionError if an error was thrown while loading the value
*/
@CanIgnoreReturnValue // TODO(b/27479612): consider removing this?
V getUnchecked(K key);
/**
* Returns a map of the values associated with {@code keys}, creating or retrieving those values
* if necessary. The returned map contains entries that were already cached, combined with newly
* loaded entries; it will never contain null keys or values.
*
* <p>Caches loaded by a {@link CacheLoader} will issue a single request to {@link
* CacheLoader#loadAll} for all keys which are not already present in the cache. All entries
* returned by {@link CacheLoader#loadAll} will be stored in the cache, over-writing any
* previously cached values. This method will throw an exception if {@link CacheLoader#loadAll}
* returns {@code null}, returns a map containing null keys or values, or fails to return an entry
* for each requested key.
*
* <p>Note that duplicate elements in {@code keys}, as determined by {@link Object#equals}, will
* be ignored.
*
* @throws ExecutionException if a checked exception was thrown while loading the value. ({@code
* ExecutionException} is thrown <a
* href="https://github.com/google/guava/wiki/CachesExplained#interruption">even if
* computation was interrupted by an {@code InterruptedException}</a>.)
* @throws UncheckedExecutionException if an unchecked exception was thrown while loading the
* values
* @throws ExecutionError if an error was thrown while loading the values
* @since 11.0
*/
@CanIgnoreReturnValue // TODO(b/27479612): consider removing this
ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException;
/**
* @deprecated Provided to satisfy the {@code Function} interface; use {@link #get} or {@link
* #getUnchecked} instead.
* @throws UncheckedExecutionException if an exception was thrown while loading the value. (As
* described in the documentation for {@link #getUnchecked}, {@code LoadingCache} should be
* used as a {@code Function} only with cache loaders that throw only unchecked exceptions.)
*/
@Deprecated
@Override
V apply(K key);
/**
* Loads a new value for {@code key}, possibly asynchronously. While the new value is loading the
* previous value (if any) will continue to be returned by {@code get(key)} unless it is evicted.
* If the new value is loaded successfully it will replace the previous value in the cache; if an
* exception is thrown while refreshing the previous value will remain, <i>and the exception will
* be logged (using {@link java.util.logging.Logger}) and swallowed</i>.
*
* <p>Caches loaded by a {@link CacheLoader} will call {@link CacheLoader#reload} if the cache
* currently contains a value for {@code key}, and {@link CacheLoader#load} otherwise. Loading is
* asynchronous only if {@link CacheLoader#reload} was overridden with an asynchronous
* implementation.
*
* <p>Returns without doing anything if another thread is currently loading the value for {@code
* key}. If the cache loader associated with this cache performs refresh asynchronously then this
* method may return before refresh completes.
*
* @since 11.0
*/
void refresh(K key);
/**
* {@inheritDoc}
*
* <p><b>Note that although the view <i>is</i> modifiable, no method on the returned map will ever
* cause entries to be automatically loaded.</b>
*/
@Override
ConcurrentMap<K, V> asMap();
}
|
LoadingCache
|
java
|
google__dagger
|
dagger-compiler/main/java/dagger/internal/codegen/validation/ProvidesMethodValidator.java
|
{
"start": 1512,
"end": 2345
}
|
class ____ extends BindingMethodValidator {
private final DependencyRequestValidator dependencyRequestValidator;
@Inject
ProvidesMethodValidator(
XProcessingEnv processingEnv,
DependencyRequestValidator dependencyRequestValidator,
InjectionAnnotations injectionAnnotations) {
super(
XTypeNames.PROVIDES,
ImmutableSet.of(XTypeNames.MODULE, XTypeNames.PRODUCER_MODULE),
MUST_BE_CONCRETE,
RUNTIME_EXCEPTION,
ALLOWS_MULTIBINDINGS,
ALLOWS_SCOPING,
processingEnv,
dependencyRequestValidator,
injectionAnnotations);
this.dependencyRequestValidator = dependencyRequestValidator;
}
@Override
protected ElementValidator elementValidator(XMethodElement method) {
return new Validator(method);
}
private
|
ProvidesMethodValidator
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2537/ImplicitSourceTest.java
|
{
"start": 544,
"end": 650
}
|
class ____ {
@ProcessorTest
public void situationCompilesWithoutErrors() {
}
}
|
ImplicitSourceTest
|
java
|
spring-projects__spring-security
|
access/src/main/java/org/springframework/security/web/access/channel/InsecureChannelProcessor.java
|
{
"start": 1291,
"end": 1930
}
|
class ____ to one case-sensitive keyword, {@link #getInsecureKeyword}. If this
* keyword is detected, <code>HttpServletRequest.isSecure()</code> is used to determine
* the channel security offered. If channel security is present, the configured
* <code>ChannelEntryPoint</code> is called. By default the entry point is
* {@link RetryWithHttpEntryPoint}.
* <p>
* The default <code>insecureKeyword</code> is <code>REQUIRES_INSECURE_CHANNEL</code>.
*
* @author Ben Alex
* @deprecated no replacement is planned, though consider using a custom
* {@link RequestMatcher} for any sophisticated decision-making
*/
@Deprecated
public
|
responds
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/test/benchmark/encode/ArrayInt1000Encode.java
|
{
"start": 153,
"end": 568
}
|
class ____ extends BenchmarkCase {
private Object object;
public ArrayInt1000Encode(){
super("ArrayInt1000Encode");
int[] array = new int[1000];
for (int i = 0; i < array.length; ++i) {
array[i] = i;
}
this.object = array;
}
@Override
public void execute(Codec codec) throws Exception {
codec.encode(object);
}
}
|
ArrayInt1000Encode
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/sample/UserRepository.java
|
{
"start": 29784,
"end": 29870
}
|
interface ____ {
String getFirstname();
Set<Role> getRoles();
}
|
RolesAndFirstname
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/mock/http/MockHttpOutputMessage.java
|
{
"start": 1093,
"end": 2018
}
|
class ____ implements HttpOutputMessage {
private final HttpHeaders headers = new HttpHeaders();
private final ByteArrayOutputStream body = new ByteArrayOutputStream(1024);
@Override
public HttpHeaders getHeaders() {
return this.headers;
}
@Override
public OutputStream getBody() throws IOException {
return this.body;
}
/**
* Return the body content as a byte array.
*/
public byte[] getBodyAsBytes() {
return this.body.toByteArray();
}
/**
* Return the body content interpreted as a UTF-8 string.
*/
public String getBodyAsString() {
return getBodyAsString(StandardCharsets.UTF_8);
}
/**
* Return the body content interpreted as a string using the supplied character set.
* @param charset the charset to use to turn the body content into a String
*/
public String getBodyAsString(Charset charset) {
return StreamUtils.copyToString(this.body, charset);
}
}
|
MockHttpOutputMessage
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/api/common/operators/util/OperatorValidationUtils.java
|
{
"start": 1095,
"end": 3779
}
|
class ____ {
private OperatorValidationUtils() {}
public static void validateParallelism(int parallelism) {
validateParallelism(parallelism, true);
}
public static void validateParallelism(int parallelism, boolean canBeParallel) {
Preconditions.checkArgument(
canBeParallel || parallelism == 1,
"The parallelism of non parallel operator must be 1.");
Preconditions.checkArgument(
parallelism > 0 || parallelism == ExecutionConfig.PARALLELISM_DEFAULT,
"The parallelism of an operator must be at least 1, or ExecutionConfig.PARALLELISM_DEFAULT (use system default).");
}
public static void validateMaxParallelism(int maxParallelism) {
validateMaxParallelism(maxParallelism, Integer.MAX_VALUE, true);
}
public static void validateMaxParallelism(int maxParallelism, int upperBound) {
validateMaxParallelism(maxParallelism, upperBound, true);
}
public static void validateMaxParallelism(int maxParallelism, boolean canBeParallel) {
validateMaxParallelism(maxParallelism, Integer.MAX_VALUE, canBeParallel);
}
public static void validateMaxParallelism(
int maxParallelism, int upperBound, boolean canBeParallel) {
Preconditions.checkArgument(
maxParallelism > 0, "The maximum parallelism must be greater than 0.");
Preconditions.checkArgument(
canBeParallel || maxParallelism == 1,
"The maximum parallelism of non parallel operator must be 1.");
Preconditions.checkArgument(
maxParallelism > 0 && maxParallelism <= upperBound,
"Maximum parallelism must be between 1 and "
+ upperBound
+ ". Found: "
+ maxParallelism);
}
public static void validateResources(ResourceSpec resources) {
Preconditions.checkNotNull(resources, "The resources must be not null.");
}
public static void validateMinAndPreferredResources(
ResourceSpec minResources, ResourceSpec preferredResources) {
Preconditions.checkNotNull(minResources, "The min resources must be not null.");
Preconditions.checkNotNull(preferredResources, "The preferred resources must be not null.");
Preconditions.checkArgument(
minResources.lessThanOrEqual(preferredResources),
"The resources must be either both UNKNOWN or both not UNKNOWN. If not UNKNOWN,"
+ " the preferred resources must be greater than or equal to the min resources.");
}
}
|
OperatorValidationUtils
|
java
|
apache__flink
|
flink-connectors/flink-hadoop-compatibility/src/main/java/org/apache/flink/api/java/typeutils/runtime/WritableSerializer.java
|
{
"start": 1633,
"end": 5067
}
|
class ____<T extends Writable> extends TypeSerializer<T> {
private static final long serialVersionUID = 1L;
private final Class<T> typeClass;
private transient Kryo kryo;
private transient T copyInstance;
public WritableSerializer(Class<T> typeClass) {
this.typeClass = typeClass;
}
@SuppressWarnings("unchecked")
@Override
public T createInstance() {
if (typeClass == NullWritable.class) {
return (T) NullWritable.get();
}
return InstantiationUtil.instantiate(typeClass);
}
@Override
public T copy(T from) {
checkKryoInitialized();
return KryoUtils.copy(from, kryo, this);
}
@Override
public T copy(T from, T reuse) {
checkKryoInitialized();
return KryoUtils.copy(from, reuse, kryo, this);
}
@Override
public int getLength() {
return -1;
}
@Override
public void serialize(T record, DataOutputView target) throws IOException {
record.write(target);
}
@Override
public T deserialize(DataInputView source) throws IOException {
return deserialize(createInstance(), source);
}
@Override
public T deserialize(T reuse, DataInputView source) throws IOException {
reuse.readFields(source);
return reuse;
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
ensureInstanceInstantiated();
copyInstance.readFields(source);
copyInstance.write(target);
}
@Override
public boolean isImmutableType() {
return false;
}
@Override
public WritableSerializer<T> duplicate() {
return new WritableSerializer<T>(typeClass);
}
// --------------------------------------------------------------------------------------------
private void ensureInstanceInstantiated() {
if (copyInstance == null) {
copyInstance = createInstance();
}
}
private void checkKryoInitialized() {
if (this.kryo == null) {
this.kryo = new Kryo();
DefaultInstantiatorStrategy instantiatorStrategy = new DefaultInstantiatorStrategy();
instantiatorStrategy.setFallbackInstantiatorStrategy(new StdInstantiatorStrategy());
kryo.setInstantiatorStrategy(instantiatorStrategy);
this.kryo.register(typeClass);
}
}
// --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return this.typeClass.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof WritableSerializer) {
WritableSerializer<?> other = (WritableSerializer<?>) obj;
return typeClass == other.typeClass;
} else {
return false;
}
}
// --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<T> snapshotConfiguration() {
return new WritableSerializerSnapshot<>(typeClass);
}
/** {@link WritableSerializer} snapshot class. */
@Internal
public static final
|
WritableSerializer
|
java
|
apache__camel
|
components/camel-huawei/camel-huaweicloud-obs/src/test/java/org/apache/camel/component/huaweicloud/obs/CreateBucketFunctionalTest.java
|
{
"start": 1360,
"end": 3617
}
|
class ____ extends CamelTestSupport {
private static final String ACCESS_KEY = "replace_this_with_access_key";
private static final String SECRET_KEY = "replace_this_with_secret_key";
private static final String REGION = "replace_this_with_region";
private static final String BUCKET_NAME = "replace_this_with_bucket_name";
private static final String BUCKET_LOCATION = "replace_this_with_bucket_location";
@BindToRegistry("serviceKeys")
ServiceKeys serviceKeys = new ServiceKeys(ACCESS_KEY, SECRET_KEY);
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:create_bucket")
.to("hwcloud-obs:createBucket?" +
"serviceKeys=#serviceKeys" +
"&bucketName=" + BUCKET_NAME +
"&bucketLocation=" + BUCKET_LOCATION +
"®ion=" + REGION +
"&ignoreSslVerification=true")
.log("Create bucket successful")
.to("log:LOG?showAll=true")
.to("mock:create_bucket_result");
}
};
}
/**
* The following test cases should be manually enabled to perform test against the actual HuaweiCloud OBS server
* with real user credentials. To perform this test, manually comment out the @Ignore annotation and enter relevant
* service parameters in the placeholders above (static variables of this test class)
*
* @throws Exception
*/
@Disabled("Manually enable this once you configure the parameters in the placeholders above")
@Test
public void testCreateBucket() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:create_bucket_result");
mock.expectedMinimumMessageCount(1);
template.sendBody("direct:create_bucket", "sampleBody");
Exchange responseExchange = mock.getExchanges().get(0);
mock.assertIsSatisfied();
assertNotNull(responseExchange.getIn().getBody(String.class));
assertTrue(responseExchange.getIn().getBody(String.class).length() > 0);
}
}
|
CreateBucketFunctionalTest
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/concurrent/BackgroundInitializer.java
|
{
"start": 1236,
"end": 1816
}
|
class ____ allows complex initialization operations in a background task.
*
* <p>
* Applications often have to do some expensive initialization steps when they
* are started, e.g. constructing a connection to a database, reading a
* configuration file, etc. Doing these things in parallel can enhance
* performance as the CPU load can be improved. However, when access to the
* resources initialized in a background thread is actually required,
* synchronization has to be performed to ensure that their initialization is
* complete.
* </p>
* <p>
* This abstract base
|
that
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/web/client/response/DefaultResponseCreator.java
|
{
"start": 1557,
"end": 4678
}
|
class ____ implements ResponseCreator {
private final HttpStatusCode statusCode;
private byte[] content = new byte[0];
private @Nullable Resource contentResource;
private final HttpHeaders headers = new HttpHeaders();
/**
* Protected constructor.
* Use static factory methods in {@link MockRestResponseCreators}.
* @since 5.3.17
*/
protected DefaultResponseCreator(int statusCode) {
this(HttpStatusCode.valueOf(statusCode));
}
/**
* Protected constructor.
* Use static factory methods in {@link MockRestResponseCreators}.
*/
protected DefaultResponseCreator(HttpStatusCode statusCode) {
Assert.notNull(statusCode, "HttpStatusCode must not be null");
this.statusCode = statusCode;
}
/**
* Set the body as a UTF-8 String.
*/
public DefaultResponseCreator body(String content) {
this.content = content.getBytes(StandardCharsets.UTF_8);
return this;
}
/**
* Set the body from a string using the given character set.
* @since 6.0
*/
public DefaultResponseCreator body(String content, Charset charset) {
this.content = content.getBytes(charset);
return this;
}
/**
* Set the body as a byte array.
*/
public DefaultResponseCreator body(byte[] content) {
this.content = content;
return this;
}
/**
* Set the body from a {@link Resource}.
*/
public DefaultResponseCreator body(Resource resource) {
this.contentResource = resource;
return this;
}
/**
* Set the {@code Content-Type} header.
*/
public DefaultResponseCreator contentType(MediaType mediaType) {
this.headers.setContentType(mediaType);
return this;
}
/**
* Set the {@code Location} header.
*/
public DefaultResponseCreator location(URI location) {
this.headers.setLocation(location);
return this;
}
/**
* Add a response header with one or more values.
* @since 6.0
*/
public DefaultResponseCreator header(String name, String ... headerValues) {
for (String headerValue : headerValues) {
this.headers.add(name, headerValue);
}
return this;
}
/**
* Copy all given headers.
*/
public DefaultResponseCreator headers(HttpHeaders headers) {
this.headers.putAll(headers);
return this;
}
/**
* Add one or more cookies.
* @since 6.0
*/
public DefaultResponseCreator cookies(ResponseCookie... cookies) {
for (ResponseCookie cookie : cookies) {
this.headers.add(HttpHeaders.SET_COOKIE, cookie.toString());
}
return this;
}
/**
* Copy all cookies from the given {@link MultiValueMap}.
* @since 6.0
*/
public DefaultResponseCreator cookies(MultiValueMap<String, ResponseCookie> multiValueMap) {
multiValueMap.values().forEach(cookies -> cookies.forEach(this::cookies));
return this;
}
@Override
public ClientHttpResponse createResponse(@Nullable ClientHttpRequest request) throws IOException {
MockClientHttpResponse response = (this.contentResource != null ?
new MockClientHttpResponse(this.contentResource.getInputStream(), this.statusCode) :
new MockClientHttpResponse(this.content, this.statusCode));
response.getHeaders().putAll(this.headers);
return response;
}
}
|
DefaultResponseCreator
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.