language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | netty__netty | transport-classes-epoll/src/main/java/io/netty/channel/epoll/EpollEventLoopGroup.java | {
"start": 1912,
"end": 9192
} | class ____ loaded.
Epoll.ensureAvailability();
}
private static final InternalLogger LOGGER = InternalLoggerFactory.getInstance(EpollEventLoopGroup.class);
/**
* Create a new instance using the default number of threads and the default {@link ThreadFactory}.
*/
public EpollEventLoopGroup() {
this(0);
}
/**
* Create a new instance using the specified number of threads and the default {@link ThreadFactory}.
*/
public EpollEventLoopGroup(int nThreads) {
this(nThreads, (ThreadFactory) null);
}
/**
* Create a new instance using the default number of threads and the given {@link ThreadFactory}.
*/
@SuppressWarnings("deprecation")
public EpollEventLoopGroup(ThreadFactory threadFactory) {
this(0, threadFactory, 0);
}
/**
* Create a new instance using the specified number of threads and the default {@link ThreadFactory}.
*/
@SuppressWarnings("deprecation")
public EpollEventLoopGroup(int nThreads, SelectStrategyFactory selectStrategyFactory) {
this(nThreads, (ThreadFactory) null, selectStrategyFactory);
}
/**
* Create a new instance using the specified number of threads and the given {@link ThreadFactory}.
*/
@SuppressWarnings("deprecation")
public EpollEventLoopGroup(int nThreads, ThreadFactory threadFactory) {
this(nThreads, threadFactory, 0);
}
public EpollEventLoopGroup(int nThreads, Executor executor) {
this(nThreads, executor, DefaultSelectStrategyFactory.INSTANCE);
}
/**
* Create a new instance using the specified number of threads and the given {@link ThreadFactory}.
*/
@SuppressWarnings("deprecation")
public EpollEventLoopGroup(int nThreads, ThreadFactory threadFactory, SelectStrategyFactory selectStrategyFactory) {
this(nThreads, threadFactory, 0, selectStrategyFactory);
}
/**
* Create a new instance using the specified number of threads, the given {@link ThreadFactory} and the given
* maximal amount of epoll events to handle per epollWait(...).
*
* @deprecated Use {@link #EpollEventLoopGroup(int)} or {@link #EpollEventLoopGroup(int, ThreadFactory)}
*/
@Deprecated
public EpollEventLoopGroup(int nThreads, ThreadFactory threadFactory, int maxEventsAtOnce) {
this(nThreads, threadFactory, maxEventsAtOnce, DefaultSelectStrategyFactory.INSTANCE);
}
/**
* Create a new instance using the specified number of threads, the given {@link ThreadFactory} and the given
* maximal amount of epoll events to handle per epollWait(...).
*
* @deprecated Use {@link #EpollEventLoopGroup(int)}, {@link #EpollEventLoopGroup(int, ThreadFactory)}, or
* {@link #EpollEventLoopGroup(int, SelectStrategyFactory)}
*/
@Deprecated
public EpollEventLoopGroup(int nThreads, ThreadFactory threadFactory, int maxEventsAtOnce,
SelectStrategyFactory selectStrategyFactory) {
super(nThreads, threadFactory, EpollIoHandler.newFactory(maxEventsAtOnce, selectStrategyFactory),
RejectedExecutionHandlers.reject());
}
public EpollEventLoopGroup(int nThreads, Executor executor, SelectStrategyFactory selectStrategyFactory) {
super(nThreads, executor, EpollIoHandler.newFactory(0, selectStrategyFactory),
RejectedExecutionHandlers.reject());
}
public EpollEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFactory chooserFactory,
SelectStrategyFactory selectStrategyFactory) {
super(nThreads, executor, EpollIoHandler.newFactory(0, selectStrategyFactory), chooserFactory,
RejectedExecutionHandlers.reject());
}
public EpollEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFactory chooserFactory,
SelectStrategyFactory selectStrategyFactory,
RejectedExecutionHandler rejectedExecutionHandler) {
super(nThreads, executor, EpollIoHandler.newFactory(0, selectStrategyFactory), chooserFactory,
rejectedExecutionHandler);
}
public EpollEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFactory chooserFactory,
SelectStrategyFactory selectStrategyFactory,
RejectedExecutionHandler rejectedExecutionHandler,
EventLoopTaskQueueFactory queueFactory) {
super(nThreads, executor, EpollIoHandler.newFactory(0, selectStrategyFactory), chooserFactory,
rejectedExecutionHandler, queueFactory);
}
/**
* @param nThreads the number of threads that will be used by this instance.
* @param executor the Executor to use, or {@code null} if default one should be used.
* @param chooserFactory the {@link EventExecutorChooserFactory} to use.
* @param selectStrategyFactory the {@link SelectStrategyFactory} to use.
* @param rejectedExecutionHandler the {@link RejectedExecutionHandler} to use.
* @param taskQueueFactory the {@link EventLoopTaskQueueFactory} to use for
* {@link SingleThreadEventLoop#execute(Runnable)},
* or {@code null} if default one should be used.
* @param tailTaskQueueFactory the {@link EventLoopTaskQueueFactory} to use for
* {@link SingleThreadEventLoop#executeAfterEventLoopIteration(Runnable)},
* or {@code null} if default one should be used.
*/
public EpollEventLoopGroup(int nThreads, Executor executor, EventExecutorChooserFactory chooserFactory,
SelectStrategyFactory selectStrategyFactory,
RejectedExecutionHandler rejectedExecutionHandler,
EventLoopTaskQueueFactory taskQueueFactory,
EventLoopTaskQueueFactory tailTaskQueueFactory) {
super(nThreads, executor, EpollIoHandler.newFactory(0, selectStrategyFactory),
chooserFactory, rejectedExecutionHandler, taskQueueFactory, tailTaskQueueFactory);
}
/**
* This method is a no-op.
*
* @deprecated
*/
@Deprecated
public void setIoRatio(int ioRatio) {
LOGGER.debug("EpollEventLoopGroup.setIoRatio(int) logic was removed, this is a no-op");
}
@Override
protected IoEventLoop newChild(Executor executor, IoHandlerFactory ioHandlerFactory, Object... args) {
RejectedExecutionHandler rejectedExecutionHandler = (RejectedExecutionHandler) args[0];
EventLoopTaskQueueFactory taskQueueFactory = null;
EventLoopTaskQueueFactory tailTaskQueueFactory = null;
int argsLength = args.length;
if (argsLength > 1) {
taskQueueFactory = (EventLoopTaskQueueFactory) args[1];
}
if (argsLength > 2) {
tailTaskQueueFactory = (EventLoopTaskQueueFactory) args[2];
}
return new EpollEventLoop(this, executor, ioHandlerFactory, taskQueueFactory, tailTaskQueueFactory,
rejectedExecutionHandler);
}
}
| is |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java | {
"start": 1141,
"end": 3533
} | class ____ {
private final ClusterService clusterService;
private final ClusterInfoService clusterInfoService;
private final Supplier<DesiredBalance> desiredBalanceSupplier;
private final NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator;
public AllocationStatsService(
ClusterService clusterService,
ClusterInfoService clusterInfoService,
ShardsAllocator shardsAllocator,
NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) {
this.clusterService = clusterService;
this.clusterInfoService = clusterInfoService;
this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
this.desiredBalanceSupplier = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator
? allocator::getDesiredBalance
: () -> null;
}
/**
* Returns a map of node IDs to node allocation stats.
*/
public Map<String, NodeAllocationStats> stats() {
return stats(() -> {});
}
/**
* Returns a map of node IDs to node allocation stats, promising to execute the provided {@link Runnable} during the computation to
* test for cancellation.
*/
public Map<String, NodeAllocationStats> stats(Runnable ensureNotCancelled) {
assert Transports.assertNotTransportThread("too expensive for a transport worker");
var clusterState = clusterService.state();
var nodesStatsAndWeights = nodeAllocationStatsAndWeightsCalculator.nodesAllocationStatsAndWeights(
clusterState.metadata(),
clusterState.getRoutingNodes(),
clusterInfoService.getClusterInfo(),
ensureNotCancelled,
desiredBalanceSupplier.get()
);
return nodesStatsAndWeights.entrySet()
.stream()
.collect(
Collectors.toMap(
Map.Entry::getKey,
e -> new NodeAllocationStats(
e.getValue().shards(),
e.getValue().undesiredShards(),
e.getValue().forecastedIngestLoad(),
e.getValue().forecastedDiskUsage(),
e.getValue().currentDiskUsage()
)
)
);
}
}
| AllocationStatsService |
java | grpc__grpc-java | alts/src/test/java/io/grpc/alts/internal/FakeTsiTest.java | {
"start": 1463,
"end": 7366
} | class ____ {
private static final int OVERHEAD =
FakeChannelCrypter.getTagBytes() + AltsTsiFrameProtector.getHeaderBytes();
private final List<ReferenceCounted> references = new ArrayList<>();
private final RegisterRef ref =
new RegisterRef() {
@Override
public ByteBuf register(ByteBuf buf) {
if (buf != null) {
references.add(buf);
}
return buf;
}
};
private static Handshakers newHandshakers() {
TsiHandshaker clientHandshaker = FakeTsiHandshaker.newFakeHandshakerClient();
TsiHandshaker serverHandshaker = FakeTsiHandshaker.newFakeHandshakerServer();
return new Handshakers(clientHandshaker, serverHandshaker);
}
@Before
public void setUp() {
ResourceLeakDetector.setLevel(Level.PARANOID);
}
@After
public void tearDown() {
for (ReferenceCounted reference : references) {
reference.release();
}
references.clear();
// Increase our chances to detect ByteBuf leaks.
GcFinalization.awaitFullGc();
}
@Test
public void handshakeStateOrderTest() {
try {
Handshakers handshakers = newHandshakers();
TsiHandshaker clientHandshaker = handshakers.getClient();
TsiHandshaker serverHandshaker = handshakers.getServer();
byte[] transportBufferBytes = new byte[TsiTest.getDefaultTransportBufferSize()];
ByteBuffer transportBuffer = ByteBuffer.wrap(transportBufferBytes);
((Buffer) transportBuffer).limit(0); // Start off with an empty buffer
((Buffer) transportBuffer).clear();
clientHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertEquals(
FakeTsiHandshaker.State.CLIENT_INIT.toString().trim(),
new String(transportBufferBytes, 4, transportBuffer.remaining(), UTF_8).trim());
serverHandshaker.processBytesFromPeer(transportBuffer);
assertFalse(transportBuffer.hasRemaining());
// client shouldn't offer any more bytes
((Buffer) transportBuffer).clear();
clientHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertFalse(transportBuffer.hasRemaining());
((Buffer) transportBuffer).clear();
serverHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertEquals(
FakeTsiHandshaker.State.SERVER_INIT.toString().trim(),
new String(transportBufferBytes, 4, transportBuffer.remaining(), UTF_8).trim());
clientHandshaker.processBytesFromPeer(transportBuffer);
assertFalse(transportBuffer.hasRemaining());
// server shouldn't offer any more bytes
((Buffer) transportBuffer).clear();
serverHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertFalse(transportBuffer.hasRemaining());
((Buffer) transportBuffer).clear();
clientHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertEquals(
FakeTsiHandshaker.State.CLIENT_FINISHED.toString().trim(),
new String(transportBufferBytes, 4, transportBuffer.remaining(), UTF_8).trim());
serverHandshaker.processBytesFromPeer(transportBuffer);
assertFalse(transportBuffer.hasRemaining());
// client shouldn't offer any more bytes
((Buffer) transportBuffer).clear();
clientHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertFalse(transportBuffer.hasRemaining());
((Buffer) transportBuffer).clear();
serverHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertEquals(
FakeTsiHandshaker.State.SERVER_FINISHED.toString().trim(),
new String(transportBufferBytes, 4, transportBuffer.remaining(), UTF_8).trim());
clientHandshaker.processBytesFromPeer(transportBuffer);
assertFalse(transportBuffer.hasRemaining());
// server shouldn't offer any more bytes
((Buffer) transportBuffer).clear();
serverHandshaker.getBytesToSendToPeer(transportBuffer);
((Buffer) transportBuffer).flip();
assertFalse(transportBuffer.hasRemaining());
} catch (GeneralSecurityException e) {
throw new AssertionError(e);
}
}
@Test
public void handshake() throws GeneralSecurityException {
TsiTest.handshakeTest(newHandshakers());
}
@Test
public void handshakeSmallBuffer() throws GeneralSecurityException {
TsiTest.handshakeSmallBufferTest(newHandshakers());
}
@Test
public void pingPong() throws GeneralSecurityException {
TsiTest.pingPongTest(newHandshakers(), ref);
}
@Test
public void pingPongExactFrameSize() throws GeneralSecurityException {
TsiTest.pingPongExactFrameSizeTest(newHandshakers(), ref);
}
@Test
public void pingPongSmallBuffer() throws GeneralSecurityException {
TsiTest.pingPongSmallBufferTest(newHandshakers(), ref);
}
@Test
public void pingPongSmallFrame() throws GeneralSecurityException {
TsiTest.pingPongSmallFrameTest(OVERHEAD, newHandshakers(), ref);
}
@Test
public void pingPongSmallFrameSmallBuffer() throws GeneralSecurityException {
TsiTest.pingPongSmallFrameSmallBufferTest(OVERHEAD, newHandshakers(), ref);
}
@Test
public void corruptedCounter() throws GeneralSecurityException {
TsiTest.corruptedCounterTest(newHandshakers(), ref);
}
@Test
public void corruptedCiphertext() throws GeneralSecurityException {
TsiTest.corruptedCiphertextTest(newHandshakers(), ref);
}
@Test
public void corruptedTag() throws GeneralSecurityException {
TsiTest.corruptedTagTest(newHandshakers(), ref);
}
@Test
public void reflectedCiphertext() throws GeneralSecurityException {
TsiTest.reflectedCiphertextTest(newHandshakers(), ref);
}
}
| FakeTsiTest |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/appender/rolling/RollingAppenderSizeCompressPermissionsTest.java | {
"start": 1487,
"end": 4201
} | class ____ {
private static final String CONFIG = "log4j-rolling-gz-posix.xml";
private static final String DIR = "target/rollingpermissions1";
public static LoggerContextRule loggerContextRule =
LoggerContextRule.createShutdownTimeoutLoggerContextRule(CONFIG);
@Rule
public RuleChain chain = loggerContextRule.withCleanFoldersRule(DIR);
private Logger logger;
@BeforeClass
public static void beforeClass() {
Assume.assumeTrue(FileUtils.isFilePosixAttributeViewSupported());
}
@Before
public void setUp() {
this.logger = loggerContextRule.getLogger(RollingAppenderSizeCompressPermissionsTest.class.getName());
}
@Test
public void testAppenderCompressPermissions() throws Exception {
for (int i = 0; i < 500; ++i) {
final String message = "This is test message number " + i;
logger.debug(message);
if (i % 100 == 0) {
Thread.sleep(500);
}
}
if (!loggerContextRule.getLoggerContext().stop(30, TimeUnit.SECONDS)) {
System.err.println("Could not stop cleanly " + loggerContextRule + " for " + this);
}
final File dir = new File(DIR);
assertTrue("Directory not created", dir.exists());
final File[] files = dir.listFiles();
assertNotNull(files);
int gzippedFiles1 = 0;
int gzippedFiles2 = 0;
for (final File file : files) {
final FileExtension ext = FileExtension.lookupForFile(file.getName());
if (ext != null) {
if (file.getName().startsWith("test1")) {
gzippedFiles1++;
assertEquals(
"rw-------", PosixFilePermissions.toString(Files.getPosixFilePermissions(file.toPath())));
} else {
gzippedFiles2++;
assertEquals(
"r--r--r--", PosixFilePermissions.toString(Files.getPosixFilePermissions(file.toPath())));
}
} else if (file.getName().startsWith("test1")) {
assertEquals("rw-------", PosixFilePermissions.toString(Files.getPosixFilePermissions(file.toPath())));
} else {
assertEquals("rwx------", PosixFilePermissions.toString(Files.getPosixFilePermissions(file.toPath())));
}
}
assertTrue("Files not rolled : " + files.length, files.length > 2);
assertTrue("Files 1 gzipped not rolled : " + gzippedFiles1, gzippedFiles1 > 0);
assertTrue("Files 2 gzipped not rolled : " + gzippedFiles2, gzippedFiles2 > 0);
}
}
| RollingAppenderSizeCompressPermissionsTest |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/test/java/org/springframework/boot/loader/jar/MetaInfVersionsInfoTests.java | {
"start": 1084,
"end": 3772
} | class ____ {
@Test
void getParsesVersionsAndEntries() {
List<ZipContent.Entry> entries = new ArrayList<>();
entries.add(mockEntry("META-INF/"));
entries.add(mockEntry("META-INF/MANIFEST.MF"));
entries.add(mockEntry("META-INF/versions/"));
entries.add(mockEntry("META-INF/versions/9/"));
entries.add(mockEntry("META-INF/versions/9/Foo.class"));
entries.add(mockEntry("META-INF/versions/11/"));
entries.add(mockEntry("META-INF/versions/11/Foo.class"));
entries.add(mockEntry("META-INF/versions/10/"));
entries.add(mockEntry("META-INF/versions/10/Foo.class"));
MetaInfVersionsInfo info = MetaInfVersionsInfo.get(entries.size(), entries::get);
assertThat(info.versions()).containsExactly(9, 10, 11);
assertThat(info.directories()).containsExactly("META-INF/versions/9/", "META-INF/versions/10/",
"META-INF/versions/11/");
}
@Test
void getWhenHasBadEntryParsesGoodVersionsAndEntries() {
List<ZipContent.Entry> entries = new ArrayList<>();
entries.add(mockEntry("META-INF/versions/9/Foo.class"));
entries.add(mockEntry("META-INF/versions/0x11/Foo.class"));
MetaInfVersionsInfo info = MetaInfVersionsInfo.get(entries.size(), entries::get);
assertThat(info.versions()).containsExactly(9);
assertThat(info.directories()).containsExactly("META-INF/versions/9/");
}
@Test
void getWhenHasNoEntriesReturnsNone() {
List<ZipContent.Entry> entries = new ArrayList<>();
MetaInfVersionsInfo info = MetaInfVersionsInfo.get(entries.size(), entries::get);
assertThat(info.versions()).isEmpty();
assertThat(info.directories()).isEmpty();
assertThat(info).isSameAs(MetaInfVersionsInfo.NONE);
}
@Test
void toleratesUnexpectedFileEntryInMetaInfVersions() {
List<ZipContent.Entry> entries = new ArrayList<>();
entries.add(mockEntry("META-INF/"));
entries.add(mockEntry("META-INF/MANIFEST.MF"));
entries.add(mockEntry("META-INF/versions/"));
entries.add(mockEntry("META-INF/versions/unexpected"));
entries.add(mockEntry("META-INF/versions/9/"));
entries.add(mockEntry("META-INF/versions/9/Foo.class"));
MetaInfVersionsInfo info = MetaInfVersionsInfo.get(entries.size(), entries::get);
assertThat(info.versions()).containsExactly(9);
assertThat(info.directories()).containsExactly("META-INF/versions/9/");
}
private ZipContent.Entry mockEntry(String name) {
ZipContent.Entry entry = mock(ZipContent.Entry.class);
given(entry.getName()).willReturn(name);
given(entry.hasNameStartingWith(any()))
.willAnswer((invocation) -> name.startsWith(invocation.getArgument(0, CharSequence.class).toString()));
given(entry.isDirectory()).willAnswer((invocation) -> name.endsWith("/"));
return entry;
}
}
| MetaInfVersionsInfoTests |
java | elastic__elasticsearch | modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java | {
"start": 757,
"end": 4194
} | class ____ extends ESTestCase {
public void testCreate() throws Exception {
DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
Map<String, Object> config = new HashMap<>();
config.put("field", "_field.field");
config.put("path", "_path");
DotExpanderProcessor processor = (DotExpanderProcessor) factory.create(null, "_tag", null, config, null);
assertThat(processor.getField(), equalTo("_field.field"));
assertThat(processor.getPath(), equalTo("_path"));
config = new HashMap<>();
config.put("field", "_field.field");
processor = (DotExpanderProcessor) factory.create(null, "_tag", null, config, null);
assertThat(processor.getField(), equalTo("_field.field"));
assertThat(processor.getPath(), nullValue());
}
public void testValidFields() throws Exception {
DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
String[] fields = new String[] { "a.b", "a.b.c", "a.b.c.d", "ab.cd" };
for (String field : fields) {
Map<String, Object> config = new HashMap<>();
config.put("field", field);
config.put("path", "_path");
DotExpanderProcessor processor = (DotExpanderProcessor) factory.create(null, "_tag", null, config, null);
assertThat(processor.getField(), equalTo(field));
assertThat(processor.getPath(), equalTo("_path"));
}
}
public void testCreate_fieldMissing() throws Exception {
DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
Map<String, Object> config = new HashMap<>();
config.put("path", "_path");
Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", null, config, null));
assertThat(e.getMessage(), equalTo("[field] required property is missing"));
}
public void testCreate_invalidFields() throws Exception {
DotExpanderProcessor.Factory factory = new DotExpanderProcessor.Factory();
String[] fields = new String[] { "a", "abc" };
for (String field : fields) {
Map<String, Object> config = new HashMap<>();
config.put("field", field);
Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", null, config, null));
assertThat(e.getMessage(), equalTo("[field] field does not contain a dot and is not a wildcard"));
}
fields = new String[] { ".a", "a.", "." };
for (String field : fields) {
Map<String, Object> config = new HashMap<>();
config.put("field", field);
Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", null, config, null));
assertThat(e.getMessage(), equalTo("[field] Field can't start or end with a dot"));
}
fields = new String[] { "a..b", "a...b", "a.b..c", "abc.def..hij" };
for (String field : fields) {
Map<String, Object> config = new HashMap<>();
config.put("field", field);
Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", null, config, null));
assertThat(e.getMessage(), equalTo("[field] No space between dots"));
}
}
}
| DotExpanderProcessorFactoryTests |
java | apache__avro | lang/java/protobuf/src/test/java/org/apache/avro/protobuf/multiplefiles/M.java | {
"start": 2642,
"end": 9529
} | enum ____ with the given numeric wire value.
*/
public static N forNumber(int value) {
switch (value) {
case 1:
return A;
default:
return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<N> internalGetValueMap() {
return internalValueMap;
}
private static final com.google.protobuf.Internal.EnumLiteMap<N> internalValueMap = new com.google.protobuf.Internal.EnumLiteMap<N>() {
public N findValueByNumber(int number) {
return N.forNumber(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
return org.apache.avro.protobuf.multiplefiles.M.getDescriptor().getEnumTypes().get(0);
}
private static final N[] VALUES = values();
public static N valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private N(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:org.apache.avro.protobuf.multiplefiles.M.N)
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1)
return true;
if (isInitialized == 0)
return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1)
return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.avro.protobuf.multiplefiles.M)) {
return super.equals(obj);
}
org.apache.avro.protobuf.multiplefiles.M other = (org.apache.avro.protobuf.multiplefiles.M) obj;
if (!getUnknownFields().equals(other.getUnknownFields()))
return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException {
return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.avro.protobuf.multiplefiles.M parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.avro.protobuf.multiplefiles.M parseDelimitedFrom(java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException {
return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input);
}
public static org.apache.avro.protobuf.multiplefiles.M parseFrom(com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException {
return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.avro.protobuf.multiplefiles.M prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* <pre>
* a nested enum
* </pre>
*
* Protobuf type {@code org.apache.avro.protobuf.multiplefiles.M}
*/
public static final | associated |
java | junit-team__junit5 | documentation/src/test/java/example/testkit/EngineTestKitDiscoveryDemo.java | {
"start": 743,
"end": 1163
} | class ____ {
@Test
void verifyJupiterDiscovery() {
EngineDiscoveryResults results = EngineTestKit.engine("junit-jupiter") // <1>
.selectors(selectClass(ExampleTestCase.class)) // <2>
.discover(); // <3>
assertEquals("JUnit Jupiter", results.getEngineDescriptor().getDisplayName()); // <4>
assertEquals(emptyList(), results.getDiscoveryIssues()); // <5>
}
}
// end::user_guide[]
| EngineTestKitDiscoveryDemo |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/BugCheckerInfo.java | {
"start": 1450,
"end": 1494
} | class ____ implements the check.
*/
public | that |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/checkreturnvalue/Api.java | {
"start": 5036,
"end": 6123
} | class ____ {
private final String api;
private final boolean assumeNoWhitespace;
private int position = -1;
Parser(String api, boolean assumeNoWhitespace) {
this.api = api;
this.assumeNoWhitespace = assumeNoWhitespace;
}
String owningType() {
StringBuilder buffer = new StringBuilder(api.length());
token:
do {
char next = nextLookingFor('#');
switch (next) {
case '#' -> {
// We've hit the end of the leading type, break out.
break token;
}
case '.' -> {
// OK, separator
}
case '-' -> {
// OK, used in Kotlin JvmName to prevent Java users.
}
default ->
checkArgument(
isJavaIdentifierPart(next),
"Unable to parse '%s' because '%s' is not a valid identifier",
api,
next);
}
buffer.append(next);
} while (true);
String type = buffer.toString();
check(!type.isEmpty(), api, " | Parser |
java | grpc__grpc-java | core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java | {
"start": 7224,
"end": 174132
} | class ____ {
  // Port used when the target URI carries no explicit port.
  private static final int DEFAULT_PORT = 447;
  // Shared method descriptor used by every RPC in this suite.
  private static final MethodDescriptor<String, Integer> method =
      MethodDescriptor.<String, Integer>newBuilder()
          .setType(MethodType.UNKNOWN)
          .setFullMethodName("service/method")
          .setRequestMarshaller(new StringMarshaller())
          .setResponseMarshaller(new IntegerMarshaller())
          .build();
  private static final Attributes.Key<String> SUBCHANNEL_ATTR_KEY =
      Attributes.Key.create("subchannel-attr-key");
  private static final long RECONNECT_BACKOFF_INTERVAL_NANOS = 10;
  private static final String SERVICE_NAME = "fake.example.com";
  private static final String AUTHORITY = SERVICE_NAME;
  private static final String USER_AGENT = "userAgent";
  private static final ClientTransportOptions clientTransportOptions =
      new ClientTransportOptions()
          .setAuthority(AUTHORITY)
          .setUserAgent(USER_AGENT);
  private static final String TARGET = "fake://" + SERVICE_NAME;
  private static final String MOCK_POLICY_NAME = "mock_lb";
  private static final NameResolver.Args NAMERESOLVER_ARGS = NameResolver.Args.newBuilder()
      .setDefaultPort(447)
      .setProxyDetector(mock(ProxyDetector.class))
      .setSynchronizationContext(
          new SynchronizationContext(mock(Thread.UncaughtExceptionHandler.class)))
      .setServiceConfigParser(mock(NameResolver.ServiceConfigParser.class))
      .setScheduledExecutorService(new FakeClock().getScheduledExecutorService())
      .build();
  private static final NameResolver.Args.Key<String> TEST_RESOLVER_CUSTOM_ARG_KEY =
      NameResolver.Args.Key.create("test-key");
  private URI expectedUri;
  // Two distinct address instances that print identically; tests rely on object
  // identity, not toString().
  private final SocketAddress socketAddress =
      new SocketAddress() {
        @Override
        public String toString() {
          return "test-addr";
        }
      };
  private final SocketAddress socketAddress2 =
      new SocketAddress() {
        @Override
        public String toString() {
          return "test-addr";
        }
      };
  private final EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(socketAddress);
  private final EquivalentAddressGroup addressGroup2 =
      new EquivalentAddressGroup(Arrays.asList(socketAddress, socketAddress2));
  // Deterministic fake executors/clocks; tests advance tasks and time manually.
  private final FakeClock timer = new FakeClock();
  private final FakeClock executor = new FakeClock();
  private final FakeClock balancerRpcExecutor = new FakeClock();
  private final InternalChannelz channelz = new InternalChannelz();
  private final MetricInstrumentRegistry metricInstrumentRegistry =
      MetricInstrumentRegistry.getDefaultRegistry();
  @Rule public final MockitoRule mocks = MockitoJUnit.rule();
  private ManagedChannelImpl channel;
  // LoadBalancer Helper captured when the channel exits idle mode (see createChannel()).
  private Helper helper;
  @Captor
  private ArgumentCaptor<Status> statusCaptor;
  @Captor
  private ArgumentCaptor<CallOptions> callOptionsCaptor;
  @Captor
  private ArgumentCaptor<ClientStreamTracer[]> tracersCaptor;
  @Mock
  private LoadBalancer mockLoadBalancer;
  @Mock
  private SubchannelStateListener subchannelStateListener;
  // Spy-style provider that hands out mockLoadBalancer; registered/deregistered
  // in setUp()/cleanUp() under MOCK_POLICY_NAME.
  private final LoadBalancerProvider mockLoadBalancerProvider =
      mock(LoadBalancerProvider.class, delegatesTo(new LoadBalancerProvider() {
        @Override
        public LoadBalancer newLoadBalancer(Helper helper) {
          return mockLoadBalancer;
        }
        @Override
        public boolean isAvailable() {
          return true;
        }
        @Override
        public int getPriority() {
          return 999;
        }
        @Override
        public String getPolicyName() {
          return MOCK_POLICY_NAME;
        }
      }));
  @Captor
  private ArgumentCaptor<ConnectivityStateInfo> stateInfoCaptor;
  @Mock
  private SubchannelPicker mockPicker;
  @Mock
  private ClientTransportFactory mockTransportFactory;
  @Mock
  private ClientCall.Listener<Integer> mockCallListener;
  @Mock
  private ClientCall.Listener<Integer> mockCallListener2;
  @Mock
  private ClientCall.Listener<Integer> mockCallListener3;
  @Mock
  private ClientCall.Listener<Integer> mockCallListener4;
  @Mock
  private ClientCall.Listener<Integer> mockCallListener5;
  @Mock
  private ObjectPool<Executor> executorPool;
  @Mock
  private ObjectPool<Executor> balancerRpcExecutorPool;
  @Mock
  private CallCredentials creds;
  @Mock
  private Executor offloadExecutor;
  private ManagedChannelImplBuilder channelBuilder;
  // When false, createChannel() leaves the channel in its initial idle mode.
  private boolean requestConnection = true;
  private BlockingQueue<MockClientTransportInfo> transports;
  // Set by tests that deliberately drive the channel into panic mode; checked in teardown.
  private boolean panicExpected;
  @Captor
  private ArgumentCaptor<ResolvedAddresses> resolvedAddressCaptor;
  private ArgumentCaptor<ClientStreamListener> streamListenerCaptor =
      ArgumentCaptor.forClass(ClientStreamListener.class);
private void createChannel(ClientInterceptor... interceptors) {
createChannel(false, interceptors);
}
  /**
   * Creates the channel under test. Unless {@link #requestConnection} is false, also forces the
   * channel out of its initial idle mode and captures the balancer {@link Helper} into
   * {@link #helper}.
   *
   * @param nameResolutionExpectedToFail when true, one resolution-retry task is expected to be
   *     pending on the fake timer after exiting idle mode
   */
  private void createChannel(boolean nameResolutionExpectedToFail,
      ClientInterceptor... interceptors) {
    checkState(channel == null);
    when(mockTransportFactory.getSupportedSocketAddressTypes()).thenReturn(Collections.singleton(
        InetSocketAddress.class));
    NameResolverProvider nameResolverProvider =
        channelBuilder.nameResolverRegistry.getProviderForScheme(expectedUri.getScheme());
    channel = new ManagedChannelImpl(
        channelBuilder, mockTransportFactory, expectedUri, nameResolverProvider,
        new FakeBackoffPolicyProvider(),
        balancerRpcExecutorPool, timer.getStopwatchSupplier(), Arrays.asList(interceptors),
        timer.getTimeProvider());
    if (requestConnection) {
      int numExpectedTasks = nameResolutionExpectedToFail ? 1 : 0;
      // Force-exit the initial idle-mode
      channel.syncContext.execute(new Runnable() {
        @Override
        public void run() {
          channel.exitIdleMode();
        }
      });
      // An enabled idle timeout contributes one more pending timer task.
      if (channelBuilder.idleTimeoutMillis != ManagedChannelImpl.IDLE_TIMEOUT_MILLIS_DISABLE) {
        numExpectedTasks += 1;
      }
      assertEquals(numExpectedTasks, timer.numPendingTasks());
      // Exiting idle mode creates the LoadBalancer; grab the Helper it was given.
      ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
      verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
      helper = helperCaptor.getValue();
    }
  }
  @Before
  public void setUp() throws Exception {
    // Default the LB mock to accepting resolved addresses so resolution succeeds
    // unless a test overrides this stubbing.
    when(mockLoadBalancer.acceptResolvedAddresses(isA(ResolvedAddresses.class))).thenReturn(
        Status.OK);
    LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider);
    expectedUri = new URI(TARGET);
    // Route every transport the factory creates into the `transports` queue.
    transports = TestUtils.captureTransports(mockTransportFactory);
    when(mockTransportFactory.getScheduledExecutorService())
        .thenReturn(timer.getScheduledExecutorService());
    when(executorPool.getObject()).thenReturn(executor.getScheduledExecutorService());
    when(balancerRpcExecutorPool.getObject())
        .thenReturn(balancerRpcExecutor.getScheduledExecutorService());
    channelBuilder = new ManagedChannelImplBuilder(TARGET,
        new UnsupportedClientTransportFactoryBuilder(), new FixedPortProvider(DEFAULT_PORT));
    channelBuilder.disableRetry();
    configureBuilder(channelBuilder);
  }
private void configureBuilder(ManagedChannelImplBuilder channelBuilder) {
channelBuilder
.nameResolverFactory(new FakeNameResolverFactory.Builder(expectedUri).build())
.defaultLoadBalancingPolicy(MOCK_POLICY_NAME)
.userAgent(USER_AGENT)
.idleTimeout(ManagedChannelImplBuilder.IDLE_MODE_MAX_TIMEOUT_DAYS, TimeUnit.DAYS)
.offloadExecutor(offloadExecutor);
channelBuilder.executorPool = executorPool;
channelBuilder.binlog = null;
channelBuilder.channelz = channelz;
}
  /** Asserts that each test drained all due tasks, then shuts the channel down. */
  @After
  public void allPendingTasksAreRun() throws Exception {
    // The "never" verifications in the tests only hold up if all due tasks are done.
    // As for timer, although there may be scheduled tasks in a future time, since we don't test
    // any time-related behavior in this test suite, we only care the tasks that are due. This
    // would ignore any time-sensitive tasks, e.g., back-off and the idle timer.
    assertTrue(timer.getDueTasks() + " should be empty", timer.getDueTasks().isEmpty());
    assertEquals(executor.getPendingTasks() + " should be empty", 0, executor.numPendingTasks());
    if (channel != null) {
      // Panic mode is a bug unless the test opted in via panicExpected.
      if (!panicExpected) {
        assertFalse(channel.isInPanicMode());
      }
      channel.shutdownNow();
      channel = null;
    }
  }
@After
public void cleanUp() {
LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider);
}
@Test
public void createSubchannel_outsideSynchronizationContextShouldThrow() {
createChannel();
try {
helper.createSubchannel(CreateSubchannelArgs.newBuilder()
.setAddresses(addressGroup)
.build());
fail("Should throw");
} catch (IllegalStateException e) {
assertThat(e).hasMessageThat().isEqualTo("Not called from the SynchronizationContext");
}
}
  /** A resolver-supplied authority override must be used when creating the transport. */
  @Test
  public void createSubchannel_resolverOverrideAuthority() {
    // The resolver attaches an authority-override attribute to the address group.
    EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(
        socketAddress,
        Attributes.newBuilder()
            .set(ATTR_AUTHORITY_OVERRIDE, "resolver.override.authority")
            .build());
    channelBuilder.nameResolverFactory(
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(addressGroup))
            .build());
    createChannel();
    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    requestConnectionSafely(helper, subchannel);
    ArgumentCaptor<ClientTransportOptions> transportOptionCaptor =
        ArgumentCaptor.forClass(ClientTransportOptions.class);
    verify(mockTransportFactory)
        .newClientTransport(
            any(SocketAddress.class), transportOptionCaptor.capture(), any(ChannelLogger.class));
    // The transport must carry the resolver-provided authority.
    assertThat(transportOptionCaptor.getValue().getAuthority())
        .isEqualTo("resolver.override.authority");
  }
  /** A builder-level authority override takes precedence over a resolver-supplied one. */
  @Test
  public void createSubchannel_channelBuilderOverrideAuthority() {
    channelBuilder.overrideAuthority("channel-builder.override.authority");
    // Resolver also supplies an override; the builder's must win.
    EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(
        socketAddress,
        Attributes.newBuilder()
            .set(ATTR_AUTHORITY_OVERRIDE, "resolver.override.authority")
            .build());
    channelBuilder.nameResolverFactory(
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(addressGroup))
            .build());
    createChannel();
    final Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    requestConnectionSafely(helper, subchannel);
    ArgumentCaptor<ClientTransportOptions> transportOptionCaptor =
        ArgumentCaptor.forClass(ClientTransportOptions.class);
    verify(mockTransportFactory)
        .newClientTransport(
            any(SocketAddress.class), transportOptionCaptor.capture(), any(ChannelLogger.class));
    assertThat(transportOptionCaptor.getValue().getAuthority())
        .isEqualTo("channel-builder.override.authority");
    // getAllAddresses() must be called inside the sync context.
    final List<EquivalentAddressGroup> subchannelEags = new ArrayList<>();
    helper.getSynchronizationContext().execute(
        new Runnable() {
          @Override
          public void run() {
            subchannelEags.addAll(subchannel.getAllAddresses());
          }
        });
    assertThat(subchannelEags).isEqualTo(ImmutableList.of(addressGroup));
  }
@Test
public void idleModeDisabled() {
channelBuilder.nameResolverFactory(
new FakeNameResolverFactory.Builder(expectedUri)
.setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
.build());
createChannel();
// In this test suite, the channel is always created with idle mode disabled.
// No task is scheduled to enter idle mode
assertEquals(0, timer.numPendingTasks());
assertEquals(0, executor.numPendingTasks());
}
@Test
public void immediateDeadlineExceeded() {
createChannel();
ClientCall<String, Integer> call =
channel.newCall(method, CallOptions.DEFAULT.withDeadlineAfter(0, TimeUnit.NANOSECONDS));
call.start(mockCallListener, new Metadata());
assertEquals(1, executor.runDueTasks());
verify(mockCallListener).onClose(statusCaptor.capture(), any(Metadata.class));
Status status = statusCaptor.getValue();
assertSame(Status.DEADLINE_EXCEEDED.getCode(), status.getCode());
}
  /**
   * An RPC started before name resolution completes buffers in the delayed transport and later
   * picks up service-config settings (waitForReady) once resolution finishes.
   */
  @Test
  public void startCallBeforeNameResolution() throws Exception {
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(ImmutableList.of(addressGroup)).build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    when(mockTransportFactory.getSupportedSocketAddressTypes()).thenReturn(Collections.singleton(
        InetSocketAddress.class));
    channel = new ManagedChannelImpl(
        channelBuilder, mockTransportFactory, expectedUri, nameResolverFactory,
        new FakeBackoffPolicyProvider(),
        balancerRpcExecutorPool, timer.getStopwatchSupplier(),
        Collections.<ClientInterceptor>emptyList(), timer.getTimeProvider());
    // Service config enables waitForReady for all methods of "service".
    Map<String, Object> rawServiceConfig =
        parseConfig("{\"methodConfig\":[{"
            + "\"name\":[{\"service\":\"service\"}],"
            + "\"waitForReady\":true}]}");
    ManagedChannelServiceConfig managedChannelServiceConfig =
        createManagedChannelServiceConfig(rawServiceConfig, null);
    nameResolverFactory.nextConfigOrError.set(
        ConfigOrError.fromConfig(managedChannelServiceConfig));
    Metadata headers = new Metadata();
    ClientStream mockStream = mock(ClientStream.class);
    // Start the call first; it buffers until a transport becomes available.
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, headers);
    ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
    verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
    helper = helperCaptor.getValue();
    // Make the transport available
    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    // No transport until a connection is explicitly requested.
    verify(mockTransportFactory, never())
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
    requestConnectionSafely(helper, subchannel);
    verify(mockTransportFactory)
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo transportInfo = transports.poll();
    ConnectionClientTransport mockTransport = transportInfo.transport;
    ManagedClientTransport.Listener transportListener = transportInfo.listener;
    when(mockTransport.newStream(
            same(method), same(headers), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    transportListener.transportReady();
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel));
    updateBalancingStateSafely(helper, READY, mockPicker);
    executor.runDueTasks();
    ArgumentCaptor<CallOptions> callOptionsCaptor = ArgumentCaptor.forClass(CallOptions.class);
    verify(mockTransport).newStream(
        same(method), same(headers), callOptionsCaptor.capture(),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    // The waitForReady from the service config must have been applied to the buffered call.
    assertThat(callOptionsCaptor.getValue().isWaitForReady()).isTrue();
    verify(mockStream).start(streamListenerCaptor.capture());
    // Clean up as much as possible to allow the channel to terminate.
    shutdownSafely(helper, subchannel);
    timer.forwardNanos(
        TimeUnit.SECONDS.toNanos(ManagedChannelImpl.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS));
  }
  /**
   * A resolver-provided {@link InternalConfigSelector} can inject a per-call interceptor; the
   * CallOptions the interceptor derives from request headers must reach the transport.
   */
  @Test
  public void newCallWithConfigSelector() {
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(ImmutableList.of(addressGroup)).build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    when(mockTransportFactory.getSupportedSocketAddressTypes()).thenReturn(Collections.singleton(
        InetSocketAddress.class));
    channel = new ManagedChannelImpl(
        channelBuilder, mockTransportFactory, expectedUri, nameResolverFactory,
        new FakeBackoffPolicyProvider(),
        balancerRpcExecutorPool, timer.getStopwatchSupplier(),
        Collections.<ClientInterceptor>emptyList(), timer.getTimeProvider());
    nameResolverFactory.nextConfigOrError.set(
        ConfigOrError.fromConfig(ManagedChannelServiceConfig.empty()));
    final Metadata.Key<String> metadataKey =
        Metadata.Key.of("test", Metadata.ASCII_STRING_MARSHALLER);
    final CallOptions.Key<String> callOptionsKey = CallOptions.Key.create("test");
    InternalConfigSelector configSelector = new InternalConfigSelector() {
      @Override
      public Result selectConfig(final PickSubchannelArgs args) {
        return Result.newBuilder()
            .setConfig(ManagedChannelServiceConfig.empty())
            .setInterceptor(
                // An interceptor that mutates CallOptions based on headers value.
                new ClientInterceptor() {
                  String value = args.getHeaders().get(metadataKey);
                  @Override
                  public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
                      MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, Channel next) {
                    callOptions = callOptions.withOption(callOptionsKey, value);
                    return next.newCall(method, callOptions);
                  }
                })
            .build();
      }
    };
    nameResolverFactory.nextAttributes.set(
        Attributes.newBuilder().set(InternalConfigSelector.KEY, configSelector).build());
    // Triggers name resolution, which delivers the config selector.
    channel.getState(true);
    Metadata headers = new Metadata();
    headers.put(metadataKey, "fooValue");
    ClientStream mockStream = mock(ClientStream.class);
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, headers);
    ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
    verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
    helper = helperCaptor.getValue();
    // Make the transport available
    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    requestConnectionSafely(helper, subchannel);
    verify(mockTransportFactory)
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo transportInfo = transports.poll();
    ConnectionClientTransport mockTransport = transportInfo.transport;
    ManagedClientTransport.Listener transportListener = transportInfo.listener;
    when(mockTransport.newStream(
            same(method), same(headers), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    transportListener.transportReady();
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel));
    updateBalancingStateSafely(helper, READY, mockPicker);
    executor.runDueTasks();
    ArgumentCaptor<CallOptions> callOptionsCaptor = ArgumentCaptor.forClass(CallOptions.class);
    verify(mockTransport).newStream(
        same(method), same(headers), callOptionsCaptor.capture(),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    // The interceptor copied the header value into the call option.
    assertThat(callOptionsCaptor.getValue().getOption(callOptionsKey)).isEqualTo("fooValue");
    verify(mockStream).start(streamListenerCaptor.capture());
    // Clean up as much as possible to allow the channel to terminate.
    shutdownSafely(helper, subchannel);
    timer.forwardNanos(
        TimeUnit.SECONDS.toNanos(ManagedChannelImpl.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS));
  }
  /** Labels added via PickDetailsConsumer during a pick are forwarded to the stream tracer. */
  @Test
  public void pickSubchannelAddOptionalLabel_callsTracer() {
    channelBuilder.directExecutor();
    createChannel();
    // Picker records a label, then fails the pick; the label must still reach the tracer.
    updateBalancingStateSafely(helper, TRANSIENT_FAILURE, new SubchannelPicker() {
      @Override
      public PickResult pickSubchannel(PickSubchannelArgs args) {
        args.getPickDetailsConsumer().addOptionalLabel("routed", "perfectly");
        return PickResult.withError(Status.UNAVAILABLE.withDescription("expected"));
      }
    });
    ClientStreamTracer tracer = mock(ClientStreamTracer.class);
    ClientStreamTracer.Factory tracerFactory = new ClientStreamTracer.Factory() {
      @Override
      public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
        return tracer;
      }
    };
    ClientCall<String, Integer> call = channel.newCall(
        method, CallOptions.DEFAULT.withStreamTracerFactory(tracerFactory));
    call.start(mockCallListener, new Metadata());
    verify(tracer).addOptionalLabel("routed", "perfectly");
  }
  /** Metrics recorded through the Helper's MetricRecorder are forwarded to configured sinks. */
  @Test
  public void metricRecorder_recordsToMetricSink() {
    MetricSink mockSink = mock(MetricSink.class);
    channelBuilder.addMetricSink(mockSink);
    createChannel();
    LongCounterMetricInstrument counter = metricInstrumentRegistry.registerLongCounter(
        "recorder_duration", "Time taken by metric recorder", "s",
        ImmutableList.of("grpc.method"), Collections.emptyList(), false);
    List<String> requiredLabelValues = ImmutableList.of("testMethod");
    List<String> optionalLabelValues = Collections.emptyList();
    helper.getMetricRecorder()
        .addLongCounter(counter, 32, requiredLabelValues, optionalLabelValues);
    // The sink sees the same instrument, value, and label sets.
    verify(mockSink).addLongCounter(eq(counter), eq(32L), eq(requiredLabelValues),
        eq(optionalLabelValues));
  }
  /** The MetricRecorder exposed via NameResolver.Args fans out to every registered sink. */
  @Test
  public void metricRecorder_fromNameResolverArgs_recordsToMetricSink() {
    MetricSink mockSink1 = mock(MetricSink.class);
    MetricSink mockSink2 = mock(MetricSink.class);
    channelBuilder.addMetricSink(mockSink1);
    channelBuilder.addMetricSink(mockSink2);
    createChannel();
    LongCounterMetricInstrument counter = metricInstrumentRegistry.registerLongCounter(
        "test_counter", "Time taken by metric recorder", "s",
        ImmutableList.of("grpc.method"), Collections.emptyList(), false);
    List<String> requiredLabelValues = ImmutableList.of("testMethod");
    List<String> optionalLabelValues = Collections.emptyList();
    NameResolver.Args args = helper.getNameResolverArgs();
    assertThat(args.getMetricRecorder()).isNotNull();
    args.getMetricRecorder()
        .addLongCounter(counter, 10, requiredLabelValues, optionalLabelValues);
    // Both sinks must receive the recorded value.
    verify(mockSink1).addLongCounter(eq(counter), eq(10L), eq(requiredLabelValues),
        eq(optionalLabelValues));
    verify(mockSink2).addLongCounter(eq(counter), eq(10L), eq(requiredLabelValues),
        eq(optionalLabelValues));
  }
@Test
public void shutdownWithNoTransportsEverCreated() {
channelBuilder.nameResolverFactory(
new FakeNameResolverFactory.Builder(expectedUri)
.setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
.build());
createChannel();
verify(executorPool).getObject();
verify(executorPool, never()).returnObject(any());
channel.shutdown();
assertTrue(channel.isShutdown());
assertTrue(channel.isTerminated());
verify(executorPool).returnObject(executor.getScheduledExecutorService());
}
  /** A call pending on name resolution survives shutdown() but fails CANCELLED on shutdownNow(). */
  @Test
  public void shutdownNow_pendingCallShouldFail() {
    // Resolver never resolves, so the call stays buffered in the delayed transport.
    channelBuilder.nameResolverFactory(
        new FakeNameResolverFactory.Builder(expectedUri)
            .setResolvedAtStart(false)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build());
    createChannel();
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, new Metadata());
    channel.shutdown();
    executor.runDueTasks();
    // Graceful shutdown lets the pending call live on.
    verify(mockCallListener, never()).onClose(any(Status.class), any(Metadata.class));
    channel.shutdownNow();
    executor.runDueTasks();
    verify(mockCallListener).onClose(statusCaptor.capture(), any(Metadata.class));
    assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.CANCELLED);
  }
  /** A call started after shutdown() fails immediately with UNAVAILABLE. */
  @Test
  public void shutdownWithNoNameResolution_newCallShouldFail() {
    channelBuilder.nameResolverFactory(
        new FakeNameResolverFactory.Builder(expectedUri)
            .setResolvedAtStart(false)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build());
    createChannel();
    channel.shutdown();
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, new Metadata());
    executor.runDueTasks();
    verify(mockCallListener).onClose(statusCaptor.capture(), any(Metadata.class));
    assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE);
  }
  /** The channel registers as a channelz root channel and deregisters on termination. */
  @Test
  public void channelzMembership() throws Exception {
    createChannel();
    assertNotNull(channelz.getRootChannel(channel.getLogId().getId()));
    // A root channel is never listed as a subchannel.
    assertFalse(channelz.containsSubchannel(channel.getLogId()));
    channel.shutdownNow();
    channel.awaitTermination(5, TimeUnit.SECONDS);
    assertNull(channelz.getRootChannel(channel.getLogId().getId()));
    assertFalse(channelz.containsSubchannel(channel.getLogId()));
  }
  /** Subchannels and their sockets track channelz membership across their lifecycle. */
  @Test
  public void channelzMembership_subchannel() throws Exception {
    createChannel();
    assertNotNull(channelz.getRootChannel(channel.getLogId().getId()));
    AbstractSubchannel subchannel =
        (AbstractSubchannel) createSubchannelSafely(
            helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    // subchannels are not root channels
    assertNull(
        channelz.getRootChannel(subchannel.getInstrumentedInternalSubchannel().getLogId().getId()));
    assertTrue(
        channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId()));
    assertThat(getStats(channel).subchannels)
        .containsExactly(subchannel.getInstrumentedInternalSubchannel());
    requestConnectionSafely(helper, subchannel);
    // Creating a transport registers its client socket with channelz.
    MockClientTransportInfo transportInfo = transports.poll();
    assertNotNull(transportInfo);
    assertTrue(channelz.containsClientSocket(transportInfo.transport.getLogId()));
    transportInfo.listener.transportReady();
    // terminate transport
    transportInfo.listener.transportShutdown(Status.CANCELLED);
    transportInfo.listener.transportTerminated();
    assertFalse(channelz.containsClientSocket(transportInfo.transport.getLogId()));
    // terminate subchannel
    assertTrue(
        channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId()));
    shutdownSafely(helper, subchannel);
    // The subchannel lingers for the shutdown-delay before deregistering.
    timer.forwardTime(ManagedChannelImpl.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
    timer.runDueTasks();
    assertFalse(
        channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId()));
    assertThat(getStats(channel).subchannels).isEmpty();
    // channel still appears
    assertNotNull(channelz.getRootChannel(channel.getLogId().getId()));
  }
  /** OOB channels register as channelz subchannels of the parent channel, never as roots. */
  @Test
  public void channelzMembership_oob() throws Exception {
    createChannel();
    OobChannel oob = (OobChannel) helper.createOobChannel(
        Collections.singletonList(addressGroup), AUTHORITY);
    // oob channels are not root channels
    assertNull(channelz.getRootChannel(oob.getLogId().getId()));
    assertTrue(channelz.containsSubchannel(oob.getLogId()));
    assertThat(getStats(channel).subchannels).containsExactly(oob);
    assertTrue(channelz.containsSubchannel(oob.getLogId()));
    // The OOB channel's own subchannel is tracked underneath it.
    AbstractSubchannel subchannel = (AbstractSubchannel) oob.getSubchannel();
    assertTrue(
        channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId()));
    assertThat(getStats(oob).subchannels)
        .containsExactly(subchannel.getInstrumentedInternalSubchannel());
    assertTrue(
        channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId()));
    oob.getSubchannel().requestConnection();
    MockClientTransportInfo transportInfo = transports.poll();
    assertNotNull(transportInfo);
    assertTrue(channelz.containsClientSocket(transportInfo.transport.getLogId()));
    // terminate transport
    transportInfo.listener.transportShutdown(Status.INTERNAL);
    transportInfo.listener.transportTerminated();
    assertFalse(channelz.containsClientSocket(transportInfo.transport.getLogId()));
    // terminate oobchannel
    oob.shutdown();
    assertFalse(channelz.containsSubchannel(oob.getLogId()));
    assertThat(getStats(channel).subchannels).isEmpty();
    assertFalse(
        channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId()));
    // channel still appears
    assertNotNull(channelz.getRootChannel(channel.getLogId().getId()));
  }
  /** Graceful shutdown with in-flight calls. */
  @Test
  public void callsAndShutdown() {
    subtestCallsAndShutdown(false, false);
  }
  /** Immediate shutdownNow() with in-flight calls. */
  @Test
  public void callsAndShutdownNow() {
    subtestCallsAndShutdown(true, false);
  }
  /** Make sure shutdownNow() after shutdown() has an effect. */
  @Test
  public void callsAndShutdownAndShutdownNow() {
    subtestCallsAndShutdown(false, true);
  }
private void subtestCallsAndShutdown(boolean shutdownNow, boolean shutdownNowAfterShutdown) {
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri).build();
channelBuilder.nameResolverFactory(nameResolverFactory);
createChannel();
verify(executorPool).getObject();
ClientStream mockStream = mock(ClientStream.class);
ClientStream mockStream2 = mock(ClientStream.class);
Metadata headers = new Metadata();
Metadata headers2 = new Metadata();
// Configure the picker so that first RPC goes to delayed transport, and second RPC goes to
// real transport.
Subchannel subchannel =
createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
requestConnectionSafely(helper, subchannel);
verify(mockTransportFactory)
.newClientTransport(
any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
MockClientTransportInfo transportInfo = transports.poll();
ConnectionClientTransport mockTransport = transportInfo.transport;
verify(mockTransport).start(any(ManagedClientTransport.Listener.class));
ManagedClientTransport.Listener transportListener = transportInfo.listener;
when(mockTransport.newStream(
same(method), same(headers), same(CallOptions.DEFAULT),
ArgumentMatchers.<ClientStreamTracer[]>any()))
.thenReturn(mockStream);
when(mockTransport.newStream(
same(method), same(headers2), same(CallOptions.DEFAULT),
ArgumentMatchers.<ClientStreamTracer[]>any()))
.thenReturn(mockStream2);
transportListener.transportReady();
when(mockPicker.pickSubchannel(
eqPickSubchannelArgs(method, headers, CallOptions.DEFAULT))).thenReturn(
PickResult.withNoResult());
when(mockPicker.pickSubchannel(
eqPickSubchannelArgs(method, headers2, CallOptions.DEFAULT))).thenReturn(
PickResult.withSubchannel(subchannel));
updateBalancingStateSafely(helper, READY, mockPicker);
// First RPC, will be pending
ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
verify(mockTransportFactory)
.newClientTransport(
any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
call.start(mockCallListener, headers);
verify(mockTransport, never()).newStream(
same(method), same(headers), same(CallOptions.DEFAULT),
ArgumentMatchers.<ClientStreamTracer[]>any());
// Second RPC, will be assigned to the real transport
ClientCall<String, Integer> call2 = channel.newCall(method, CallOptions.DEFAULT);
call2.start(mockCallListener2, headers2);
verify(mockTransport).newStream(
same(method), same(headers2), same(CallOptions.DEFAULT),
ArgumentMatchers.<ClientStreamTracer[]>any());
verify(mockTransport).newStream(
same(method), same(headers2), same(CallOptions.DEFAULT),
ArgumentMatchers.<ClientStreamTracer[]>any());
verify(mockStream2).start(any(ClientStreamListener.class));
// Shutdown
if (shutdownNow) {
channel.shutdownNow();
} else {
channel.shutdown();
if (shutdownNowAfterShutdown) {
channel.shutdownNow();
shutdownNow = true;
}
}
assertTrue(channel.isShutdown());
assertFalse(channel.isTerminated());
assertThat(nameResolverFactory.resolvers).hasSize(1);
verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
// Further calls should fail without going to the transport
ClientCall<String, Integer> call3 = channel.newCall(method, CallOptions.DEFAULT);
call3.start(mockCallListener3, headers2);
timer.runDueTasks();
executor.runDueTasks();
verify(mockCallListener3).onClose(statusCaptor.capture(), any(Metadata.class));
assertSame(Status.Code.UNAVAILABLE, statusCaptor.getValue().getCode());
if (shutdownNow) {
// LoadBalancer and NameResolver are shut down as soon as delayed transport is terminated.
verify(mockLoadBalancer).shutdown();
assertTrue(nameResolverFactory.resolvers.get(0).shutdown);
// call should have been aborted by delayed transport
executor.runDueTasks();
verify(mockCallListener).onClose(same(ManagedChannelImpl.SHUTDOWN_NOW_STATUS),
any(Metadata.class));
} else {
// LoadBalancer and NameResolver are still running.
verify(mockLoadBalancer, never()).shutdown();
assertFalse(nameResolverFactory.resolvers.get(0).shutdown);
// call and call2 are still alive, and can still be assigned to a real transport
SubchannelPicker picker2 = mock(SubchannelPicker.class);
when(picker2.pickSubchannel(eqPickSubchannelArgs(method, headers, CallOptions.DEFAULT)))
.thenReturn(PickResult.withSubchannel(subchannel));
updateBalancingStateSafely(helper, READY, picker2);
executor.runDueTasks();
verify(mockTransport).newStream(
same(method), same(headers), same(CallOptions.DEFAULT),
ArgumentMatchers.<ClientStreamTracer[]>any());
verify(mockStream).start(any(ClientStreamListener.class));
}
// After call is moved out of delayed transport, LoadBalancer, NameResolver and the transports
// will be shutdown.
verify(mockLoadBalancer).shutdown();
assertTrue(nameResolverFactory.resolvers.get(0).shutdown);
if (shutdownNow) {
// Channel shutdownNow() all subchannels after shutting down LoadBalancer
verify(mockTransport).shutdownNow(ManagedChannelImpl.SHUTDOWN_NOW_STATUS);
} else {
verify(mockTransport, never()).shutdownNow(any(Status.class));
}
// LoadBalancer should shutdown the subchannel
shutdownSafely(helper, subchannel);
if (shutdownNow) {
verify(mockTransport).shutdown(same(ManagedChannelImpl.SHUTDOWN_NOW_STATUS));
} else {
verify(mockTransport).shutdown(same(ManagedChannelImpl.SHUTDOWN_STATUS));
}
// Killing the remaining real transport will terminate the channel
transportListener.transportShutdown(Status.UNAVAILABLE);
assertFalse(channel.isTerminated());
verify(executorPool, never()).returnObject(any());
transportListener.transportTerminated();
assertTrue(channel.isTerminated());
verify(executorPool).returnObject(executor.getScheduledExecutorService());
verifyNoMoreInteractions(balancerRpcExecutorPool);
verify(mockTransportFactory)
.newClientTransport(
any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
verify(mockTransportFactory).close();
verify(mockTransport, atLeast(0)).getLogId();
verifyNoMoreInteractions(mockTransport);
}
  /**
   * After the LoadBalancer is shut down, resolver errors, resolution results, and subchannel state
   * changes must no longer be delivered to it (listeners only see SHUTDOWN).
   */
  @Test
  public void noMoreCallbackAfterLoadBalancerShutdown() {
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    Status resolutionError = Status.UNAVAILABLE.withDescription("Resolution failed");
    createChannel();
    FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0);
    verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
    verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture());
    assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup);
    SubchannelStateListener stateListener1 = mock(SubchannelStateListener.class);
    SubchannelStateListener stateListener2 = mock(SubchannelStateListener.class);
    Subchannel subchannel1 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener1);
    Subchannel subchannel2 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener2);
    requestConnectionSafely(helper, subchannel1);
    requestConnectionSafely(helper, subchannel2);
    verify(mockTransportFactory, times(2))
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo transportInfo1 = transports.poll();
    MockClientTransportInfo transportInfo2 = transports.poll();
    // LoadBalancer receives all sorts of callbacks
    transportInfo1.listener.transportReady();
    verify(stateListener1, times(2)).onSubchannelState(stateInfoCaptor.capture());
    assertSame(CONNECTING, stateInfoCaptor.getAllValues().get(0).getState());
    assertSame(READY, stateInfoCaptor.getAllValues().get(1).getState());
    verify(stateListener2).onSubchannelState(stateInfoCaptor.capture());
    assertSame(CONNECTING, stateInfoCaptor.getValue().getState());
    resolver.listener.onError(resolutionError);
    verify(mockLoadBalancer).handleNameResolutionError(resolutionError);
    verifyNoMoreInteractions(mockLoadBalancer);
    channel.shutdown();
    verify(mockLoadBalancer).shutdown();
    verifyNoMoreInteractions(stateListener1, stateListener2);
    // LoadBalancer will normally shutdown all subchannels
    shutdownSafely(helper, subchannel1);
    shutdownSafely(helper, subchannel2);
    // Since subchannels are shutdown, SubchannelStateListeners will only get SHUTDOWN regardless of
    // the transport states.
    transportInfo1.listener.transportShutdown(Status.UNAVAILABLE);
    transportInfo2.listener.transportReady();
    verify(stateListener1).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN));
    verify(stateListener2).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN));
    verifyNoMoreInteractions(stateListener1, stateListener2);
    // No more callback should be delivered to LoadBalancer after it's shut down
    resolver.listener.onError(resolutionError);
    resolver.resolved();
    verifyNoMoreInteractions(mockLoadBalancer);
  }
  /**
   * Same as {@code noMoreCallbackAfterLoadBalancerShutdown}, but the resolution failure is
   * delivered via {@code onResult2} with an addresses-or-error Status instead of via
   * {@code onError}. Post-shutdown results must be dropped and must not schedule any timer work.
   */
  @Test
  public void noMoreCallbackAfterLoadBalancerShutdown_configError() throws InterruptedException {
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    Status resolutionError = Status.UNAVAILABLE.withDescription("Resolution failed");
    createChannel();
    FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0);
    verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
    verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture());
    assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup);
    SubchannelStateListener stateListener1 = mock(SubchannelStateListener.class);
    SubchannelStateListener stateListener2 = mock(SubchannelStateListener.class);
    Subchannel subchannel1 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener1);
    Subchannel subchannel2 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener2);
    requestConnectionSafely(helper, subchannel1);
    requestConnectionSafely(helper, subchannel2);
    verify(mockTransportFactory, times(2))
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo transportInfo1 = transports.poll();
    MockClientTransportInfo transportInfo2 = transports.poll();
    // LoadBalancer receives all sorts of callbacks
    transportInfo1.listener.transportReady();
    verify(stateListener1, times(2)).onSubchannelState(stateInfoCaptor.capture());
    assertSame(CONNECTING, stateInfoCaptor.getAllValues().get(0).getState());
    assertSame(READY, stateInfoCaptor.getAllValues().get(1).getState());
    verify(stateListener2).onSubchannelState(stateInfoCaptor.capture());
    assertSame(CONNECTING, stateInfoCaptor.getValue().getState());
    // onResult2 must be invoked from the sync context.
    channel.syncContext.execute(() ->
        resolver.listener.onResult2(
            ResolutionResult.newBuilder()
                .setAddressesOrError(StatusOr.fromStatus(resolutionError)).build()));
    verify(mockLoadBalancer).handleNameResolutionError(resolutionError);
    verifyNoMoreInteractions(mockLoadBalancer);
    channel.shutdown();
    verify(mockLoadBalancer).shutdown();
    verifyNoMoreInteractions(stateListener1, stateListener2);
    // LoadBalancer will normally shutdown all subchannels
    shutdownSafely(helper, subchannel1);
    shutdownSafely(helper, subchannel2);
    // Since subchannels are shutdown, SubchannelStateListeners will only get SHUTDOWN regardless of
    // the transport states.
    transportInfo1.listener.transportShutdown(Status.UNAVAILABLE);
    transportInfo2.listener.transportReady();
    verify(stateListener1).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN));
    verify(stateListener2).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN));
    verifyNoMoreInteractions(stateListener1, stateListener2);
    // No more callback should be delivered to LoadBalancer after it's shut down
    channel.syncContext.execute(() ->
        resolver.listener.onResult2(
            ResolutionResult.newBuilder()
                .setAddressesOrError(StatusOr.fromStatus(resolutionError)).build()));
    // A dropped result must not leave retry/backoff work pending on the timer.
    assertThat(timer.getPendingTasks()).isEmpty();
    resolver.resolved();
    verifyNoMoreInteractions(mockLoadBalancer);
  }
  /**
   * Verifies that when the very first name resolution fails, the channel falls back to the
   * builder-provided default service config (and records a channel-trace event saying so), and
   * that a later successful resolution carrying an identical config does not emit a
   * "Service config changed" trace event.
   */
  @Test
  public void addressResolutionError_noPriorNameResolution_usesDefaultServiceConfig()
      throws Exception {
    Map<String, Object> rawServiceConfig =
        parseConfig("{\"methodConfig\":[{"
            + "\"name\":[{\"service\":\"service\"}],"
            + "\"waitForReady\":true}]}");
    ManagedChannelServiceConfig managedChannelServiceConfig =
        createManagedChannelServiceConfig(rawServiceConfig, null);
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .setResolvedAtStart(false)
            .build();
    nameResolverFactory.nextConfigOrError.set(
        ConfigOrError.fromConfig(managedChannelServiceConfig));
    channelBuilder.nameResolverFactory(nameResolverFactory);
    // Default config is textually identical to the resolver-provided one, so the later
    // successful resolution is a no-op config-wise.
    Map<String, Object> defaultServiceConfig =
        parseConfig("{\"methodConfig\":[{"
            + "\"name\":[{\"service\":\"service\"}],"
            + "\"waitForReady\":true}]}");
    channelBuilder.defaultServiceConfig(defaultServiceConfig);
    Status resolutionError = Status.UNAVAILABLE.withDescription("Resolution failed");
    channelBuilder.maxTraceEvents(10);
    createChannel();
    FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0);
    resolver.listener.onError(resolutionError);
    // The default config should now be selectable despite the resolution error.
    InternalConfigSelector configSelector = channel.getConfigSelector();
    ManagedChannelServiceConfig config =
        (ManagedChannelServiceConfig) configSelector.selectConfig(null).getConfig();
    MethodInfo methodConfig = config.getMethodConfig(method);
    assertThat(methodConfig.waitForReady).isTrue();
    timer.forwardNanos(1234);
    assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Initial Name Resolution error, using default service config")
        .setSeverity(Severity.CT_ERROR)
        .setTimestampNanos(0)
        .build());
    // Check that "lastServiceConfig" variable has been set above: a config resolution with the same
    // config simply gets ignored and not gets reassigned.
    resolver.resolved();
    timer.forwardNanos(1234);
    assertThat(Iterables.filter(
            getStats(channel).channelTrace.events,
            event -> event.description.equals("Service config changed")))
        .isEmpty();
  }
@Test
public void interceptor() throws Exception {
final AtomicLong atomic = new AtomicLong();
ClientInterceptor interceptor = new ClientInterceptor() {
@Override
public <RequestT, ResponseT> ClientCall<RequestT, ResponseT> interceptCall(
MethodDescriptor<RequestT, ResponseT> method, CallOptions callOptions,
Channel next) {
atomic.set(1);
return next.newCall(method, callOptions);
}
};
createChannel(interceptor);
assertNotNull(channel.newCall(method, CallOptions.DEFAULT));
assertEquals(1, atomic.get());
}
  /**
   * Verifies that when CallOptions carries its own executor, both the start of a previously
   * buffered stream and the call listener callbacks run on that executor rather than the
   * channel's default executor.
   */
  @Test
  public void callOptionsExecutor() {
    Metadata headers = new Metadata();
    ClientStream mockStream = mock(ClientStream.class);
    FakeClock callExecutor = new FakeClock();
    createChannel();

    // Start a call with a call executor
    CallOptions options =
        CallOptions.DEFAULT.withExecutor(callExecutor.getScheduledExecutorService());
    ClientCall<String, Integer> call = channel.newCall(method, options);
    call.start(mockCallListener, headers);

    // Make the transport available
    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    verify(mockTransportFactory, never())
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
    requestConnectionSafely(helper, subchannel);
    verify(mockTransportFactory)
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo transportInfo = transports.poll();
    ConnectionClientTransport mockTransport = transportInfo.transport;
    ManagedClientTransport.Listener transportListener = transportInfo.listener;
    when(mockTransport.newStream(
            same(method), same(headers), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    transportListener.transportReady();
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel));
    assertEquals(0, callExecutor.numPendingTasks());
    updateBalancingStateSafely(helper, READY, mockPicker);

    // Real streams are started in the call executor if they were previously buffered.
    assertEquals(1, callExecutor.runDueTasks());
    verify(mockTransport).newStream(
        same(method), same(headers), same(options), ArgumentMatchers.<ClientStreamTracer[]>any());
    verify(mockStream).start(streamListenerCaptor.capture());

    // Call listener callbacks are also run in the call executor
    ClientStreamListener streamListener = streamListenerCaptor.getValue();
    Metadata trailers = new Metadata();
    assertEquals(0, callExecutor.numPendingTasks());
    streamListener.closed(Status.CANCELLED, PROCESSED, trailers);
    // Not delivered until the call executor runs its due task.
    verify(mockCallListener, never()).onClose(same(Status.CANCELLED), same(trailers));
    assertEquals(1, callExecutor.runDueTasks());
    verify(mockCallListener).onClose(same(Status.CANCELLED), same(trailers));

    transportListener.transportShutdown(Status.UNAVAILABLE);
    transportListener.transportTerminated();

    // Clean up as much as possible to allow the channel to terminate.
    shutdownSafely(helper, subchannel);
    timer.forwardNanos(
        TimeUnit.SECONDS.toNanos(ManagedChannelImpl.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS));
  }
  /**
   * Verifies that a RuntimeException thrown by the LoadBalancer's acceptResolvedAddresses() is
   * caught by the channel's sync context and sends the channel into panic mode.
   */
  @Test
  public void loadBalancerThrowsInHandleResolvedAddresses() {
    RuntimeException ex = new RuntimeException("simulated");
    // Delay the success of name resolution until allResolved() is called
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setResolvedAtStart(false)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();
    verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
    doThrow(ex).when(mockLoadBalancer).acceptResolvedAddresses(any(ResolvedAddresses.class));
    // NameResolver returns addresses.
    nameResolverFactory.allResolved();
    // Exception thrown from balancer is caught by ChannelExecutor, making channel enter panic mode.
    verifyPanicMode(ex);
  }
  /**
   * Verifies that when name resolution completes after a call has started, the call's
   * CallOptions records the name-resolution delay (NAME_RESOLUTION_DELAYED) and the stream
   * tracer factory from CallOptions is propagated to the transport's tracer array.
   *
   * <p>NOTE(review): this test uses a real {@code Thread.sleep(500)} and asserts the measured
   * delay is at least 400ms — presumably the delay is measured with a real clock rather than
   * the FakeClock, which makes the test wall-clock dependent; confirm before changing.
   */
  @Test
  public void delayedNameResolution() throws Exception {
    ClientStream mockStream = mock(ClientStream.class);
    final ClientStreamTracer tracer = new ClientStreamTracer() {};
    ClientStreamTracer.Factory factory = new ClientStreamTracer.Factory() {
      @Override
      public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
        return tracer;
      }
    };
    // Resolution is deferred until allResolved() so the call below is buffered first.
    FakeNameResolverFactory nsFactory = new FakeNameResolverFactory.Builder(expectedUri)
        .setResolvedAtStart(false).build();
    channelBuilder.nameResolverFactory(nsFactory);
    createChannel();
    CallOptions callOptions = CallOptions.DEFAULT.withStreamTracerFactory(factory);
    ClientCall<String, Integer> call = channel.newCall(method, callOptions);
    call.start(mockCallListener, new Metadata());
    Thread.sleep(500);
    nsFactory.allResolved();
    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    requestConnectionSafely(helper, subchannel);
    MockClientTransportInfo transportInfo = transports.poll();
    transportInfo.listener.transportReady();
    ClientTransport mockTransport = transportInfo.transport;
    when(mockTransport.newStream(
            any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
        PickResult.withSubchannel(subchannel));
    updateBalancingStateSafely(helper, READY, mockPicker);
    assertEquals(3, executor.runDueTasks());
    verify(mockPicker).pickSubchannel(any(PickSubchannelArgs.class));
    verify(mockTransport).newStream(
        same(method), any(Metadata.class), callOptionsCaptor.capture(),
        tracersCaptor.capture());
    assertThat(Arrays.asList(tracersCaptor.getValue()).contains(tracer)).isTrue();
    Long realDelay = callOptionsCaptor.getValue().getOption(NAME_RESOLUTION_DELAYED);
    assertThat(realDelay).isNotNull();
    assertThat(realDelay).isAtLeast(
        TimeUnit.MILLISECONDS.toNanos(400));//sleep not precise
  }
  /**
   * Verifies that a name-resolution result arriving after the channel has shut down is dropped
   * and does not reach the (already shut down) LoadBalancer.
   */
  @Test
  public void nameResolvedAfterChannelShutdown() {
    // Delay the success of name resolution until allResolved() is called.
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();

    channel.shutdown();

    assertTrue(channel.isShutdown());
    assertTrue(channel.isTerminated());
    verify(mockLoadBalancer).shutdown();
    // Name resolved after the channel is shut down, which is possible if the name resolution takes
    // time and is not cancellable. The resolved address will be dropped.
    nameResolverFactory.allResolved();
    verifyNoMoreInteractions(mockLoadBalancer);
  }
  /**
   * Verify that if the first resolved address points to a server that cannot be connected, the call
   * will end up with the second address which works.
   */
  @Test
  public void firstResolvedServerFailedToConnect() throws Exception {
    final SocketAddress goodAddress = new SocketAddress() {
      @Override public String toString() {
        return "goodAddress";
      }
    };
    final SocketAddress badAddress = new SocketAddress() {
      @Override public String toString() {
        return "badAddress";
      }
    };
    InOrder inOrder = inOrder(mockLoadBalancer, subchannelStateListener);

    // badAddress is listed first, so connection attempts start there.
    List<SocketAddress> resolvedAddrs = Arrays.asList(badAddress, goodAddress);
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(resolvedAddrs)))
            .build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();

    // Start the call
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    Metadata headers = new Metadata();
    call.start(mockCallListener, headers);
    executor.runDueTasks();

    // Simulate name resolution results
    EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(resolvedAddrs);
    inOrder.verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture());
    assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup);
    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel));
    requestConnectionSafely(helper, subchannel);
    inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture());
    assertEquals(CONNECTING, stateInfoCaptor.getValue().getState());

    // The channel will start with the first address (badAddress)
    verify(mockTransportFactory)
        .newClientTransport(
            same(badAddress), any(ClientTransportOptions.class), any(ChannelLogger.class));
    verify(mockTransportFactory, times(0))
        .newClientTransport(
            same(goodAddress), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo badTransportInfo = transports.poll();

    // Which failed to connect
    badTransportInfo.listener.transportShutdown(Status.UNAVAILABLE);
    inOrder.verifyNoMoreInteractions();

    // The channel then try the second address (goodAddress)
    verify(mockTransportFactory)
        .newClientTransport(
            same(goodAddress), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo goodTransportInfo = transports.poll();
    when(goodTransportInfo.transport.newStream(
            any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mock(ClientStream.class));

    goodTransportInfo.listener.transportReady();
    inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture());
    assertEquals(READY, stateInfoCaptor.getValue().getState());

    // A typical LoadBalancer will call this once the subchannel becomes READY
    updateBalancingStateSafely(helper, READY, mockPicker);
    // Delayed transport uses the app executor to create real streams.
    executor.runDueTasks();

    verify(goodTransportInfo.transport).newStream(
        same(method), same(headers), same(CallOptions.DEFAULT),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    // The bad transport was never used.
    verify(badTransportInfo.transport, times(0)).newStream(
        any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
  }
  /** A fail-fast RPC should fail when the picker returns an error result. */
  @Test
  public void failFastRpcFailFromErrorFromBalancer() {
    subtestFailRpcFromBalancer(false, false, true);
  }
  /** A fail-fast RPC should fail when the picker returns a drop result. */
  @Test
  public void failFastRpcFailFromDropFromBalancer() {
    subtestFailRpcFromBalancer(false, true, true);
  }
  /** A wait-for-ready RPC should stay pending when the picker returns an error result. */
  @Test
  public void waitForReadyRpcImmuneFromErrorFromBalancer() {
    subtestFailRpcFromBalancer(true, false, false);
  }
  /** A wait-for-ready RPC should still fail when the picker returns a drop result. */
  @Test
  public void waitForReadyRpcFailFromDropFromBalancer() {
    subtestFailRpcFromBalancer(true, true, true);
  }
private void subtestFailRpcFromBalancer(boolean waitForReady, boolean drop, boolean shouldFail) {
createChannel();
// This call will be buffered by the channel, thus involve delayed transport
CallOptions callOptions = CallOptions.DEFAULT;
if (waitForReady) {
callOptions = callOptions.withWaitForReady();
} else {
callOptions = callOptions.withoutWaitForReady();
}
ClientCall<String, Integer> call1 = channel.newCall(method, callOptions);
call1.start(mockCallListener, new Metadata());
SubchannelPicker picker = mock(SubchannelPicker.class);
Status status = Status.UNAVAILABLE.withDescription("for test");
when(picker.pickSubchannel(any(PickSubchannelArgs.class)))
.thenReturn(drop ? PickResult.withDrop(status) : PickResult.withError(status));
updateBalancingStateSafely(helper, READY, picker);
executor.runDueTasks();
if (shouldFail) {
verify(mockCallListener).onClose(same(status), any(Metadata.class));
} else {
verifyNoInteractions(mockCallListener);
}
// This call doesn't involve delayed transport
ClientCall<String, Integer> call2 = channel.newCall(method, callOptions);
call2.start(mockCallListener2, new Metadata());
executor.runDueTasks();
if (shouldFail) {
verify(mockCallListener2).onClose(same(status), any(Metadata.class));
} else {
verifyNoInteractions(mockCallListener2);
}
}
  /**
   * Verify that if all resolved addresses failed to connect, a fail-fast call will fail, while a
   * wait-for-ready call will still be buffered.
   */
  @Test
  public void allServersFailedToConnect() throws Exception {
    final SocketAddress addr1 = new SocketAddress() {
      @Override public String toString() {
        return "addr1";
      }
    };
    final SocketAddress addr2 = new SocketAddress() {
      @Override public String toString() {
        return "addr2";
      }
    };
    InOrder inOrder = inOrder(mockLoadBalancer, subchannelStateListener);

    List<SocketAddress> resolvedAddrs = Arrays.asList(addr1, addr2);

    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(resolvedAddrs)))
            .build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();

    // Start a wait-for-ready call
    ClientCall<String, Integer> call =
        channel.newCall(method, CallOptions.DEFAULT.withWaitForReady());
    Metadata headers = new Metadata();
    call.start(mockCallListener, headers);
    // ... and a fail-fast call
    ClientCall<String, Integer> call2 =
        channel.newCall(method, CallOptions.DEFAULT.withoutWaitForReady());
    call2.start(mockCallListener2, headers);
    executor.runDueTasks();

    // Simulate name resolution results
    EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(resolvedAddrs);
    inOrder.verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture());
    assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup);
    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel));
    requestConnectionSafely(helper, subchannel);

    inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture());
    assertEquals(CONNECTING, stateInfoCaptor.getValue().getState());

    // Connecting to server1, which will fail
    verify(mockTransportFactory)
        .newClientTransport(
            same(addr1), any(ClientTransportOptions.class), any(ChannelLogger.class));
    verify(mockTransportFactory, times(0))
        .newClientTransport(
            same(addr2), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo transportInfo1 = transports.poll();
    transportInfo1.listener.transportShutdown(Status.UNAVAILABLE);

    // Connecting to server2, which will fail too
    verify(mockTransportFactory)
        .newClientTransport(
            same(addr2), any(ClientTransportOptions.class), any(ChannelLogger.class));
    MockClientTransportInfo transportInfo2 = transports.poll();
    Status server2Error = Status.UNAVAILABLE.withDescription("Server2 failed to connect");
    transportInfo2.listener.transportShutdown(server2Error);

    // ... which makes the subchannel enter TRANSIENT_FAILURE. The last error Status is propagated
    // to LoadBalancer.
    inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture());
    assertEquals(TRANSIENT_FAILURE, stateInfoCaptor.getValue().getState());
    assertSame(server2Error, stateInfoCaptor.getValue().getStatus());

    // A typical LoadBalancer would create a picker with error
    SubchannelPicker picker2 = mock(SubchannelPicker.class);
    when(picker2.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withError(server2Error));
    updateBalancingStateSafely(helper, TRANSIENT_FAILURE, picker2);
    executor.runDueTasks();

    // ... which fails the fail-fast call
    verify(mockCallListener2).onClose(same(server2Error), any(Metadata.class));
    // ... while the wait-for-ready call stays
    verifyNoMoreInteractions(mockCallListener);

    // No real stream was ever created
    verify(transportInfo1.transport, times(0)).newStream(
        any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    verify(transportInfo2.transport, times(0)).newStream(
        any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
  }
  /**
   * Exercises the Subchannel API end-to-end: creation (each call yields a distinct subchannel
   * carrying its own attributes), getAddresses() from the sync context, idempotent
   * requestConnection(), updateAddresses(), and the delayed shutdown behavior (delayed before
   * channel shutdown, immediate once the channel is terminating).
   */
  @Test
  public void subchannels() {
    createChannel();

    // createSubchannel() always return a new Subchannel
    Attributes attrs1 = Attributes.newBuilder().set(SUBCHANNEL_ATTR_KEY, "attr1").build();
    Attributes attrs2 = Attributes.newBuilder().set(SUBCHANNEL_ATTR_KEY, "attr2").build();
    SubchannelStateListener listener1 = mock(SubchannelStateListener.class);
    SubchannelStateListener listener2 = mock(SubchannelStateListener.class);
    final Subchannel sub1 = createSubchannelSafely(helper, addressGroup, attrs1, listener1);
    final Subchannel sub2 = createSubchannelSafely(helper, addressGroup, attrs2, listener2);
    assertNotSame(sub1, sub2);
    assertNotSame(attrs1, attrs2);
    assertSame(attrs1, sub1.getAttributes());
    assertSame(attrs2, sub2.getAttributes());

    final AtomicBoolean snippetPassed = new AtomicBoolean(false);
    helper.getSynchronizationContext().execute(new Runnable() {
      @Override
      public void run() {
        // getAddresses() must be called from sync context
        assertSame(addressGroup, sub1.getAddresses());
        assertSame(addressGroup, sub2.getAddresses());
        snippetPassed.set(true);
      }
    });
    assertThat(snippetPassed.get()).isTrue();

    // requestConnection()
    verify(mockTransportFactory, never())
        .newClientTransport(
            any(SocketAddress.class),
            any(ClientTransportOptions.class),
            any(TransportLogger.class));
    requestConnectionSafely(helper, sub1);
    verify(mockTransportFactory)
        .newClientTransport(
            eq(socketAddress),
            eq(clientTransportOptions),
            isA(TransportLogger.class));
    MockClientTransportInfo transportInfo1 = transports.poll();
    assertNotNull(transportInfo1);

    requestConnectionSafely(helper, sub2);
    verify(mockTransportFactory, times(2))
        .newClientTransport(
            eq(socketAddress),
            eq(clientTransportOptions),
            isA(TransportLogger.class));
    MockClientTransportInfo transportInfo2 = transports.poll();
    assertNotNull(transportInfo2);

    // Repeated requestConnection() calls are no-ops once a connection attempt exists.
    requestConnectionSafely(helper, sub1);
    requestConnectionSafely(helper, sub2);
    // The subchannel doesn't matter since this isn't called
    verify(mockTransportFactory, times(2))
        .newClientTransport(
            eq(socketAddress), eq(clientTransportOptions), isA(TransportLogger.class));

    // updateAddresses()
    updateAddressesSafely(helper, sub1, Collections.singletonList(addressGroup2));
    assertThat(((InternalSubchannel) sub1.getInternalSubchannel()).getAddressGroups())
        .isEqualTo(Collections.singletonList(addressGroup2));

    // shutdown() has a delay
    shutdownSafely(helper, sub1);
    timer.forwardTime(ManagedChannelImpl.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS - 1, TimeUnit.SECONDS);
    shutdownSafely(helper, sub1);
    verify(transportInfo1.transport, never()).shutdown(any(Status.class));
    timer.forwardTime(1, TimeUnit.SECONDS);
    verify(transportInfo1.transport).shutdown(same(ManagedChannelImpl.SUBCHANNEL_SHUTDOWN_STATUS));

    // ... but not after Channel is terminating
    verify(mockLoadBalancer, never()).shutdown();
    channel.shutdown();
    verify(mockLoadBalancer).shutdown();
    verify(transportInfo2.transport, never()).shutdown(any(Status.class));

    // Once terminating, subchannel shutdown is immediate and uses SHUTDOWN_STATUS.
    shutdownSafely(helper, sub2);
    verify(transportInfo2.transport).shutdown(same(ManagedChannelImpl.SHUTDOWN_STATUS));

    // Cleanup
    transportInfo1.listener.transportShutdown(Status.UNAVAILABLE);
    transportInfo1.listener.transportTerminated();
    transportInfo2.listener.transportShutdown(Status.UNAVAILABLE);
    transportInfo2.listener.transportTerminated();
    timer.forwardTime(ManagedChannelImpl.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS);
  }
@Test
public void subchannelStringableBeforeStart() {
createChannel();
Subchannel subchannel = createUnstartedSubchannel(helper, addressGroup, Attributes.EMPTY);
assertThat(subchannel.toString()).isNotNull();
}
@Test
public void subchannelLoggerCreatedBeforeSubchannelStarted() {
createChannel();
Subchannel subchannel = createUnstartedSubchannel(helper, addressGroup, Attributes.EMPTY);
assertThat(subchannel.getChannelLogger()).isNotNull();
}
  /**
   * Verifies that channel.shutdownNow() propagates shutdownNow() to every subchannel transport,
   * and that the channel only terminates after every transport has terminated.
   */
  @Test
  public void subchannelsWhenChannelShutdownNow() {
    createChannel();
    Subchannel sub1 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    Subchannel sub2 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    requestConnectionSafely(helper, sub1);
    requestConnectionSafely(helper, sub2);

    assertThat(transports).hasSize(2);
    MockClientTransportInfo ti1 = transports.poll();
    MockClientTransportInfo ti2 = transports.poll();

    ti1.listener.transportReady();
    ti2.listener.transportReady();

    channel.shutdownNow();
    verify(ti1.transport).shutdownNow(any(Status.class));
    verify(ti2.transport).shutdownNow(any(Status.class));

    ti1.listener.transportShutdown(Status.UNAVAILABLE.withDescription("shutdown now"));
    ti2.listener.transportShutdown(Status.UNAVAILABLE.withDescription("shutdown now"));
    ti1.listener.transportTerminated();

    // Channel is not terminated until the last transport has terminated.
    assertFalse(channel.isTerminated());
    ti2.listener.transportTerminated();
    assertTrue(channel.isTerminated());
  }
  /**
   * Verifies that a channel with subchannels that never attempted a connection terminates once
   * all subchannels are shut down, without ever creating a transport.
   */
  @Test
  public void subchannelsNoConnectionShutdown() {
    createChannel();
    Subchannel sub1 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    Subchannel sub2 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);

    channel.shutdown();
    verify(mockLoadBalancer).shutdown();
    shutdownSafely(helper, sub1);
    assertFalse(channel.isTerminated());
    shutdownSafely(helper, sub2);
    assertTrue(channel.isTerminated());
    verify(mockTransportFactory, never())
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
  }
  /** requestConnection() on an already shut-down subchannel must not create a transport. */
  @Test
  public void subchannelsRequestConnectionNoopAfterShutdown() {
    createChannel();
    Subchannel sub1 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);

    shutdownSafely(helper, sub1);
    requestConnectionSafely(helper, sub1);

    verify(mockTransportFactory, never())
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
  }
  /**
   * Verifies that shutdownNow() terminates the channel immediately when subchannels have no
   * connections, without waiting for the LoadBalancer to shut the subchannels down itself.
   */
  @Test
  public void subchannelsNoConnectionShutdownNow() {
    createChannel();
    createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    channel.shutdownNow();

    verify(mockLoadBalancer).shutdown();
    // Channel's shutdownNow() will call shutdownNow() on all subchannels and oobchannels.
    // Therefore, channel is terminated without relying on LoadBalancer to shutdown subchannels.
    assertTrue(channel.isTerminated());
    verify(mockTransportFactory, never())
        .newClientTransport(
            any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
  }
@Test
public void oobchannels() {
createChannel();
ManagedChannel oob1 = helper.createOobChannel(
Collections.singletonList(addressGroup), "oob1authority");
ManagedChannel oob2 = helper.createOobChannel(
Collections.singletonList(addressGroup), "oob2authority");
verify(balancerRpcExecutorPool, times(2)).getObject();
assertEquals("oob1authority", oob1.authority());
assertEquals("oob2authority", oob2.authority());
// OOB channels create connections lazily. A new call will initiate the connection.
Metadata headers = new Metadata();
ClientCall<String, Integer> call = oob1.newCall(method, CallOptions.DEFAULT);
call.start(mockCallListener, headers);
verify(mockTransportFactory)
.newClientTransport(
eq(socketAddress),
eq(new ClientTransportOptions().setAuthority("oob1authority").setUserAgent(USER_AGENT)),
isA(ChannelLogger.class));
MockClientTransportInfo transportInfo = transports.poll();
assertNotNull(transportInfo);
assertEquals(0, balancerRpcExecutor.numPendingTasks());
transportInfo.listener.transportReady();
assertEquals(1, balancerRpcExecutor.runDueTasks());
verify(transportInfo.transport).newStream(
same(method), same(headers), same(CallOptions.DEFAULT),
ArgumentMatchers.<ClientStreamTracer[]>any());
// The transport goes away
transportInfo.listener.transportShutdown(Status.UNAVAILABLE);
transportInfo.listener.transportTerminated();
// A new call will trigger a new transport
ClientCall<String, Integer> call2 = oob1.newCall(method, CallOptions.DEFAULT);
call2.start(mockCallListener2, headers);
ClientCall<String, Integer> call3 =
oob1.newCall(method, CallOptions.DEFAULT.withWaitForReady());
call3.start(mockCallListener3, headers);
verify(mockTransportFactory, times(2)).newClientTransport(
eq(socketAddress),
eq(new ClientTransportOptions().setAuthority("oob1authority").setUserAgent(USER_AGENT)),
isA(ChannelLogger.class));
transportInfo = transports.poll();
assertNotNull(transportInfo);
// This transport fails
Status transportError = Status.UNAVAILABLE.withDescription("Connection refused");
assertEquals(0, balancerRpcExecutor.numPendingTasks());
transportInfo.listener.transportShutdown(transportError);
assertTrue(balancerRpcExecutor.runDueTasks() > 0);
// Fail-fast RPC will fail, while wait-for-ready RPC will still be pending
verify(mockCallListener2).onClose(same(transportError), any(Metadata.class));
verify(mockCallListener3, never()).onClose(any(Status.class), any(Metadata.class));
// Shutdown
assertFalse(oob1.isShutdown());
assertFalse(oob2.isShutdown());
oob1.shutdown();
oob2.shutdownNow();
assertTrue(oob1.isShutdown());
assertTrue(oob2.isShutdown());
assertTrue(oob2.isTerminated());
verify(balancerRpcExecutorPool).returnObject(balancerRpcExecutor.getScheduledExecutorService());
// New RPCs will be rejected.
assertEquals(0, balancerRpcExecutor.numPendingTasks());
ClientCall<String, Integer> call4 = oob1.newCall(method, CallOptions.DEFAULT);
ClientCall<String, Integer> call5 = oob2.newCall(method, CallOptions.DEFAULT);
call4.start(mockCallListener4, headers);
call5.start(mockCallListener5, headers);
assertTrue(balancerRpcExecutor.runDueTasks() > 0);
verify(mockCallListener4).onClose(statusCaptor.capture(), any(Metadata.class));
Status status4 = statusCaptor.getValue();
assertEquals(Status.Code.UNAVAILABLE, status4.getCode());
verify(mockCallListener5).onClose(statusCaptor.capture(), any(Metadata.class));
Status status5 = statusCaptor.getValue();
assertEquals(Status.Code.UNAVAILABLE, status5.getCode());
// The pending RPC will still be pending
verify(mockCallListener3, never()).onClose(any(Status.class), any(Metadata.class));
// This will shutdownNow() the delayed transport, terminating the pending RPC
assertEquals(0, balancerRpcExecutor.numPendingTasks());
oob1.shutdownNow();
assertTrue(balancerRpcExecutor.runDueTasks() > 0);
verify(mockCallListener3).onClose(any(Status.class), any(Metadata.class));
// Shut down the channel; it will not be terminated because the OOB channel has not terminated.
channel.shutdown();
assertFalse(channel.isTerminated());
// Delayed transport has already terminated. Terminating the transport terminates the
// subchannel, which in turn terminates the OOB channel, which terminates the channel.
assertFalse(oob1.isTerminated());
verify(balancerRpcExecutorPool).returnObject(balancerRpcExecutor.getScheduledExecutorService());
transportInfo.listener.transportTerminated();
assertTrue(oob1.isTerminated());
assertTrue(channel.isTerminated());
verify(balancerRpcExecutorPool, times(2))
.returnObject(balancerRpcExecutor.getScheduledExecutorService());
}
/**
 * OOB channels (both plain {@code createOobChannel} and
 * {@code createResolvingOobChannelBuilder} without credentials) must NOT inherit the
 * channel-level call credentials; only per-RPC call credentials apply to them.
 */
@Test
public void oobChannelHasNoChannelCallCredentials() {
  Metadata.Key<String> metadataKey =
      Metadata.Key.of("token", Metadata.ASCII_STRING_MARSHALLER);
  String channelCredValue = "channel-provided call cred";
  // Build a channel whose ChannelCredentials carry a FakeCallCredentials that stamps
  // channelCredValue into the headers of every RPC on the (non-OOB) channel.
  channelBuilder = new ManagedChannelImplBuilder(
      TARGET, InsecureChannelCredentials.create(),
      new FakeCallCredentials(metadataKey, channelCredValue),
      new UnsupportedClientTransportFactoryBuilder(), new FixedPortProvider(DEFAULT_PORT));
  channelBuilder.disableRetry();
  configureBuilder(channelBuilder);
  createChannel();

  // Verify that the normal channel has call creds, to validate configuration
  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  requestConnectionSafely(helper, subchannel);
  MockClientTransportInfo transportInfo = transports.poll();
  assertNotNull(transportInfo);
  transportInfo.listener.transportReady();
  when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
      PickResult.withSubchannel(subchannel));
  updateBalancingStateSafely(helper, READY, mockPicker);

  String callCredValue = "per-RPC call cred";
  CallOptions callOptions = CallOptions.DEFAULT
      .withCallCredentials(new FakeCallCredentials(metadataKey, callCredValue));
  Metadata headers = new Metadata();
  ClientCall<String, Integer> call = channel.newCall(method, callOptions);
  call.start(mockCallListener, headers);

  verify(transportInfo.transport).newStream(
      same(method), same(headers), same(callOptions),
      ArgumentMatchers.<ClientStreamTracer[]>any());
  // Normal channel: both channel-level and per-RPC creds are applied, in that order.
  assertThat(headers.getAll(metadataKey))
      .containsExactly(channelCredValue, callCredValue).inOrder();

  // Verify that the oob channel does not
  ManagedChannel oob = helper.createOobChannel(
      Collections.singletonList(addressGroup), "oobauthority");
  headers = new Metadata();
  call = oob.newCall(method, callOptions);
  call.start(mockCallListener2, headers);

  transportInfo = transports.poll();
  assertNotNull(transportInfo);
  transportInfo.listener.transportReady();
  balancerRpcExecutor.runDueTasks();
  verify(transportInfo.transport).newStream(
      same(method), same(headers), same(callOptions),
      ArgumentMatchers.<ClientStreamTracer[]>any());
  // OOB channel: only the per-RPC call cred shows up.
  assertThat(headers.getAll(metadataKey)).containsExactly(callCredValue);
  oob.shutdownNow();

  // Verify that resolving oob channel does not
  oob = helper.createResolvingOobChannelBuilder("oobauthority")
      .nameResolverFactory(
          new FakeNameResolverFactory.Builder(URI.create("fake:///oobauthority")).build())
      .defaultLoadBalancingPolicy(MOCK_POLICY_NAME)
      .idleTimeout(ManagedChannelImplBuilder.IDLE_MODE_MAX_TIMEOUT_DAYS, TimeUnit.DAYS)
      .disableRetry() // irrelevant to what we test, disable retry to make verification easy
      .build();
  oob.getState(true);
  ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
  // Second newLoadBalancer() invocation belongs to the resolving OOB channel.
  verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture());
  Helper oobHelper = helperCaptor.getValue();

  subchannel =
      createSubchannelSafely(oobHelper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  requestConnectionSafely(oobHelper, subchannel);
  transportInfo = transports.poll();
  assertNotNull(transportInfo);
  transportInfo.listener.transportReady();
  SubchannelPicker mockPicker2 = mock(SubchannelPicker.class);
  when(mockPicker2.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
      PickResult.withSubchannel(subchannel));
  updateBalancingStateSafely(oobHelper, READY, mockPicker2);

  headers = new Metadata();
  call = oob.newCall(method, callOptions);
  call.start(mockCallListener2, headers);
  // CallOptions may contain StreamTracerFactory for census that is added by default.
  verify(transportInfo.transport).newStream(
      same(method), same(headers), any(CallOptions.class),
      ArgumentMatchers.<ClientStreamTracer[]>any());
  assertThat(headers.getAll(metadataKey)).containsExactly(callCredValue);
  oob.shutdownNow();
}
/**
 * A resolving OOB channel built with its own {@link CompositeChannelCredentials} must apply
 * the call credentials carried by those OOB channel creds (plus per-RPC creds), while the
 * parent channel keeps applying its own channel-level call credentials.
 */
@Test
public void oobChannelWithOobChannelCredsHasChannelCallCredentials() {
  Metadata.Key<String> metadataKey =
      Metadata.Key.of("token", Metadata.ASCII_STRING_MARSHALLER);
  String channelCredValue = "channel-provided call cred";
  // swapChannelCredentials() is invoked when the OOB channel is built with composite creds;
  // echo back the same transport factory plus the composite's call credentials.
  when(mockTransportFactory.swapChannelCredentials(any(CompositeChannelCredentials.class)))
      .thenAnswer(new Answer<SwapChannelCredentialsResult>() {
        @Override
        public SwapChannelCredentialsResult answer(InvocationOnMock invocation) {
          CompositeChannelCredentials c =
              invocation.getArgument(0, CompositeChannelCredentials.class);
          return new SwapChannelCredentialsResult(mockTransportFactory, c.getCallCredentials());
        }
      });
  channelBuilder = new ManagedChannelImplBuilder(
      TARGET, InsecureChannelCredentials.create(),
      new FakeCallCredentials(metadataKey, channelCredValue),
      new UnsupportedClientTransportFactoryBuilder(), new FixedPortProvider(DEFAULT_PORT));
  channelBuilder.disableRetry();
  configureBuilder(channelBuilder);
  createChannel();

  // Verify that the normal channel has call creds, to validate configuration
  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  requestConnectionSafely(helper, subchannel);
  MockClientTransportInfo transportInfo = transports.poll();
  // Fail with a clear assertion instead of an NPE if no transport was created
  // (consistent with the other OOB-channel tests in this file).
  assertNotNull(transportInfo);
  transportInfo.listener.transportReady();
  when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
      PickResult.withSubchannel(subchannel));
  updateBalancingStateSafely(helper, READY, mockPicker);

  String callCredValue = "per-RPC call cred";
  CallOptions callOptions = CallOptions.DEFAULT
      .withCallCredentials(new FakeCallCredentials(metadataKey, callCredValue));
  Metadata headers = new Metadata();
  ClientCall<String, Integer> call = channel.newCall(method, callOptions);
  call.start(mockCallListener, headers);

  verify(transportInfo.transport).newStream(
      same(method), same(headers), same(callOptions),
      ArgumentMatchers.<ClientStreamTracer[]>any());
  assertThat(headers.getAll(metadataKey))
      .containsExactly(channelCredValue, callCredValue).inOrder();

  // Verify that resolving oob channel with oob channel creds provides call creds
  String oobChannelCredValue = "oob-channel-provided call cred";
  ChannelCredentials oobChannelCreds = CompositeChannelCredentials.create(
      InsecureChannelCredentials.create(),
      new FakeCallCredentials(metadataKey, oobChannelCredValue));
  ManagedChannel oob = helper.createResolvingOobChannelBuilder(
      "fake://oobauthority/", oobChannelCreds)
      .nameResolverFactory(
          new FakeNameResolverFactory.Builder(URI.create("fake://oobauthority/")).build())
      .defaultLoadBalancingPolicy(MOCK_POLICY_NAME)
      .idleTimeout(ManagedChannelImplBuilder.IDLE_MODE_MAX_TIMEOUT_DAYS, TimeUnit.DAYS)
      .disableRetry() // irrelevant to what we test, disable retry to make verification easy
      .build();
  oob.getState(true);
  ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
  verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture());
  Helper oobHelper = helperCaptor.getValue();

  subchannel =
      createSubchannelSafely(oobHelper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  requestConnectionSafely(oobHelper, subchannel);
  transportInfo = transports.poll();
  // Same defensive check as above: a missing transport should fail the test explicitly.
  assertNotNull(transportInfo);
  transportInfo.listener.transportReady();
  SubchannelPicker mockPicker2 = mock(SubchannelPicker.class);
  when(mockPicker2.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
      PickResult.withSubchannel(subchannel));
  updateBalancingStateSafely(oobHelper, READY, mockPicker2);

  headers = new Metadata();
  call = oob.newCall(method, callOptions);
  call.start(mockCallListener2, headers);
  // CallOptions may contain StreamTracerFactory for census that is added by default.
  verify(transportInfo.transport).newStream(
      same(method), same(headers), any(CallOptions.class),
      ArgumentMatchers.<ClientStreamTracer[]>any());
  // OOB-channel creds come first, then the per-RPC creds.
  assertThat(headers.getAll(metadataKey))
      .containsExactly(oobChannelCredValue, callCredValue).inOrder();
  oob.shutdownNow();
}
/**
 * shutdownNow() on the parent channel must propagate to the transports of every OOB
 * channel, and the parent terminates only after the last transport has terminated.
 */
@Test
public void oobChannelsWhenChannelShutdownNow() {
  createChannel();
  // Two OOB channels, each with one RPC so that a transport is created for each.
  ManagedChannel firstOob = helper.createOobChannel(
      Collections.singletonList(addressGroup), "oob1Authority");
  ManagedChannel secondOob = helper.createOobChannel(
      Collections.singletonList(addressGroup), "oob2Authority");
  firstOob.newCall(method, CallOptions.DEFAULT).start(mockCallListener, new Metadata());
  secondOob.newCall(method, CallOptions.DEFAULT).start(mockCallListener2, new Metadata());

  assertThat(transports).hasSize(2);
  MockClientTransportInfo firstTransport = transports.poll();
  MockClientTransportInfo secondTransport = transports.poll();
  firstTransport.listener.transportReady();
  secondTransport.listener.transportReady();

  // Hard shutdown of the parent reaches both OOB transports.
  channel.shutdownNow();
  verify(firstTransport.transport).shutdownNow(any(Status.class));
  verify(secondTransport.transport).shutdownNow(any(Status.class));

  firstTransport.listener.transportShutdown(
      Status.UNAVAILABLE.withDescription("shutdown now"));
  secondTransport.listener.transportShutdown(
      Status.UNAVAILABLE.withDescription("shutdown now"));
  firstTransport.listener.transportTerminated();
  // One transport is still alive, so the channel cannot be terminated yet.
  assertFalse(channel.isTerminated());
  secondTransport.listener.transportTerminated();
  assertTrue(channel.isTerminated());
}
/**
 * With no connections ever made, graceful shutdown of the parent channel completes only
 * after every OOB channel has been shut down, and no transport is ever created.
 */
@Test
public void oobChannelsNoConnectionShutdown() {
  createChannel();
  ManagedChannel firstOob = helper.createOobChannel(
      Collections.singletonList(addressGroup), "oob1Authority");
  ManagedChannel secondOob = helper.createOobChannel(
      Collections.singletonList(addressGroup), "oob2Authority");

  // Graceful shutdown tears down the balancer immediately...
  channel.shutdown();
  verify(mockLoadBalancer).shutdown();

  // ...but the channel terminates only once both OOB channels have terminated.
  firstOob.shutdown();
  assertTrue(firstOob.isTerminated());
  assertFalse(channel.isTerminated());
  secondOob.shutdown();
  assertTrue(secondOob.isTerminated());
  assertTrue(channel.isTerminated());

  // No RPC was issued, so no transport should ever have been requested.
  verify(mockTransportFactory, never())
      .newClientTransport(
          any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
}
/**
 * shutdownNow() forcibly shuts down OOB channels itself, so the channel terminates
 * without the LoadBalancer having to clean them up.
 */
@Test
public void oobChannelsNoConnectionShutdownNow() {
  createChannel();
  helper.createOobChannel(Collections.singletonList(addressGroup), "oob1Authority");
  helper.createOobChannel(Collections.singletonList(addressGroup), "oob2Authority");

  channel.shutdownNow();
  verify(mockLoadBalancer).shutdown();
  // Channel's shutdownNow() will call shutdownNow() on all subchannels and oobchannels.
  // Therefore, channel is terminated without relying on LoadBalancer to shutdown oobchannels.
  assertTrue(channel.isTerminated());

  // No RPC ever ran, so no transport was created.
  verify(mockTransportFactory, never())
      .newClientTransport(
          any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
}
/**
 * A Channel obtained from {@code Subchannel.asChannel()} sends RPCs directly on the
 * subchannel's transport, lazily acquires the balancer RPC executor, and marks its RPCs
 * as balancer-owned via CALL_OPTIONS_RPC_OWNED_BY_BALANCER.
 */
@Test
public void subchannelChannel_normalUsage() {
  createChannel();
  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  // The balancer RPC executor is acquired lazily, only once asChannel() is called.
  verify(balancerRpcExecutorPool, never()).getObject();

  Channel sChannel = subchannel.asChannel();
  verify(balancerRpcExecutorPool).getObject();

  Metadata headers = new Metadata();
  CallOptions callOptions = CallOptions.DEFAULT.withDeadlineAfter(5, TimeUnit.SECONDS);

  // Subchannel must be READY when creating the RPC.
  requestConnectionSafely(helper, subchannel);
  verify(mockTransportFactory)
      .newClientTransport(
          any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
  MockClientTransportInfo transportInfo = transports.poll();
  ConnectionClientTransport mockTransport = transportInfo.transport;
  ManagedClientTransport.Listener transportListener = transportInfo.listener;
  transportListener.transportReady();

  ClientCall<String, Integer> call = sChannel.newCall(method, callOptions);
  call.start(mockCallListener, headers);
  verify(mockTransport).newStream(
      same(method), same(headers), callOptionsCaptor.capture(),
      ArgumentMatchers.<ClientStreamTracer[]>any());

  CallOptions capturedCallOption = callOptionsCaptor.getValue();
  // The caller's deadline is passed through unchanged, and the RPC is flagged as
  // owned by the balancer.
  assertThat(capturedCallOption.getDeadline()).isSameInstanceAs(callOptions.getDeadline());
  assertThat(capturedCallOption.getOption(GrpcUtil.CALL_OPTIONS_RPC_OWNED_BY_BALANCER)).isTrue();
}
/**
 * RPCs on a subchannel Channel fail immediately with NOT_READY_ERROR when the
 * subchannel is not READY; the failure is delivered on the balancer RPC executor.
 */
@Test
public void subchannelChannel_failWhenNotReady() {
  createChannel();
  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  Channel sChannel = subchannel.asChannel();
  Metadata headers = new Metadata();

  requestConnectionSafely(helper, subchannel);
  verify(mockTransportFactory)
      .newClientTransport(
          any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
  MockClientTransportInfo transportInfo = transports.poll();
  ConnectionClientTransport mockTransport = transportInfo.transport;
  assertEquals(0, balancerRpcExecutor.numPendingTasks());

  // Subchannel is still CONNECTING, but not READY yet
  ClientCall<String, Integer> call = sChannel.newCall(method, CallOptions.DEFAULT);
  call.start(mockCallListener, headers);
  // No stream is ever created on the transport.
  verify(mockTransport, never()).newStream(
      any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
      ArgumentMatchers.<ClientStreamTracer[]>any());

  // The onClose callback runs asynchronously on the balancer RPC executor.
  verifyNoInteractions(mockCallListener);
  assertEquals(1, balancerRpcExecutor.runDueTasks());
  verify(mockCallListener).onClose(
      same(SubchannelChannel.NOT_READY_ERROR), any(Metadata.class));
}
/**
 * Wait-for-ready RPCs are not allowed on a subchannel Channel even when the subchannel
 * is READY; they fail with WAIT_FOR_READY_ERROR on the balancer RPC executor.
 */
@Test
public void subchannelChannel_failWaitForReady() {
  createChannel();
  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  Channel sChannel = subchannel.asChannel();
  Metadata headers = new Metadata();

  // Subchannel must be READY when creating the RPC.
  requestConnectionSafely(helper, subchannel);
  verify(mockTransportFactory)
      .newClientTransport(
          any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class));
  MockClientTransportInfo transportInfo = transports.poll();
  ConnectionClientTransport mockTransport = transportInfo.transport;
  ManagedClientTransport.Listener transportListener = transportInfo.listener;
  transportListener.transportReady();
  assertEquals(0, balancerRpcExecutor.numPendingTasks());

  // Wait-for-ready RPC is not allowed
  ClientCall<String, Integer> call =
      sChannel.newCall(method, CallOptions.DEFAULT.withWaitForReady());
  call.start(mockCallListener, headers);
  verify(mockTransport, never()).newStream(
      any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
      ArgumentMatchers.<ClientStreamTracer[]>any());

  // The rejection is delivered asynchronously on the balancer RPC executor.
  verifyNoInteractions(mockCallListener);
  assertEquals(1, balancerRpcExecutor.runDueTasks());
  verify(mockCallListener).onClose(
      same(SubchannelChannel.WAIT_FOR_READY_ERROR), any(Metadata.class));
}
/**
 * The ScheduledExecutorService exposed to the LoadBalancer schedules work on the fake
 * timer but refuses shutdown()/shutdownNow() — the balancer must not be able to kill
 * the channel's shared executor.
 */
@Test
public void lbHelper_getScheduledExecutorService() {
  createChannel();

  ScheduledExecutorService ses = helper.getScheduledExecutorService();
  Runnable task = mock(Runnable.class);
  helper.getSynchronizationContext().schedule(task, 110, TimeUnit.NANOSECONDS, ses);
  // The task fires exactly at the scheduled delay, not before.
  timer.forwardNanos(109);
  verify(task, never()).run();
  timer.forwardNanos(1);
  verify(task).run();

  // Lifecycle methods are forbidden on the shared executor.
  try {
    ses.shutdown();
    fail("Should throw");
  } catch (UnsupportedOperationException e) {
    // expected
  }

  try {
    ses.shutdownNow();
    fail("Should throw");
  } catch (UnsupportedOperationException e) {
    // expected
  }
}
/**
 * The NameResolver.Args exposed via the LoadBalancer helper must reflect the channel's
 * configuration: default port, proxy detector, sync context, config parser and
 * metric recorder.
 */
@Test
public void lbHelper_getNameResolverArgs() {
  createChannel();

  NameResolver.Args resolverArgs = helper.getNameResolverArgs();
  assertThat(resolverArgs.getDefaultPort()).isEqualTo(DEFAULT_PORT);
  assertThat(resolverArgs.getProxyDetector()).isSameInstanceAs(GrpcUtil.DEFAULT_PROXY_DETECTOR);
  assertThat(resolverArgs.getSynchronizationContext())
      .isSameInstanceAs(helper.getSynchronizationContext());
  assertThat(resolverArgs.getServiceConfigParser()).isNotNull();
  assertThat(resolverArgs.getMetricRecorder()).isNotNull();
}
/** The helper exposes the channel's own resolver registry, not the global default one. */
@Test
public void lbHelper_getNonDefaultNameResolverRegistry() {
  createChannel();
  assertThat(helper.getNameResolverRegistry())
      .isNotSameInstanceAs(NameResolverRegistry.getDefaultRegistry());
}
/** A non-idle channel refreshes name resolution when an OOB connection fails. */
@Test
public void refreshNameResolution_whenOobChannelConnectionFailed_notIdle() {
  subtestNameResolutionRefreshWhenConnectionFailed(false);
}

/** An idle channel must NOT refresh name resolution when an OOB connection fails. */
@Test
public void notRefreshNameResolution_whenOobChannelConnectionFailed_idle() {
  subtestNameResolutionRefreshWhenConnectionFailed(true);
}
/**
 * Shared body for the two refresh-on-OOB-connection-failure tests.
 *
 * @param isIdle whether the channel is put into idle mode before the transport fails;
 *     an idle channel replaces its resolver with a fresh (unstarted) one and must not
 *     call refresh() on it
 */
private void subtestNameResolutionRefreshWhenConnectionFailed(boolean isIdle) {
  FakeNameResolverFactory nameResolverFactory =
      new FakeNameResolverFactory.Builder(expectedUri)
          .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
          .build();
  channelBuilder.nameResolverFactory(nameResolverFactory);
  createChannel();
  OobChannel oobChannel = (OobChannel) helper.createOobChannel(
      Collections.singletonList(addressGroup), "oobAuthority");
  oobChannel.getSubchannel().requestConnection();

  MockClientTransportInfo transportInfo = transports.poll();
  assertNotNull(transportInfo);

  FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.remove(0);
  if (isIdle) {
    channel.enterIdle();
    // Entering idle mode will result in a new resolver
    resolver = nameResolverFactory.resolvers.remove(0);
  }
  assertEquals(0, nameResolverFactory.resolvers.size());

  // Tracks how many refresh() calls the active resolver is expected to have seen.
  int expectedRefreshCount = 0;

  // Transport closed when connecting
  assertEquals(expectedRefreshCount, resolver.refreshCalled);
  transportInfo.listener.transportShutdown(Status.UNAVAILABLE);
  // When channel enters idle, new resolver is created but not started.
  if (!isIdle) {
    expectedRefreshCount++;
  }
  assertEquals(expectedRefreshCount, resolver.refreshCalled);

  // Let the reconnect backoff elapse so a new transport attempt is made.
  timer.forwardNanos(RECONNECT_BACKOFF_INTERVAL_NANOS);
  transportInfo = transports.poll();
  assertNotNull(transportInfo);
  transportInfo.listener.transportReady();

  // Transport closed when ready
  assertEquals(expectedRefreshCount, resolver.refreshCalled);
  transportInfo.listener.transportShutdown(Status.UNAVAILABLE);
  // When channel enters idle, new resolver is created but not started.
  if (!isIdle) {
    expectedRefreshCount++;
  }
  assertEquals(expectedRefreshCount, resolver.refreshCalled);
}
/**
 * Test that information such as the Call's context, MethodDescriptor, authority, executor are
 * propagated to newStream() and applyRequestMetadata().
 */
@Test
public void informationPropagatedToNewStreamAndCallCredentials() {
  createChannel();
  CallOptions callOptions = CallOptions.DEFAULT.withCallCredentials(creds);
  final Context.Key<String> testKey = Context.key("testing");
  Context ctx = Context.current().withValue(testKey, "testValue");
  // Record the Context active inside applyRequestMetadata() and newStream() so we can
  // assert the call's Context was re-attached there.
  final LinkedList<Context> credsApplyContexts = new LinkedList<>();
  final LinkedList<Context> newStreamContexts = new LinkedList<>();
  doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock in) throws Throwable {
      credsApplyContexts.add(Context.current());
      return null;
    }
  }).when(creds).applyRequestMetadata(
      any(RequestInfo.class), any(Executor.class), any(CallCredentials.MetadataApplier.class));

  // First call will be on delayed transport. Only newCall() is run within the expected context,
  // so that we can verify that the context is explicitly attached before calling newStream() and
  // applyRequestMetadata(), which happens after we detach the context from the thread.
  Context origCtx = ctx.attach();
  assertEquals("testValue", testKey.get());
  ClientCall<String, Integer> call = channel.newCall(method, callOptions);
  ctx.detach(origCtx);
  assertNull(testKey.get());
  call.start(mockCallListener, new Metadata());

  // Simulate name resolution results
  EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(socketAddress);
  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  requestConnectionSafely(helper, subchannel);
  verify(mockTransportFactory)
      .newClientTransport(
          same(socketAddress), eq(clientTransportOptions), any(ChannelLogger.class));
  MockClientTransportInfo transportInfo = transports.poll();
  final ConnectionClientTransport transport = transportInfo.transport;
  when(transport.getAttributes()).thenReturn(Attributes.EMPTY);
  doAnswer(new Answer<ClientStream>() {
    @Override
    public ClientStream answer(InvocationOnMock in) throws Throwable {
      newStreamContexts.add(Context.current());
      return mock(ClientStream.class);
    }
  }).when(transport).newStream(
      any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
      ArgumentMatchers.<ClientStreamTracer[]>any());

  verify(creds, never()).applyRequestMetadata(
      any(RequestInfo.class), any(Executor.class), any(CallCredentials.MetadataApplier.class));

  // applyRequestMetadata() is called after the transport becomes ready.
  transportInfo.listener.transportReady();
  when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
      .thenReturn(PickResult.withSubchannel(subchannel));
  updateBalancingStateSafely(helper, READY, mockPicker);
  executor.runDueTasks();
  ArgumentCaptor<RequestInfo> infoCaptor = ArgumentCaptor.forClass(RequestInfo.class);
  ArgumentCaptor<Executor> executorArgumentCaptor = ArgumentCaptor.forClass(Executor.class);
  ArgumentCaptor<CallCredentials.MetadataApplier> applierCaptor =
      ArgumentCaptor.forClass(CallCredentials.MetadataApplier.class);
  verify(creds).applyRequestMetadata(infoCaptor.capture(),
      executorArgumentCaptor.capture(), applierCaptor.capture());
  // The offload executor is handed to the call credentials (wrapped in an ExecutorHolder).
  assertSame(offloadExecutor,
      ((ManagedChannelImpl.ExecutorHolder) executorArgumentCaptor.getValue()).getExecutor());
  assertEquals("testValue", testKey.get(credsApplyContexts.poll()));
  assertEquals(AUTHORITY, infoCaptor.getValue().getAuthority());
  assertEquals(SecurityLevel.NONE, infoCaptor.getValue().getSecurityLevel());
  verify(transport, never()).newStream(
      any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
      ArgumentMatchers.<ClientStreamTracer[]>any());

  // newStream() is called after apply() is called
  applierCaptor.getValue().apply(new Metadata());
  verify(transport).newStream(
      same(method), any(Metadata.class), same(callOptions),
      ArgumentMatchers.<ClientStreamTracer[]>any());
  assertEquals("testValue", testKey.get(newStreamContexts.poll()));
  // The context should not live beyond the scope of newStream() and applyRequestMetadata()
  assertNull(testKey.get());

  // Second call will not be on delayed transport
  origCtx = ctx.attach();
  call = channel.newCall(method, callOptions);
  ctx.detach(origCtx);
  call.start(mockCallListener, new Metadata());

  verify(creds, times(2)).applyRequestMetadata(infoCaptor.capture(),
      executorArgumentCaptor.capture(), applierCaptor.capture());
  assertSame(offloadExecutor,
      ((ManagedChannelImpl.ExecutorHolder) executorArgumentCaptor.getValue()).getExecutor());
  assertEquals("testValue", testKey.get(credsApplyContexts.poll()));
  assertEquals(AUTHORITY, infoCaptor.getValue().getAuthority());
  assertEquals(SecurityLevel.NONE, infoCaptor.getValue().getSecurityLevel());
  // This is from the first call
  verify(transport).newStream(
      any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
      ArgumentMatchers.<ClientStreamTracer[]>any());

  // Still, newStream() is called after apply() is called
  applierCaptor.getValue().apply(new Metadata());
  verify(transport, times(2)).newStream(
      same(method), any(Metadata.class), same(callOptions),
      ArgumentMatchers.<ClientStreamTracer[]>any());
  assertEquals("testValue", testKey.get(newStreamContexts.poll()));
  assertNull(testKey.get());
}
/**
 * When the picker supplies a stream tracer factory, its tracer is appended AFTER the
 * tracer from CallOptions — verified for the no-delay (transport READY) path.
 */
@Test
public void pickerReturnsStreamTracer_noDelay() {
  ClientStream mockStream = mock(ClientStream.class);
  final ClientStreamTracer tracer1 = new ClientStreamTracer() {};
  final ClientStreamTracer tracer2 = new ClientStreamTracer() {};
  // factory1 comes from CallOptions, factory2 from the picker's PickResult.
  ClientStreamTracer.Factory factory1 = new ClientStreamTracer.Factory() {
    @Override
    public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
      return tracer1;
    }
  };
  ClientStreamTracer.Factory factory2 = new ClientStreamTracer.Factory() {
    @Override
    public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
      return tracer2;
    }
  };
  createChannel();
  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  requestConnectionSafely(helper, subchannel);
  MockClientTransportInfo transportInfo = transports.poll();
  transportInfo.listener.transportReady();
  ClientTransport mockTransport = transportInfo.transport;
  when(mockTransport.newStream(
          any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
          ArgumentMatchers.<ClientStreamTracer[]>any()))
      .thenReturn(mockStream);
  when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
      PickResult.withSubchannel(subchannel, factory2));
  updateBalancingStateSafely(helper, READY, mockPicker);

  CallOptions callOptions = CallOptions.DEFAULT.withStreamTracerFactory(factory1);
  ClientCall<String, Integer> call = channel.newCall(method, callOptions);
  call.start(mockCallListener, new Metadata());

  verify(mockPicker).pickSubchannel(any(PickSubchannelArgs.class));
  verify(mockTransport).newStream(
      same(method), any(Metadata.class), callOptionsCaptor.capture(),
      tracersCaptor.capture());
  // CallOptions tracer first, picker-provided tracer second.
  assertThat(tracersCaptor.getValue()).isEqualTo(new ClientStreamTracer[] {tracer1, tracer2});
}
/**
 * Same tracer-ordering guarantee as {@code pickerReturnsStreamTracer_noDelay}, but for an
 * RPC that starts on the delayed transport before a transport becomes READY.
 */
@Test
public void pickerReturnsStreamTracer_delayed() {
  ClientStream mockStream = mock(ClientStream.class);
  final ClientStreamTracer tracer1 = new ClientStreamTracer() {};
  final ClientStreamTracer tracer2 = new ClientStreamTracer() {};
  // factory1 comes from CallOptions, factory2 from the picker's PickResult.
  ClientStreamTracer.Factory factory1 = new ClientStreamTracer.Factory() {
    @Override
    public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
      return tracer1;
    }
  };
  ClientStreamTracer.Factory factory2 = new ClientStreamTracer.Factory() {
    @Override
    public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
      return tracer2;
    }
  };
  createChannel();

  // Start the RPC before any subchannel exists, so it buffers in the delayed transport.
  CallOptions callOptions = CallOptions.DEFAULT.withStreamTracerFactory(factory1);
  ClientCall<String, Integer> call = channel.newCall(method, callOptions);
  call.start(mockCallListener, new Metadata());

  Subchannel subchannel =
      createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
  requestConnectionSafely(helper, subchannel);
  MockClientTransportInfo transportInfo = transports.poll();
  transportInfo.listener.transportReady();
  ClientTransport mockTransport = transportInfo.transport;
  when(mockTransport.newStream(
          any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
          ArgumentMatchers.<ClientStreamTracer[]>any()))
      .thenReturn(mockStream);
  when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
      PickResult.withSubchannel(subchannel, factory2));
  updateBalancingStateSafely(helper, READY, mockPicker);
  // The buffered RPC is drained onto the real transport.
  assertEquals(1, executor.runDueTasks());

  verify(mockPicker).pickSubchannel(any(PickSubchannelArgs.class));
  verify(mockTransport).newStream(
      same(method), any(Metadata.class), callOptionsCaptor.capture(),
      tracersCaptor.capture());
  // CallOptions tracer first, picker-provided tracer second.
  assertThat(tracersCaptor.getValue()).isEqualTo(new ClientStreamTracer[] {tracer1, tracer2});
}
/** The channel's reported state follows the state published by the LoadBalancer. */
@Test
public void getState_loadBalancerSupportsChannelState() {
  channelBuilder.nameResolverFactory(
      new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build());
  createChannel();
  assertEquals(CONNECTING, channel.getState(false));

  updateBalancingStateSafely(helper, TRANSIENT_FAILURE, mockPicker);
  assertEquals(TRANSIENT_FAILURE, channel.getState(false));
}
/**
 * getState(true) on an IDLE channel exits idle mode (creating the LoadBalancer exactly
 * once); getState(false) never triggers a connection.
 */
@Test
public void getState_withRequestConnect() {
  channelBuilder.nameResolverFactory(
      new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build());
  requestConnection = false;
  createChannel();

  assertEquals(IDLE, channel.getState(false));
  verify(mockLoadBalancerProvider, never()).newLoadBalancer(any(Helper.class));

  // call getState() with requestConnection = true
  assertEquals(IDLE, channel.getState(true));
  ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
  verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
  helper = helperCaptor.getValue();

  updateBalancingStateSafely(helper, CONNECTING, mockPicker);
  assertEquals(CONNECTING, channel.getState(false));
  assertEquals(CONNECTING, channel.getState(true));
  // A repeated getState(true) must not create a second balancer.
  verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
}
/**
 * If the balancer is already running and reports IDLE, getState(true) asks the existing
 * balancer for a connection instead of creating a new one.
 */
@SuppressWarnings("deprecation")
@Test
public void getState_withRequestConnect_IdleWithLbRunning() {
  channelBuilder.nameResolverFactory(
      new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build());
  createChannel();
  verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));

  updateBalancingStateSafely(helper, IDLE, mockPicker);

  assertEquals(IDLE, channel.getState(true));
  // No new balancer — requestConnection() is delegated to the running one.
  verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
  verify(mockLoadBalancer).requestConnection();
}
/**
 * notifyWhenStateChanged() fires its callback when the channel leaves the given state,
 * and fires immediately if the channel is already in a different state.
 */
@Test
public void notifyWhenStateChanged() {
  final AtomicBoolean stateChanged = new AtomicBoolean();
  Runnable onStateChanged = new Runnable() {
    @Override
    public void run() {
      stateChanged.set(true);
    }
  };

  channelBuilder.nameResolverFactory(
      new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build());
  createChannel();
  assertEquals(CONNECTING, channel.getState(false));

  channel.notifyWhenStateChanged(CONNECTING, onStateChanged);
  executor.runDueTasks();
  // Still CONNECTING, so the callback must not have run yet.
  assertFalse(stateChanged.get());

  // state change from CONNECTING to IDLE
  updateBalancingStateSafely(helper, IDLE, mockPicker);
  // onStateChanged callback should run
  executor.runDueTasks();
  assertTrue(stateChanged.get());

  // clear and test from IDLE
  stateChanged.set(false);
  channel.notifyWhenStateChanged(CONNECTING, onStateChanged);
  // onStateChanged callback should run immediately
  executor.runDueTasks();
  assertTrue(stateChanged.get());
}
/**
 * Once shut down, the channel reports SHUTDOWN, pending state-change callbacks fire, and
 * later balancer state updates neither change the state nor fire new callbacks.
 */
@Test
public void channelStateWhenChannelShutdown() {
  final AtomicBoolean stateChanged = new AtomicBoolean();
  Runnable onStateChanged = new Runnable() {
    @Override
    public void run() {
      stateChanged.set(true);
    }
  };

  channelBuilder.nameResolverFactory(
      new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build());
  createChannel();
  assertEquals(CONNECTING, channel.getState(false));
  channel.notifyWhenStateChanged(CONNECTING, onStateChanged);
  executor.runDueTasks();
  assertFalse(stateChanged.get());

  // Shutdown moves the channel to SHUTDOWN and fires the pending callback.
  channel.shutdown();
  assertEquals(SHUTDOWN, channel.getState(false));
  executor.runDueTasks();
  assertTrue(stateChanged.get());

  // A callback registered for SHUTDOWN never fires: SHUTDOWN is terminal even if the
  // (now-ignored) balancer reports a different state.
  stateChanged.set(false);
  channel.notifyWhenStateChanged(SHUTDOWN, onStateChanged);
  updateBalancingStateSafely(helper, CONNECTING, mockPicker);

  assertEquals(SHUTDOWN, channel.getState(false));
  executor.runDueTasks();
  assertFalse(stateChanged.get());
}
/** After the configured idle timeout elapses with no RPCs, the channel becomes IDLE. */
@Test
public void stateIsIdleOnIdleTimeout() {
  // Configure a short idle timeout, then advance the fake clock past it.
  final long idleTimeoutMillis = 2000L;
  channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS);
  createChannel();
  assertEquals(CONNECTING, channel.getState(false));

  timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis));
  assertEquals(IDLE, channel.getState(false));
}
@Test
public void panic_whenIdle() {
subtestPanic(IDLE);
}
@Test
public void panic_whenConnecting() {
subtestPanic(CONNECTING);
}
  @Test
  public void panic_whenTransientFailure() {
    // Panic-mode behavior starting from TRANSIENT_FAILURE; see subtestPanic().
    subtestPanic(TRANSIENT_FAILURE);
  }
  @Test
  public void panic_whenReady() {
    // Panic-mode behavior starting from READY; see subtestPanic().
    subtestPanic(READY);
  }
private void subtestPanic(ConnectivityState initialState) {
assertNotEquals("We don't test panic mode if it's already SHUTDOWN", SHUTDOWN, initialState);
long idleTimeoutMillis = 2000L;
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri).build();
channelBuilder.nameResolverFactory(nameResolverFactory);
channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS);
createChannel();
verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
assertThat(nameResolverFactory.resolvers).hasSize(1);
FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.remove(0);
final Throwable panicReason = new Exception("Simulated uncaught exception");
if (initialState == IDLE) {
timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis));
} else {
updateBalancingStateSafely(helper, initialState, mockPicker);
}
assertEquals(initialState, channel.getState(false));
if (initialState == IDLE) {
// IDLE mode will shutdown resolver and balancer
verify(mockLoadBalancer).shutdown();
assertTrue(resolver.shutdown);
// A new resolver is created
assertThat(nameResolverFactory.resolvers).hasSize(1);
resolver = nameResolverFactory.resolvers.remove(0);
assertFalse(resolver.shutdown);
} else {
verify(mockLoadBalancer, never()).shutdown();
assertFalse(resolver.shutdown);
}
// Make channel panic!
channel.syncContext.execute(
new Runnable() {
@Override
public void run() {
channel.panic(panicReason);
}
});
// Calls buffered in delayedTransport will fail
// Resolver and balancer are shutdown
verify(mockLoadBalancer).shutdown();
assertTrue(resolver.shutdown);
// Channel will stay in TRANSIENT_FAILURE. getState(true) will not revive it.
assertEquals(TRANSIENT_FAILURE, channel.getState(true));
assertEquals(TRANSIENT_FAILURE, channel.getState(true));
verifyPanicMode(panicReason);
// Besides the resolver created initially, no new resolver or balancer are created.
verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class));
assertThat(nameResolverFactory.resolvers).isEmpty();
// A misbehaving balancer that calls updateBalancingState() after it's shut down will not be
// able to revive it.
updateBalancingStateSafely(helper, READY, mockPicker);
verifyPanicMode(panicReason);
// Cannot be revived by exitIdleMode()
channel.syncContext.execute(new Runnable() {
@Override
public void run() {
channel.exitIdleMode();
}
});
verifyPanicMode(panicReason);
// Can still shutdown normally
channel.shutdown();
assertTrue(channel.isShutdown());
assertTrue(channel.isTerminated());
assertEquals(SHUTDOWN, channel.getState(false));
// We didn't stub mockPicker, because it should have never been called in this test.
verifyNoInteractions(mockPicker);
}
@Test
public void panic_bufferedCallsWillFail() {
createChannel();
when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
.thenReturn(PickResult.withNoResult());
updateBalancingStateSafely(helper, CONNECTING, mockPicker);
// Start RPCs that will be buffered in delayedTransport
ClientCall<String, Integer> call =
channel.newCall(method, CallOptions.DEFAULT.withoutWaitForReady());
call.start(mockCallListener, new Metadata());
ClientCall<String, Integer> call2 =
channel.newCall(method, CallOptions.DEFAULT.withWaitForReady());
call2.start(mockCallListener2, new Metadata());
executor.runDueTasks();
verifyNoInteractions(mockCallListener, mockCallListener2);
// Enter panic
final Throwable panicReason = new Exception("Simulated uncaught exception");
channel.syncContext.execute(
new Runnable() {
@Override
public void run() {
channel.panic(panicReason);
}
});
// Buffered RPCs fail immediately
executor.runDueTasks();
verifyCallListenerClosed(mockCallListener, Status.Code.INTERNAL, panicReason);
verifyCallListenerClosed(mockCallListener2, Status.Code.INTERNAL, panicReason);
panicExpected = true;
}
  @Test
  public void panic_atStart() {
    // A resolver that throws from start() must put the channel into panic mode
    // immediately; new RPCs fail with INTERNAL carrying the thrown exception.
    final RuntimeException panicReason = new RuntimeException("Simulated NR exception");
    // Anonymous classes (not lambdas) are required here: NameResolver is an abstract
    // class and the factory overrides two methods.
    final NameResolver failingResolver = new NameResolver() {
      @Override public String getServiceAuthority() {
        return "fake-authority";
      }

      @Override public void start(Listener2 listener) {
        throw panicReason;
      }

      @Override public void shutdown() {}
    };
    channelBuilder.nameResolverFactory(new NameResolver.Factory() {
      @Override public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) {
        return failingResolver;
      }

      @Override public String getDefaultScheme() {
        return "fake";
      }
    });
    createChannel();

    // RPCs fail immediately
    ClientCall<String, Integer> call =
        channel.newCall(method, CallOptions.DEFAULT.withoutWaitForReady());
    call.start(mockCallListener, new Metadata());
    executor.runDueTasks();
    verifyCallListenerClosed(mockCallListener, Status.Code.INTERNAL, panicReason);
    panicExpected = true;
  }
  private void verifyPanicMode(Throwable cause) {
    // Asserts the channel is in panic mode: stuck in TRANSIENT_FAILURE, a new RPC
    // fails with INTERNAL carrying the given cause, and no pending tasks remain that
    // could possibly revive the channel.
    panicExpected = true;
    @SuppressWarnings("unchecked")
    ClientCall.Listener<Integer> mockListener =
        (ClientCall.Listener<Integer>) mock(ClientCall.Listener.class);
    assertEquals(TRANSIENT_FAILURE, channel.getState(false));
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockListener, new Metadata());
    executor.runDueTasks();
    verifyCallListenerClosed(mockListener, Status.Code.INTERNAL, cause);
    // Channel is dead. No more pending task to possibly revive it.
    assertEquals(0, timer.numPendingTasks());
    assertEquals(0, executor.numPendingTasks());
    assertEquals(0, balancerRpcExecutor.numPendingTasks());
  }
private void verifyCallListenerClosed(
ClientCall.Listener<Integer> listener, Status.Code code, Throwable cause) {
ArgumentCaptor<Status> captor = ArgumentCaptor.forClass(Status.class);
verify(listener).onClose(captor.capture(), any(Metadata.class));
Status rpcStatus = captor.getValue();
assertEquals(code, rpcStatus.getCode());
assertSame(cause, rpcStatus.getCause());
verifyNoMoreInteractions(listener);
}
  @Test
  public void idleTimeoutAndReconnect() {
    // After idling out and then reconnecting, only the new balancer's helper may
    // change the channel state; updates through the old helper are ignored.
    long idleTimeoutMillis = 2000L;
    channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS);
    createChannel();
    timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis));
    assertEquals(IDLE, channel.getState(true /* request connection */));

    ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
    // Two times of requesting connection will create loadBalancer twice.
    verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture());
    Helper helper2 = helperCaptor.getValue();

    // Updating on the old helper (whose balancer has been shutdown) does not change the channel
    // state.
    updateBalancingStateSafely(helper, IDLE, mockPicker);
    assertEquals(CONNECTING, channel.getState(false));

    updateBalancingStateSafely(helper2, IDLE, mockPicker);
    assertEquals(IDLE, channel.getState(false));
  }
  @Test
  public void idleMode_resetsDelayedTransportPicker() {
    // Entering idle must reset the delayed transport's picker: a call buffered before
    // idle fails with the TRANSIENT_FAILURE pick error, while a call started after
    // idle exits idle, is re-buffered, and drains once a READY picker is installed.
    ClientStream mockStream = mock(ClientStream.class);
    Status pickError = Status.UNAVAILABLE.withDescription("pick result error");
    long idleTimeoutMillis = 1000L;
    channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS);
    channelBuilder.nameResolverFactory(
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build());
    createChannel();
    assertEquals(CONNECTING, channel.getState(false));

    // This call will be buffered in delayedTransport
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, new Metadata());

    // Move channel into TRANSIENT_FAILURE, which will fail the pending call
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withError(pickError));
    updateBalancingStateSafely(helper, TRANSIENT_FAILURE, mockPicker);
    assertEquals(TRANSIENT_FAILURE, channel.getState(false));
    executor.runDueTasks();
    verify(mockCallListener).onClose(same(pickError), any(Metadata.class));

    // Move channel to idle
    timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis));
    assertEquals(IDLE, channel.getState(false));

    // This call should be buffered, but will move the channel out of idle
    ClientCall<String, Integer> call2 = channel.newCall(method, CallOptions.DEFAULT);
    call2.start(mockCallListener2, new Metadata());
    executor.runDueTasks();
    verifyNoMoreInteractions(mockCallListener2);

    // Get the helper created on exiting idle
    ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
    verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture());
    Helper helper2 = helperCaptor.getValue();

    // Establish a connection
    Subchannel subchannel =
        createSubchannelSafely(helper2, addressGroup, Attributes.EMPTY, subchannelStateListener);
    // NOTE(review): the subchannel was created via helper2 but the old helper is passed
    // here -- presumably the helper argument is only used to run on the sync context;
    // confirm whether helper2 was intended.
    requestConnectionSafely(helper, subchannel);
    MockClientTransportInfo transportInfo = transports.poll();
    ConnectionClientTransport mockTransport = transportInfo.transport;
    ManagedClientTransport.Listener transportListener = transportInfo.listener;
    when(mockTransport.newStream(
            same(method), any(Metadata.class), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    transportListener.transportReady();
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel));
    updateBalancingStateSafely(helper2, READY, mockPicker);
    assertEquals(READY, channel.getState(false));
    executor.runDueTasks();

    // Verify the buffered call was drained
    verify(mockTransport).newStream(
        same(method), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    verify(mockStream).start(any(ClientStreamListener.class));
  }
  @Test
  public void enterIdleEntersIdle() {
    // enterIdle() on a READY channel must move it to IDLE.
    createChannel();
    updateBalancingStateSafely(helper, READY, mockPicker);
    assertEquals(READY, channel.getState(false));

    channel.enterIdle();

    assertEquals(IDLE, channel.getState(false));
  }
  @Test
  public void enterIdleAfterIdleTimerIsNoOp() {
    // enterIdle() on a channel that already idled out via the idle timer must be a
    // no-op (state stays IDLE).
    long idleTimeoutMillis = 2000L;
    channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS);
    createChannel();
    timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis));
    assertEquals(IDLE, channel.getState(false));

    channel.enterIdle();

    assertEquals(IDLE, channel.getState(false));
  }
  @Test
  public void enterIdle_exitsIdleIfDelayedStreamPending() {
    // If a call is buffered in delayedTransport, enterIdle() must immediately exit
    // idle again (new resolver/balancer) so the buffered call can eventually drain.
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();

    // Start a call that will be buffered in delayedTransport
    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, new Metadata());

    // enterIdle() will shut down the name resolver and lb policy used to get a pick for the delayed
    // call
    channel.enterIdle();
    assertEquals(CONNECTING, channel.getState(false));

    // enterIdle() will restart the delayed call by exiting idle. This creates a new helper.
    ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
    verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture());
    Helper helper2 = helperCaptor.getValue();

    // Establish a connection
    Subchannel subchannel =
        createSubchannelSafely(helper2, addressGroup, Attributes.EMPTY, subchannelStateListener);
    // NOTE(review): subchannel came from helper2 but the old helper is passed here --
    // presumably only used for the sync context; confirm whether helper2 was intended.
    requestConnectionSafely(helper, subchannel);
    ClientStream mockStream = mock(ClientStream.class);
    MockClientTransportInfo transportInfo = transports.poll();
    ConnectionClientTransport mockTransport = transportInfo.transport;
    ManagedClientTransport.Listener transportListener = transportInfo.listener;
    when(mockTransport.newStream(
            same(method), any(Metadata.class), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    transportListener.transportReady();
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel));
    updateBalancingStateSafely(helper2, READY, mockPicker);
    assertEquals(READY, channel.getState(false));

    // Verify the original call was drained
    executor.runDueTasks();
    verify(mockTransport).newStream(
        same(method), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    verify(mockStream).start(any(ClientStreamListener.class));
  }
  @Test
  public void updateBalancingStateDoesUpdatePicker() {
    // A new picker installed via updateBalancingState() must actually be used for
    // buffered calls: first picker returns subchannel1 (no ready transport -> call
    // stays buffered), second returns subchannel2 (ready -> call drains).
    ClientStream mockStream = mock(ClientStream.class);
    createChannel();

    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, new Metadata());

    // Make the transport available with subchannel2
    Subchannel subchannel1 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    Subchannel subchannel2 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    requestConnectionSafely(helper, subchannel2);

    MockClientTransportInfo transportInfo = transports.poll();
    ConnectionClientTransport mockTransport = transportInfo.transport;
    ManagedClientTransport.Listener transportListener = transportInfo.listener;
    when(mockTransport.newStream(
            same(method), any(Metadata.class), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    transportListener.transportReady();

    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel1));
    updateBalancingStateSafely(helper, READY, mockPicker);
    executor.runDueTasks();

    // subchannel1 has no ready transport, so nothing is started yet.
    verify(mockTransport, never()).newStream(
        any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    verify(mockStream, never()).start(any(ClientStreamListener.class));

    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(subchannel2));
    updateBalancingStateSafely(helper, READY, mockPicker);
    executor.runDueTasks();

    verify(mockTransport).newStream(
        same(method), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    verify(mockStream).start(any(ClientStreamListener.class));
  }
  @Test
  public void updateBalancingState_withWrappedSubchannel() {
    // A picker may return a subchannel wrapped in a ForwardingSubchannel; the channel
    // must unwrap it via the delegate and still use the real transport.
    ClientStream mockStream = mock(ClientStream.class);
    createChannel();

    ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
    call.start(mockCallListener, new Metadata());

    final Subchannel subchannel1 =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    requestConnectionSafely(helper, subchannel1);

    MockClientTransportInfo transportInfo = transports.poll();
    ConnectionClientTransport mockTransport = transportInfo.transport;
    ManagedClientTransport.Listener transportListener = transportInfo.listener;
    when(mockTransport.newStream(
            same(method), any(Metadata.class), any(CallOptions.class),
            ArgumentMatchers.<ClientStreamTracer[]>any()))
        .thenReturn(mockStream);
    transportListener.transportReady();

    Subchannel wrappedSubchannel1 = new ForwardingSubchannel() {
      @Override
      protected Subchannel delegate() {
        return subchannel1;
      }
    };
    when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
        .thenReturn(PickResult.withSubchannel(wrappedSubchannel1));
    updateBalancingStateSafely(helper, READY, mockPicker);
    executor.runDueTasks();

    verify(mockTransport).newStream(
        same(method), any(Metadata.class), any(CallOptions.class),
        ArgumentMatchers.<ClientStreamTracer[]>any());
    verify(mockStream).start(any(ClientStreamListener.class));
  }
  @Test
  public void updateBalancingStateWithShutdownShouldBeIgnored() {
    // A balancer must not be able to push the channel into SHUTDOWN through
    // updateBalancingState(); such an update is ignored and no state-change
    // callback fires.
    channelBuilder.nameResolverFactory(
        new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build());
    createChannel();
    assertEquals(CONNECTING, channel.getState(false));

    Runnable onStateChanged = mock(Runnable.class);
    channel.notifyWhenStateChanged(CONNECTING, onStateChanged);

    updateBalancingStateSafely(helper, SHUTDOWN, mockPicker);

    assertEquals(CONNECTING, channel.getState(false));
    executor.runDueTasks();
    verify(onStateChanged, never()).run();
  }
@Test
public void balancerRefreshNameResolution() {
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri).build();
channelBuilder.nameResolverFactory(nameResolverFactory);
createChannel();
FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0);
int initialRefreshCount = resolver.refreshCalled;
refreshNameResolutionSafely(helper);
assertEquals(initialRefreshCount + 1, resolver.refreshCalled);
}
@Test
public void resetConnectBackoff_noOpWhenChannelShutdown() {
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri).build();
channelBuilder.nameResolverFactory(nameResolverFactory);
createChannel();
channel.shutdown();
assertTrue(channel.isShutdown());
channel.resetConnectBackoff();
FakeNameResolverFactory.FakeNameResolver nameResolver = nameResolverFactory.resolvers.get(0);
assertEquals(0, nameResolver.refreshCalled);
}
@Test
public void resetConnectBackoff_noOpWhenNameResolverNotStarted() {
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri).build();
channelBuilder.nameResolverFactory(nameResolverFactory);
requestConnection = false;
createChannel();
channel.resetConnectBackoff();
FakeNameResolverFactory.FakeNameResolver nameResolver = nameResolverFactory.resolvers.get(0);
assertEquals(0, nameResolver.refreshCalled);
}
  @Test
  public void channelsAndSubchannels_instrumented_name() throws Exception {
    // Channelz stats must report the channel's target and, for a subchannel, the
    // string form of its address-group list.
    createChannel();
    assertEquals(TARGET, getStats(channel).target);

    Subchannel subchannel =
        createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    assertEquals(Collections.singletonList(addressGroup).toString(),
        getStats((AbstractSubchannel) subchannel).target);
  }
  @Test
  public void channelTracing_channelCreationEvent() throws Exception {
    // Channel creation must record a trace event with the target and the creation
    // timestamp from the fake ticker.
    timer.forwardNanos(1234);
    channelBuilder.maxTraceEvents(10);
    createChannel();
    assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Channel for 'fake://fake.example.com' created")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .build());
  }
  @Test
  public void channelTracing_subchannelCreationEvents() throws Exception {
    // Subchannel creation must record a trace event on the parent channel (with a
    // ref to the subchannel) and one on the subchannel itself.
    channelBuilder.maxTraceEvents(10);
    createChannel();
    timer.forwardNanos(1234);
    AbstractSubchannel subchannel =
        (AbstractSubchannel) createSubchannelSafely(
            helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Child Subchannel started")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .setSubchannelRef(subchannel.getInstrumentedInternalSubchannel())
        .build());
    assertThat(getStats(subchannel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Subchannel for [[[test-addr]/{}]] created")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .build());
  }
  @Test
  public void channelTracing_nameResolvingErrorEvent() throws Exception {
    // A name-resolution failure must record a CT_WARNING trace event carrying the
    // error status.
    timer.forwardNanos(1234);
    channelBuilder.maxTraceEvents(10);

    Status error = Status.UNAVAILABLE.withDescription("simulated error");
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri).setError(error).build();
    channelBuilder.nameResolverFactory(nameResolverFactory);

    createChannel(true);

    assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Failed to resolve name: " + error)
        .setSeverity(ChannelTrace.Event.Severity.CT_WARNING)
        .setTimestampNanos(timer.getTicker().read())
        .build());
  }
  @Test
  public void channelTracing_nameResolvedEvent() throws Exception {
    // Successful resolution must record a CT_INFO trace event listing the resolved
    // addresses.
    timer.forwardNanos(1234);
    channelBuilder.maxTraceEvents(10);
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri)
            .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
            .build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();
    assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Address resolved: "
            + Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .build());
  }
  @Test
  public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends() throws Exception {
    // Trace events are only recorded on resolution-outcome *transitions*:
    // success->success and error->error add nothing; success->error and
    // error->success each add one event.
    // (Method name typo "zeor" -> "zero" is kept to preserve the public test name.)
    timer.forwardNanos(1234);
    channelBuilder.maxTraceEvents(10);
    List<EquivalentAddressGroup> servers = new ArrayList<>();
    servers.add(new EquivalentAddressGroup(socketAddress));
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();

    int prevSize = getStats(channel).channelTrace.events.size();
    ResolutionResult resolutionResult1 = ResolutionResult.newBuilder()
        .setAddresses(Collections.singletonList(
            new EquivalentAddressGroup(
                Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
        .build();
    // Second consecutive success: no new event.
    nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult1);
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize);

    // Success -> error: one new event.
    prevSize = getStats(channel).channelTrace.events.size();
    nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL);
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);

    // Error -> error: no new event.
    prevSize = getStats(channel).channelTrace.events.size();
    nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL);
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize);

    // Error -> success: one new event.
    prevSize = getStats(channel).channelTrace.events.size();
    ResolutionResult resolutionResult2 = ResolutionResult.newBuilder()
        .setAddresses(Collections.singletonList(
            new EquivalentAddressGroup(
                Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
        .build();
    nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2);
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);
  }
  @Test
  public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends_usesListener2onResult2()
      throws Exception {
    // Same transition-based tracing as the onResult variant, but delivered through
    // Listener2.onResult2(), which must run on the channel's sync context.
    // (Method name typo "zeor" -> "zero" is kept to preserve the public test name.)
    timer.forwardNanos(1234);
    channelBuilder.maxTraceEvents(10);
    List<EquivalentAddressGroup> servers = new ArrayList<>();
    servers.add(new EquivalentAddressGroup(socketAddress));
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();

    int prevSize = getStats(channel).channelTrace.events.size();
    ResolutionResult resolutionResult1 = ResolutionResult.newBuilder()
        .setAddresses(Collections.singletonList(
            new EquivalentAddressGroup(
                Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
        .build();
    // Second consecutive success: no new event.
    channel.syncContext.execute(
        () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1));
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize);

    // Success -> error: one new event.
    prevSize = getStats(channel).channelTrace.events.size();
    channel.syncContext.execute(() ->
        nameResolverFactory.resolvers.get(0).listener.onResult2(
            ResolutionResult.newBuilder()
                .setAddressesOrError(
                    StatusOr.fromStatus(Status.INTERNAL)).build()));
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);

    // Error -> error: no new event.
    prevSize = getStats(channel).channelTrace.events.size();
    channel.syncContext.execute(() ->
        nameResolverFactory.resolvers.get(0).listener.onResult2(
            ResolutionResult.newBuilder()
                .setAddressesOrError(
                    StatusOr.fromStatus(Status.INTERNAL)).build()));
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize);

    // Error -> success: one new event.
    prevSize = getStats(channel).channelTrace.events.size();
    ResolutionResult resolutionResult2 = ResolutionResult.newBuilder()
        .setAddresses(Collections.singletonList(
            new EquivalentAddressGroup(
                Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
        .build();
    channel.syncContext.execute(
        () -> nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult2));
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);
  }
  @Test
  public void channelTracing_serviceConfigChange() throws Exception {
    // A "Service config changed" trace event is recorded only when the config
    // actually changes; re-delivering the same config adds nothing.
    timer.forwardNanos(1234);
    channelBuilder.maxTraceEvents(10);
    List<EquivalentAddressGroup> servers = new ArrayList<>();
    servers.add(new EquivalentAddressGroup(socketAddress));
    FakeNameResolverFactory nameResolverFactory =
        new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build();
    channelBuilder.nameResolverFactory(nameResolverFactory);
    createChannel();

    int prevSize = getStats(channel).channelTrace.events.size();
    ManagedChannelServiceConfig mcsc1 = createManagedChannelServiceConfig(
        ImmutableMap.<String, Object>of(),
        new PolicySelection(
            mockLoadBalancerProvider, null));
    ResolutionResult resolutionResult1 = ResolutionResult.newBuilder()
        .setAddresses(Collections.singletonList(
            new EquivalentAddressGroup(
                Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
        .setServiceConfig(ConfigOrError.fromConfig(mcsc1))
        .build();
    // First config: one new event.
    nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult1);
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);
    assertThat(getStats(channel).channelTrace.events.get(prevSize))
        .isEqualTo(new ChannelTrace.Event.Builder()
            .setDescription("Service config changed")
            .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
            .setTimestampNanos(timer.getTicker().read())
            .build());

    // Same config again: no new event.
    prevSize = getStats(channel).channelTrace.events.size();
    ResolutionResult resolutionResult2 = ResolutionResult.newBuilder().setAddresses(
        Collections.singletonList(
            new EquivalentAddressGroup(
                Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
        .setServiceConfig(ConfigOrError.fromConfig(mcsc1))
        .build();
    nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2);
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize);

    // Different (empty) config: one new event.
    prevSize = getStats(channel).channelTrace.events.size();
    timer.forwardNanos(1234);
    ResolutionResult resolutionResult3 = ResolutionResult.newBuilder()
        .setAddresses(Collections.singletonList(
            new EquivalentAddressGroup(
                Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
        .setServiceConfig(ConfigOrError.fromConfig(ManagedChannelServiceConfig.empty()))
        .build();
    nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult3);
    assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);
    assertThat(getStats(channel).channelTrace.events.get(prevSize))
        .isEqualTo(new ChannelTrace.Event.Builder()
            .setDescription("Service config changed")
            .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
            .setTimestampNanos(timer.getTicker().read())
            .build());
  }
@Test
public void channelTracing_serviceConfigChange_usesListener2OnResult2() throws Exception {
timer.forwardNanos(1234);
channelBuilder.maxTraceEvents(10);
List<EquivalentAddressGroup> servers = new ArrayList<>();
servers.add(new EquivalentAddressGroup(socketAddress));
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build();
channelBuilder.nameResolverFactory(nameResolverFactory);
createChannel();
int prevSize = getStats(channel).channelTrace.events.size();
ManagedChannelServiceConfig mcsc1 = createManagedChannelServiceConfig(
ImmutableMap.<String, Object>of(),
new PolicySelection(
mockLoadBalancerProvider, null));
ResolutionResult resolutionResult1 = ResolutionResult.newBuilder()
.setAddresses(Collections.singletonList(
new EquivalentAddressGroup(
Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
.setServiceConfig(ConfigOrError.fromConfig(mcsc1))
.build();
channel.syncContext.execute(() ->
nameResolverFactory.resolvers.get(0).listener.onResult2(resolutionResult1));
assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);
assertThat(getStats(channel).channelTrace.events.get(prevSize))
.isEqualTo(new ChannelTrace.Event.Builder()
.setDescription("Service config changed")
.setSeverity(ChannelTrace.Event.Severity.CT_INFO)
.setTimestampNanos(timer.getTicker().read())
.build());
prevSize = getStats(channel).channelTrace.events.size();
ResolutionResult resolutionResult2 = ResolutionResult.newBuilder().setAddresses(
Collections.singletonList(
new EquivalentAddressGroup(
Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
.setServiceConfig(ConfigOrError.fromConfig(mcsc1))
.build();
channel.syncContext.execute(() ->
nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2));
assertThat(getStats(channel).channelTrace.events).hasSize(prevSize);
prevSize = getStats(channel).channelTrace.events.size();
timer.forwardNanos(1234);
ResolutionResult resolutionResult3 = ResolutionResult.newBuilder()
.setAddresses(Collections.singletonList(
new EquivalentAddressGroup(
Arrays.asList(new SocketAddress() {}, new SocketAddress() {}))))
.setServiceConfig(ConfigOrError.fromConfig(ManagedChannelServiceConfig.empty()))
.build();
channel.syncContext.execute(() ->
nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult3));
assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1);
assertThat(getStats(channel).channelTrace.events.get(prevSize))
.isEqualTo(new ChannelTrace.Event.Builder()
.setDescription("Service config changed")
.setSeverity(ChannelTrace.Event.Severity.CT_INFO)
.setTimestampNanos(timer.getTicker().read())
.build());
}
  @Test
  public void channelTracing_stateChangeEvent() throws Exception {
    // A balancer state change must record a trace event naming the new state and
    // the picker (mockPicker's toString is stubbed to "mockPicker" elsewhere --
    // NOTE(review): confirmed only by the expected description here).
    channelBuilder.maxTraceEvents(10);
    createChannel();
    timer.forwardNanos(1234);
    updateBalancingStateSafely(helper, CONNECTING, mockPicker);
    assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Entering CONNECTING state with picker: mockPicker")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .build());
  }
  @Test
  public void channelTracing_subchannelStateChangeEvent() throws Exception {
    // Requesting a transport on the internal subchannel must record a CONNECTING
    // trace event on the subchannel.
    channelBuilder.maxTraceEvents(10);
    createChannel();
    AbstractSubchannel subchannel =
        (AbstractSubchannel) createSubchannelSafely(
            helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
    timer.forwardNanos(1234);
    ((TransportProvider) subchannel.getInternalSubchannel()).obtainActiveTransport();
    assertThat(getStats(subchannel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("CONNECTING as requested")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .build());
  }
  @Test
  public void channelTracing_oobChannelStateChangeEvent() throws Exception {
    // An OOB channel's subchannel state change must record a trace event on the
    // OOB channel itself.
    channelBuilder.maxTraceEvents(10);
    createChannel();
    OobChannel oobChannel = (OobChannel) helper.createOobChannel(
        Collections.singletonList(addressGroup), "authority");
    timer.forwardNanos(1234);
    oobChannel.handleSubchannelStateChange(
        ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING));
    assertThat(getStats(oobChannel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Entering CONNECTING state")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .build());
  }
  @Test
  public void channelTracing_oobChannelCreationEvents() throws Exception {
    // Creating an OOB channel must record trace events on the parent channel (with
    // a ref to the OOB channel), on the OOB channel, and on its internal subchannel.
    channelBuilder.maxTraceEvents(10);
    createChannel();
    timer.forwardNanos(1234);
    OobChannel oobChannel = (OobChannel) helper.createOobChannel(
        Collections.singletonList(addressGroup), "authority");
    assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("Child OobChannel created")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .setChannelRef(oobChannel)
        .build());
    assertThat(getStats(oobChannel).channelTrace.events).contains(new ChannelTrace.Event.Builder()
        .setDescription("OobChannel for [[[test-addr]/{}]] created")
        .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
        .setTimestampNanos(timer.getTicker().read())
        .build());
    assertThat(getStats(oobChannel.getInternalSubchannel()).channelTrace.events).contains(
        new ChannelTrace.Event.Builder()
            .setDescription("Subchannel for [[[test-addr]/{}]] created")
            .setSeverity(ChannelTrace.Event.Severity.CT_INFO)
            .setTimestampNanos(timer.getTicker().read())
            .build());
  }
@Test
public void channelsAndSubchannels_instrumented_state() throws Exception {
createChannel();
ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
helper = helperCaptor.getValue();
assertEquals(CONNECTING, getStats(channel).state);
AbstractSubchannel subchannel =
(AbstractSubchannel) createSubchannelSafely(
helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
assertEquals(IDLE, getStats(subchannel).state);
requestConnectionSafely(helper, subchannel);
assertEquals(CONNECTING, getStats(subchannel).state);
MockClientTransportInfo transportInfo = transports.poll();
assertEquals(CONNECTING, getStats(subchannel).state);
transportInfo.listener.transportReady();
assertEquals(READY, getStats(subchannel).state);
assertEquals(CONNECTING, getStats(channel).state);
updateBalancingStateSafely(helper, READY, mockPicker);
assertEquals(READY, getStats(channel).state);
channel.shutdownNow();
assertEquals(SHUTDOWN, getStats(channel).state);
assertEquals(SHUTDOWN, getStats(subchannel).state);
}
@Test
public void channelStat_callStarted() throws Exception {
createChannel();
ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
assertEquals(0, getStats(channel).callsStarted);
call.start(mockCallListener, new Metadata());
assertEquals(1, getStats(channel).callsStarted);
assertEquals(executor.getTicker().read(), getStats(channel).lastCallStartedNanos);
}
@Test
public void channelsAndSubChannels_instrumented_success() throws Exception {
channelsAndSubchannels_instrumented0(true);
}
@Test
public void channelsAndSubChannels_instrumented_fail() throws Exception {
channelsAndSubchannels_instrumented0(false);
}
private void channelsAndSubchannels_instrumented0(boolean success) throws Exception {
createChannel();
ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
// Channel stat bumped when ClientCall.start() called
assertEquals(0, getStats(channel).callsStarted);
call.start(mockCallListener, new Metadata());
assertEquals(1, getStats(channel).callsStarted);
ClientStream mockStream = mock(ClientStream.class);
ClientStreamTracer.Factory factory = mock(ClientStreamTracer.Factory.class);
AbstractSubchannel subchannel =
(AbstractSubchannel) createSubchannelSafely(
helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
requestConnectionSafely(helper, subchannel);
MockClientTransportInfo transportInfo = transports.poll();
transportInfo.listener.transportReady();
ClientTransport mockTransport = transportInfo.transport;
when(mockTransport.newStream(
any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class),
ArgumentMatchers.<ClientStreamTracer[]>any()))
.thenReturn(mockStream);
when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn(
PickResult.withSubchannel(subchannel, factory));
// subchannel stat bumped when call gets assigned to it
assertEquals(0, getStats(subchannel).callsStarted);
updateBalancingStateSafely(helper, READY, mockPicker);
assertEquals(1, executor.runDueTasks());
verify(mockStream).start(streamListenerCaptor.capture());
assertEquals(1, getStats(subchannel).callsStarted);
ClientStreamListener streamListener = streamListenerCaptor.getValue();
call.halfClose();
// closing stream listener affects subchannel stats immediately
assertEquals(0, getStats(subchannel).callsSucceeded);
assertEquals(0, getStats(subchannel).callsFailed);
streamListener.closed(
success ? Status.OK : Status.UNKNOWN, PROCESSED, new Metadata());
if (success) {
assertEquals(1, getStats(subchannel).callsSucceeded);
assertEquals(0, getStats(subchannel).callsFailed);
} else {
assertEquals(0, getStats(subchannel).callsSucceeded);
assertEquals(1, getStats(subchannel).callsFailed);
}
// channel stats bumped when the ClientCall.Listener is notified
assertEquals(0, getStats(channel).callsSucceeded);
assertEquals(0, getStats(channel).callsFailed);
executor.runDueTasks();
if (success) {
assertEquals(1, getStats(channel).callsSucceeded);
assertEquals(0, getStats(channel).callsFailed);
} else {
assertEquals(0, getStats(channel).callsSucceeded);
assertEquals(1, getStats(channel).callsFailed);
}
}
@Test
public void channelsAndSubchannels_oob_instrumented_success() throws Exception {
channelsAndSubchannels_oob_instrumented0(true);
}
@Test
public void channelsAndSubchannels_oob_instrumented_fail() throws Exception {
channelsAndSubchannels_oob_instrumented0(false);
}
private void channelsAndSubchannels_oob_instrumented0(boolean success) throws Exception {
// set up
ClientStream mockStream = mock(ClientStream.class);
createChannel();
OobChannel oobChannel = (OobChannel) helper.createOobChannel(
Collections.singletonList(addressGroup), "oobauthority");
AbstractSubchannel oobSubchannel = (AbstractSubchannel) oobChannel.getSubchannel();
FakeClock callExecutor = new FakeClock();
CallOptions options =
CallOptions.DEFAULT.withExecutor(callExecutor.getScheduledExecutorService());
ClientCall<String, Integer> call = oobChannel.newCall(method, options);
Metadata headers = new Metadata();
// Channel stat bumped when ClientCall.start() called
assertEquals(0, getStats(oobChannel).callsStarted);
call.start(mockCallListener, headers);
assertEquals(1, getStats(oobChannel).callsStarted);
MockClientTransportInfo transportInfo = transports.poll();
ConnectionClientTransport mockTransport = transportInfo.transport;
ManagedClientTransport.Listener transportListener = transportInfo.listener;
when(mockTransport.newStream(
same(method), same(headers), any(CallOptions.class),
ArgumentMatchers.<ClientStreamTracer[]>any()))
.thenReturn(mockStream);
// subchannel stat bumped when call gets assigned to it
assertEquals(0, getStats(oobSubchannel).callsStarted);
transportListener.transportReady();
callExecutor.runDueTasks();
verify(mockStream).start(streamListenerCaptor.capture());
assertEquals(1, getStats(oobSubchannel).callsStarted);
ClientStreamListener streamListener = streamListenerCaptor.getValue();
call.halfClose();
// closing stream listener affects subchannel stats immediately
assertEquals(0, getStats(oobSubchannel).callsSucceeded);
assertEquals(0, getStats(oobSubchannel).callsFailed);
streamListener.closed(success ? Status.OK : Status.UNKNOWN, PROCESSED, new Metadata());
if (success) {
assertEquals(1, getStats(oobSubchannel).callsSucceeded);
assertEquals(0, getStats(oobSubchannel).callsFailed);
} else {
assertEquals(0, getStats(oobSubchannel).callsSucceeded);
assertEquals(1, getStats(oobSubchannel).callsFailed);
}
// channel stats bumped when the ClientCall.Listener is notified
assertEquals(0, getStats(oobChannel).callsSucceeded);
assertEquals(0, getStats(oobChannel).callsFailed);
callExecutor.runDueTasks();
if (success) {
assertEquals(1, getStats(oobChannel).callsSucceeded);
assertEquals(0, getStats(oobChannel).callsFailed);
} else {
assertEquals(0, getStats(oobChannel).callsSucceeded);
assertEquals(1, getStats(oobChannel).callsFailed);
}
// oob channel is separate from the original channel
assertEquals(0, getStats(channel).callsSucceeded);
assertEquals(0, getStats(channel).callsFailed);
}
@Test
public void channelsAndSubchannels_oob_instrumented_name() throws Exception {
createChannel();
String authority = "oobauthority";
OobChannel oobChannel = (OobChannel) helper.createOobChannel(
Collections.singletonList(addressGroup), authority);
assertEquals(authority, getStats(oobChannel).target);
}
@Test
public void channelsAndSubchannels_oob_instrumented_state() throws Exception {
createChannel();
OobChannel oobChannel = (OobChannel) helper.createOobChannel(
Collections.singletonList(addressGroup), "oobauthority");
assertEquals(IDLE, getStats(oobChannel).state);
oobChannel.getSubchannel().requestConnection();
assertEquals(CONNECTING, getStats(oobChannel).state);
MockClientTransportInfo transportInfo = transports.poll();
ManagedClientTransport.Listener transportListener = transportInfo.listener;
transportListener.transportReady();
assertEquals(READY, getStats(oobChannel).state);
// oobchannel state is separate from the ManagedChannel
assertEquals(CONNECTING, getStats(channel).state);
channel.shutdownNow();
assertEquals(SHUTDOWN, getStats(channel).state);
assertEquals(SHUTDOWN, getStats(oobChannel).state);
}
@Test
public void binaryLogInstalled() throws Exception {
final SettableFuture<Boolean> intercepted = SettableFuture.create();
channelBuilder.binlog = new BinaryLog() {
@Override
public void close() throws IOException {
// noop
}
@Override
public <ReqT, RespT> ServerMethodDefinition<?, ?> wrapMethodDefinition(
ServerMethodDefinition<ReqT, RespT> oMethodDef) {
return oMethodDef;
}
@Override
public Channel wrapChannel(Channel channel) {
return ClientInterceptors.intercept(channel,
new ClientInterceptor() {
@Override
public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
MethodDescriptor<ReqT, RespT> method,
CallOptions callOptions,
Channel next) {
intercepted.set(true);
return next.newCall(method, callOptions);
}
});
}
};
createChannel();
ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
call.start(mockCallListener, new Metadata());
assertTrue(intercepted.get());
}
@Test
public void retryBackoffThenChannelShutdown_retryShouldStillHappen_newCallShouldFail() {
Map<String, Object> retryPolicy = new HashMap<>();
retryPolicy.put("maxAttempts", 3D);
retryPolicy.put("initialBackoff", "10s");
retryPolicy.put("maxBackoff", "30s");
retryPolicy.put("backoffMultiplier", 2D);
retryPolicy.put("retryableStatusCodes", Arrays.<Object>asList("UNAVAILABLE"));
Map<String, Object> methodConfig = new HashMap<>();
Map<String, Object> name = new HashMap<>();
name.put("service", "service");
methodConfig.put("name", Arrays.<Object>asList(name));
methodConfig.put("retryPolicy", retryPolicy);
Map<String, Object> rawServiceConfig = new HashMap<>();
rawServiceConfig.put("methodConfig", Arrays.<Object>asList(methodConfig));
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri)
.setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
.build();
ManagedChannelServiceConfig managedChannelServiceConfig =
createManagedChannelServiceConfig(rawServiceConfig, null);
nameResolverFactory.nextConfigOrError.set(
ConfigOrError.fromConfig(managedChannelServiceConfig));
channelBuilder.nameResolverFactory(nameResolverFactory);
channelBuilder.executor(MoreExecutors.directExecutor());
channelBuilder.enableRetry();
RetriableStream.setRandom(
// not random
new Random() {
@Override
public double nextDouble() {
return 1D; // fake random
}
});
requestConnection = false;
createChannel();
ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
call.start(mockCallListener, new Metadata());
ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
helper = helperCaptor.getValue();
verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture());
ResolvedAddresses resolvedAddresses = resolvedAddressCaptor.getValue();
assertThat(resolvedAddresses.getAddresses()).isEqualTo(nameResolverFactory.servers);
// simulating request connection and then transport ready after resolved address
Subchannel subchannel =
createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
.thenReturn(PickResult.withSubchannel(subchannel));
requestConnectionSafely(helper, subchannel);
MockClientTransportInfo transportInfo = transports.poll();
ConnectionClientTransport mockTransport = transportInfo.transport;
ClientStream mockStream = mock(ClientStream.class);
ClientStream mockStream2 = mock(ClientStream.class);
when(mockTransport.newStream(
same(method), any(Metadata.class), any(CallOptions.class),
ArgumentMatchers.<ClientStreamTracer[]>any()))
.thenReturn(mockStream).thenReturn(mockStream2);
transportInfo.listener.transportReady();
updateBalancingStateSafely(helper, READY, mockPicker);
ArgumentCaptor<ClientStreamListener> streamListenerCaptor =
ArgumentCaptor.forClass(ClientStreamListener.class);
verify(mockStream).start(streamListenerCaptor.capture());
assertThat(timer.getPendingTasks()).isEmpty();
// trigger retry
streamListenerCaptor.getValue().closed(
Status.UNAVAILABLE, PROCESSED, new Metadata());
// in backoff
timer.forwardTime(6, TimeUnit.SECONDS);
assertThat(timer.getPendingTasks()).hasSize(1);
verify(mockStream2, never()).start(any(ClientStreamListener.class));
// shutdown during backoff period
channel.shutdown();
assertThat(timer.getPendingTasks()).hasSize(1);
verify(mockCallListener, never()).onClose(any(Status.class), any(Metadata.class));
ClientCall<String, Integer> call2 = channel.newCall(method, CallOptions.DEFAULT);
call2.start(mockCallListener2, new Metadata());
ArgumentCaptor<Status> statusCaptor = ArgumentCaptor.forClass(Status.class);
verify(mockCallListener2).onClose(statusCaptor.capture(), any(Metadata.class));
assertSame(Status.Code.UNAVAILABLE, statusCaptor.getValue().getCode());
assertEquals("Channel shutdown invoked", statusCaptor.getValue().getDescription());
// backoff ends
timer.forwardTime(6, TimeUnit.SECONDS);
assertThat(timer.getPendingTasks()).isEmpty();
verify(mockStream2).start(streamListenerCaptor.capture());
verify(mockLoadBalancer, never()).shutdown();
assertFalse(
"channel.isTerminated() is expected to be false but was true",
channel.isTerminated());
streamListenerCaptor.getValue().closed(
Status.INTERNAL, PROCESSED, new Metadata());
verify(mockLoadBalancer).shutdown();
// simulating the shutdown of load balancer triggers the shutdown of subchannel
shutdownSafely(helper, subchannel);
transportInfo.listener.transportShutdown(Status.INTERNAL);
transportInfo.listener.transportTerminated(); // simulating transport terminated
assertTrue(
"channel.isTerminated() is expected to be true but was false",
channel.isTerminated());
}
@Test
public void hedgingScheduledThenChannelShutdown_hedgeShouldStillHappen_newCallShouldFail() {
Map<String, Object> hedgingPolicy = new HashMap<>();
hedgingPolicy.put("maxAttempts", 3D);
hedgingPolicy.put("hedgingDelay", "10s");
hedgingPolicy.put("nonFatalStatusCodes", Arrays.<Object>asList("UNAVAILABLE"));
Map<String, Object> methodConfig = new HashMap<>();
Map<String, Object> name = new HashMap<>();
name.put("service", "service");
methodConfig.put("name", Arrays.<Object>asList(name));
methodConfig.put("hedgingPolicy", hedgingPolicy);
Map<String, Object> rawServiceConfig = new HashMap<>();
rawServiceConfig.put("methodConfig", Arrays.<Object>asList(methodConfig));
FakeNameResolverFactory nameResolverFactory =
new FakeNameResolverFactory.Builder(expectedUri)
.setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress)))
.build();
ManagedChannelServiceConfig managedChannelServiceConfig =
createManagedChannelServiceConfig(rawServiceConfig, null);
nameResolverFactory.nextConfigOrError.set(
ConfigOrError.fromConfig(managedChannelServiceConfig));
channelBuilder.nameResolverFactory(nameResolverFactory);
channelBuilder.executor(MoreExecutors.directExecutor());
channelBuilder.enableRetry();
requestConnection = false;
createChannel();
ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
call.start(mockCallListener, new Metadata());
ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(Helper.class);
verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture());
helper = helperCaptor.getValue();
verify(mockLoadBalancer).acceptResolvedAddresses(resolvedAddressCaptor.capture());
ResolvedAddresses resolvedAddresses = resolvedAddressCaptor.getValue();
assertThat(resolvedAddresses.getAddresses()).isEqualTo(nameResolverFactory.servers);
// simulating request connection and then transport ready after resolved address
Subchannel subchannel =
createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener);
when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class)))
.thenReturn(PickResult.withSubchannel(subchannel));
requestConnectionSafely(helper, subchannel);
MockClientTransportInfo transportInfo = transports.poll();
ConnectionClientTransport mockTransport = transportInfo.transport;
ClientStream mockStream = mock(ClientStream.class);
ClientStream mockStream2 = mock(ClientStream.class);
when(mockTransport.newStream(
same(method), any(Metadata.class), any(CallOptions.class),
ArgumentMatchers.<ClientStreamTracer[]>any()))
.thenReturn(mockStream).thenReturn(mockStream2);
transportInfo.listener.transportReady();
updateBalancingStateSafely(helper, READY, mockPicker);
ArgumentCaptor<ClientStreamListener> streamListenerCaptor =
ArgumentCaptor.forClass(ClientStreamListener.class);
verify(mockStream).start(streamListenerCaptor.capture());
// in hedging delay backoff
timer.forwardTime(5, TimeUnit.SECONDS);
assertThat(timer.numPendingTasks()).isEqualTo(1);
// first hedge fails
streamListenerCaptor.getValue().closed(
Status.UNAVAILABLE, PROCESSED, new Metadata());
verify(mockStream2, never()).start(any(ClientStreamListener.class));
// shutdown during backoff period
channel.shutdown();
assertThat(timer.numPendingTasks()).isEqualTo(1);
verify(mockCallListener, never()).onClose(any(Status.class), any(Metadata.class));
ClientCall<String, Integer> call2 = channel.newCall(method, CallOptions.DEFAULT);
call2.start(mockCallListener2, new Metadata());
ArgumentCaptor<Status> statusCaptor = ArgumentCaptor.forClass(Status.class);
verify(mockCallListener2).onClose(statusCaptor.capture(), any(Metadata.class));
assertSame(Status.Code.UNAVAILABLE, statusCaptor.getValue().getCode());
assertEquals("Channel shutdown invoked", statusCaptor.getValue().getDescription());
// backoff ends
timer.forwardTime(5, TimeUnit.SECONDS);
assertThat(timer.numPendingTasks()).isEqualTo(1);
verify(mockStream2).start(streamListenerCaptor.capture());
verify(mockLoadBalancer, never()).shutdown();
assertFalse(
"channel.isTerminated() is expected to be false but was true",
channel.isTerminated());
streamListenerCaptor.getValue().closed(
Status.INTERNAL, PROCESSED, new Metadata());
assertThat(timer.numPendingTasks()).isEqualTo(0);
verify(mockLoadBalancer).shutdown();
// simulating the shutdown of load balancer triggers the shutdown of subchannel
shutdownSafely(helper, subchannel);
// simulating transport shutdown & terminated
transportInfo.listener.transportShutdown(Status.INTERNAL);
transportInfo.listener.transportTerminated();
assertTrue(
"channel.isTerminated() is expected to be true but was false",
channel.isTerminated());
}
@Test
public void badServiceConfigIsRecoverable() throws Exception {
final List<EquivalentAddressGroup> addresses =
ImmutableList.of(new EquivalentAddressGroup(new SocketAddress() {}));
final | ManagedChannelImplTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java | {
"start": 1720,
"end": 5382
} | interface ____ {
/**
* Returns the aggregator factory with which this parser is associated, may return {@code null} indicating the
* aggregation should be skipped (e.g. when trying to aggregate on unmapped fields).
*
* @param aggregationName The name of the aggregation
* @param parser The parser
* @return The resolved aggregator factory or {@code null} in case the aggregation should be skipped
* @throws java.io.IOException When parsing fails
*/
AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException;
}
/**
* Return the name of this aggregator.
*/
public abstract String name();
/**
* Return the parent aggregator.
*/
public abstract Aggregator parent();
/**
* Return the sub aggregator with the provided name.
*/
public abstract Aggregator subAggregator(String name);
/**
* Resolve the next step of the sort path as though this aggregation
* supported sorting. This is usually the "first step" when resolving
* a sort path because most aggs that support sorting their buckets
* aren't valid in the middle of a sort path.
* <p>
* For example, the {@code terms} aggs supports sorting its buckets, but
* that sort path itself can't contain a different {@code terms}
* aggregation.
*/
public final Aggregator resolveSortPathOnValidAgg(AggregationPath.PathElement next, Iterator<AggregationPath.PathElement> path) {
Aggregator n = subAggregator(next.name());
if (n == null) {
throw new IllegalArgumentException(
"The provided aggregation ["
+ next
+ "] either does not exist, or is "
+ "a pipeline aggregation and cannot be used to sort the buckets."
);
}
if (false == path.hasNext()) {
return n;
}
if (next.key() != null) {
throw new IllegalArgumentException("Key only allowed on last aggregation path element but got [" + next + "]");
}
return n.resolveSortPath(path.next(), path);
}
/**
* Resolve a sort path to the target.
* <p>
* The default implementation throws an exception but we override it on aggregations that support sorting.
*/
public Aggregator resolveSortPath(AggregationPath.PathElement next, Iterator<AggregationPath.PathElement> path) {
throw new IllegalArgumentException(
"Buckets can only be sorted on a sub-aggregator path "
+ "that is built out of zero or more single-bucket aggregations within the path and a final "
+ "single-bucket or a metrics aggregation at the path end. ["
+ name()
+ "] is not single-bucket."
);
}
/**
* Builds a comparator that compares two buckets aggregated by this {@linkplain Aggregator}.
* <p>
* The default implementation throws an exception but we override it on aggregations that support sorting.
*/
public BucketComparator bucketComparator(String key, SortOrder order) {
throw new IllegalArgumentException(
"Buckets can only be sorted on a sub-aggregator path "
+ "that is built out of zero or more single-bucket aggregations within the path and a final "
+ "single-bucket or a metrics aggregation at the path end."
);
}
/**
* Compare two buckets by their ordinal.
*/
@FunctionalInterface
public | Parser |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/Dialect.java | {
"start": 15083,
"end": 17076
} | class ____ support for a certain database
* platform. For example, {@link PostgreSQLDialect} implements support
* for PostgreSQL, and {@link MySQLDialect} implements support for MySQL.
* <p>
* A subclass must provide a public constructor with a single parameter
* of type {@link DialectResolutionInfo}. Alternatively, for purposes of
* backward compatibility with older versions of Hibernate, a constructor
* with no parameters is also allowed.
* <p>
* Almost every subclass must, as a bare minimum, override at least:
* <ul>
* <li>{@link #columnType(int)} to define a mapping from SQL
* {@linkplain SqlTypes type codes} to database column types, and
* <li>{@link #initializeFunctionRegistry(FunctionContributions)} to
* register mappings for standard HQL functions with the
* {@link org.hibernate.query.sqm.function.SqmFunctionRegistry}.
* </ul>
* <p>
* A subclass representing a dialect of SQL which deviates significantly
* from ANSI SQL will certainly override many additional operations.
* <p>
* Subclasses should be thread-safe and immutable.
* <p>
* Since Hibernate 6, a single subclass of {@code Dialect} represents all
* releases of a given product-specific SQL dialect. The version of the
* database is exposed at runtime via the {@link DialectResolutionInfo}
* passed to the constructor, and by the {@link #getVersion()} property.
* <p>
* Programs using Hibernate should migrate away from the use of versioned
* dialect classes like, for example, {@code MySQL8Dialect}. These
* classes are now deprecated and will be removed in a future release.
* <p>
* A custom {@code Dialect} may be specified using the configuration
* property {@value org.hibernate.cfg.AvailableSettings#DIALECT}, but
* for supported databases this property is unnecessary, and Hibernate
* will select the correct {@code Dialect} based on the JDBC URL and
* {@link DialectResolutionInfo}.
*
* @author Gavin King, David Channon
*/
public abstract | implements |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitterFactory.java | {
"start": 1309,
"end": 1758
} | class ____
extends AbstractS3ACommitterFactory {
/**
* Name of this class: {@value}.
*/
public static final String CLASSNAME
= "org.apache.hadoop.fs.s3a.commit.staging.StagingCommitterFactory";
public PathOutputCommitter createTaskCommitter(S3AFileSystem fileSystem,
Path outputPath,
TaskAttemptContext context) throws IOException {
return new StagingCommitter(outputPath, context);
}
}
| StagingCommitterFactory |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequestTest.java | {
"start": 1296,
"end": 3323
} | class ____ {
@Test
void testGetErrorResponse() {
List<String> groupIds = Arrays.asList("group0", "group1");
ConsumerGroupDescribeRequestData data = new ConsumerGroupDescribeRequestData();
data.groupIds().addAll(groupIds);
ConsumerGroupDescribeRequest request = new ConsumerGroupDescribeRequest.Builder(data, true)
.build();
Throwable e = Errors.GROUP_AUTHORIZATION_FAILED.exception();
int throttleTimeMs = 1000;
ConsumerGroupDescribeResponse response = request.getErrorResponse(throttleTimeMs, e);
assertEquals(throttleTimeMs, response.throttleTimeMs());
for (int i = 0; i < groupIds.size(); i++) {
ConsumerGroupDescribeResponseData.DescribedGroup group = response.data().groups().get(i);
assertEquals(groupIds.get(i), group.groupId());
assertEquals(Errors.forException(e).code(), group.errorCode());
}
}
@Test
public void testGetErrorDescribedGroupList() {
List<ConsumerGroupDescribeResponseData.DescribedGroup> expectedDescribedGroupList = Arrays.asList(
new ConsumerGroupDescribeResponseData.DescribedGroup()
.setGroupId("group-id-1")
.setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()),
new ConsumerGroupDescribeResponseData.DescribedGroup()
.setGroupId("group-id-2")
.setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()),
new ConsumerGroupDescribeResponseData.DescribedGroup()
.setGroupId("group-id-3")
.setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code())
);
List<ConsumerGroupDescribeResponseData.DescribedGroup> describedGroupList = getErrorDescribedGroupList(
Arrays.asList("group-id-1", "group-id-2", "group-id-3"),
Errors.COORDINATOR_LOAD_IN_PROGRESS
);
assertEquals(expectedDescribedGroupList, describedGroupList);
}
}
| ConsumerGroupDescribeRequestTest |
java | elastic__elasticsearch | x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/ProfileLogger.java | {
"start": 952,
"end": 1676
} | class ____ extends TestWatcher {
private static final Logger LOGGER = LogManager.getLogger(ProfileLogger.class);
private Object profile;
public void extractProfile(Map<String, Object> jsonResponse, Boolean originalProfileParameter) {
if (jsonResponse.containsKey("profile") == false) {
return;
}
profile = jsonResponse.get("profile");
if (Boolean.TRUE.equals(originalProfileParameter) == false) {
jsonResponse.remove("profile");
}
}
public void clearProfile() {
profile = null;
}
@Override
protected void failed(Throwable e, Description description) {
LOGGER.info("Profile: {}", profile);
}
}
| ProfileLogger |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/info/SslInfo.java | {
"start": 7795,
"end": 8229
} | enum ____ {
/**
* The certificate is valid.
*/
VALID(true),
/**
* The certificate's validity date range is in the future.
*/
NOT_YET_VALID(false),
/**
* The certificate's validity date range is in the past.
*/
EXPIRED(false);
private final boolean valid;
Status(boolean valid) {
this.valid = valid;
}
public boolean isValid() {
return this.valid;
}
}
}
}
| Status |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java | {
"start": 3525,
"end": 3594
} | class ____ extends BaseTable<SubApplicationTable> {
}
| SubApplicationTable |
java | apache__flink | flink-libraries/flink-state-processing-api/src/test/java/org/apache/flink/state/api/input/WindowReaderTest.java | {
"start": 13677,
"end": 14774
} | class ____
extends WindowReaderFunction<Integer, Tuple2<Integer, Integer>, Integer, TimeWindow> {
@Override
public void readWindow(
Integer integer,
Context<TimeWindow> context,
Iterable<Integer> elements,
Collector<Tuple2<Integer, Integer>> out)
throws Exception {
Integer perKey =
context.globalState()
.getReducingState(
new ReducingStateDescriptor<>(
"per-key", new ReduceSum(), Types.INT))
.get();
Integer perPane =
context.windowState()
.getReducingState(
new ReducingStateDescriptor<>(
"per-pane", new ReduceSum(), Types.INT))
.get();
out.collect(Tuple2.of(perKey, perPane));
}
}
private static | MultiFireReaderFunction |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/JobManagerSharedServicesTest.java | {
"start": 1736,
"end": 5497
} | class ____ {
private static final int CPU_CORES = Hardware.getNumberCPUCores();
@TempDir private File TEMPORARY_FOLDER;
@Test
void testFutureExecutorNoConfiguration() throws Exception {
final Configuration config = new Configuration();
final JobManagerSharedServices jobManagerSharedServices =
buildJobManagerSharedServices(config);
try {
ScheduledExecutorService futureExecutor = jobManagerSharedServices.getFutureExecutor();
assertExecutorPoolSize(futureExecutor, CPU_CORES);
} finally {
jobManagerSharedServices.shutdown();
}
}
@Test
void testFutureExecutorConfiguration() throws Exception {
final int futurePoolSize = 8;
final Configuration config = new Configuration();
config.set(JobManagerOptions.JOB_MANAGER_FUTURE_POOL_SIZE, futurePoolSize);
final JobManagerSharedServices jobManagerSharedServices =
buildJobManagerSharedServices(config);
assertExecutorPoolSize(jobManagerSharedServices.getFutureExecutor(), futurePoolSize);
jobManagerSharedServices.shutdown();
}
@Test
void testIoExecutorNoConfiguration() throws Exception {
final Configuration config = new Configuration();
final JobManagerSharedServices jobManagerSharedServices =
buildJobManagerSharedServices(config);
try {
assertExecutorPoolSize(jobManagerSharedServices.getIoExecutor(), CPU_CORES);
} finally {
jobManagerSharedServices.shutdown();
}
}
@Test
void testIoExecutorConfiguration() throws Exception {
final int ioPoolSize = 5;
final Configuration config = new Configuration();
config.set(JobManagerOptions.JOB_MANAGER_IO_POOL_SIZE, ioPoolSize);
final JobManagerSharedServices jobManagerSharedServices =
buildJobManagerSharedServices(config);
try {
assertExecutorPoolSize(jobManagerSharedServices.getIoExecutor(), ioPoolSize);
} finally {
jobManagerSharedServices.shutdown();
}
}
@Nonnull
private JobManagerSharedServices buildJobManagerSharedServices(Configuration configuration)
throws Exception {
return JobManagerSharedServices.fromConfiguration(
configuration,
new BlobServer(configuration, TEMPORARY_FOLDER, new VoidBlobStore()),
new TestingFatalErrorHandler());
}
private void assertExecutorPoolSize(Executor executor, int expectedPoolSize)
throws InterruptedException {
final CountDownLatch expectedPoolSizeLatch = new CountDownLatch(expectedPoolSize);
final int expectedPoolSizePlusOne = expectedPoolSize + 1;
final CountDownLatch expectedPoolSizePlusOneLatch =
new CountDownLatch(expectedPoolSizePlusOne);
final OneShotLatch releaseLatch = new OneShotLatch();
ThrowingRunnable<Exception> countsDown =
() -> {
expectedPoolSizePlusOneLatch.countDown();
expectedPoolSizeLatch.countDown();
// block the runnable to keep the thread occupied
releaseLatch.await();
};
for (int i = 0; i < expectedPoolSizePlusOne; i++) {
executor.execute(ThrowingRunnable.unchecked(countsDown));
}
// the expected pool size latch should complete since we expect to have enough threads
expectedPoolSizeLatch.await();
assertThat(expectedPoolSizePlusOneLatch.getCount()).isOne();
// unblock the runnables
releaseLatch.trigger();
}
}
| JobManagerSharedServicesTest |
java | spring-projects__spring-boot | module/spring-boot-data-neo4j/src/test/java/org/springframework/boot/data/neo4j/domain/country/CountryRepository.java | {
"start": 761,
"end": 833
} | interface ____ extends Neo4jRepository<Country, Long> {
}
| CountryRepository |
java | mockito__mockito | mockito-extensions/mockito-junit-jupiter/src/test/java/org/mockitousage/JunitJupiterTest.java | {
"start": 723,
"end": 1998
} | class ____ {
@Mock private Function<Integer, String> rootMock;
@Captor private ArgumentCaptor<String> rootCaptor;
@InjectMocks private ClassWithDependency classWithDependency;
@Test
void ensure_mock_creation_works() {
assertThat(rootMock).isNotNull();
}
@Test
void can_set_stubs_on_initialized_mock() {
Mockito.when(rootMock.apply(10)).thenReturn("Return");
assertThat(rootMock.apply(10)).isEqualTo("Return");
}
@Test
void ensure_captor_creation_works() {
assertThat(rootCaptor).isNotNull();
}
@Test
void can_capture_with_initialized_captor() {
assertCaptor(rootCaptor);
}
@Test
void initializes_parameters(
@Mock Function<String, String> localMock, @Captor ArgumentCaptor<String> localCaptor) {
Mockito.when(localMock.apply("Para")).thenReturn("Meter");
assertThat(localMock.apply("Para")).isEqualTo("Meter");
assertCaptor(localCaptor);
}
@Test
void initializes_parameters_with_custom_configuration(
@Mock(name = "overriddenName") Function<String, String> localMock) {
assertThat(MockUtil.getMockName(localMock).toString()).isEqualTo("overriddenName");
}
@Nested
| JunitJupiterTest |
java | greenrobot__EventBus | EventBusTestJava/src/main/java/org/greenrobot/eventbus/EventBusInheritanceDisabledSubclassNoMethod.java | {
"start": 111,
"end": 205
} | class ____ extends EventBusInheritanceDisabledTest {
}
| EventBusInheritanceDisabledSubclassNoMethod |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/TwilioComponentBuilderFactory.java | {
"start": 7737,
"end": 9257
} | class ____
extends AbstractComponentBuilder<TwilioComponent>
implements TwilioComponentBuilder {
@Override
protected TwilioComponent buildConcreteComponent() {
return new TwilioComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "configuration": ((TwilioComponent) component).setConfiguration((org.apache.camel.component.twilio.TwilioConfiguration) value); return true;
case "bridgeErrorHandler": ((TwilioComponent) component).setBridgeErrorHandler((boolean) value); return true;
case "lazyStartProducer": ((TwilioComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((TwilioComponent) component).setAutowiredEnabled((boolean) value); return true;
case "restClient": ((TwilioComponent) component).setRestClient((com.twilio.http.TwilioRestClient) value); return true;
case "accountSid": ((TwilioComponent) component).setAccountSid((java.lang.String) value); return true;
case "password": ((TwilioComponent) component).setPassword((java.lang.String) value); return true;
case "username": ((TwilioComponent) component).setUsername((java.lang.String) value); return true;
default: return false;
}
}
}
} | TwilioComponentBuilderImpl |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java | {
"start": 204661,
"end": 207310
} | class ____<T extends __> extends EImp<T> implements HamletSpec.DL {
public DL(String name, T parent, EnumSet<EOpt> opts) {
super(name, parent, opts);
}
@Override
public DL<T> $id(String value) {
addAttr("id", value);
return this;
}
@Override
public DL<T> $class(String value) {
addAttr("class", value);
return this;
}
@Override
public DL<T> $title(String value) {
addAttr("title", value);
return this;
}
@Override
public DL<T> $style(String value) {
addAttr("style", value);
return this;
}
@Override
public DL<T> $lang(String value) {
addAttr("lang", value);
return this;
}
@Override
public DL<T> $dir(Dir value) {
addAttr("dir", value);
return this;
}
@Override
public DL<T> $onclick(String value) {
addAttr("onclick", value);
return this;
}
@Override
public DL<T> $ondblclick(String value) {
addAttr("ondblclick", value);
return this;
}
@Override
public DL<T> $onmousedown(String value) {
addAttr("onmousedown", value);
return this;
}
@Override
public DL<T> $onmouseup(String value) {
addAttr("onmouseup", value);
return this;
}
@Override
public DL<T> $onmouseover(String value) {
addAttr("onmouseover", value);
return this;
}
@Override
public DL<T> $onmousemove(String value) {
addAttr("onmousemove", value);
return this;
}
@Override
public DL<T> $onmouseout(String value) {
addAttr("onmouseout", value);
return this;
}
@Override
public DL<T> $onkeypress(String value) {
addAttr("onkeypress", value);
return this;
}
@Override
public DL<T> $onkeydown(String value) {
addAttr("onkeydown", value);
return this;
}
@Override
public DL<T> $onkeyup(String value) {
addAttr("onkeyup", value);
return this;
}
@Override
public DT<DL<T>> dt() {
closeAttrs();
return dt_(this, false);
}
@Override
public DL<T> dt(String cdata) {
return dt().__(cdata).__();
}
@Override
public DD<DL<T>> dd() {
closeAttrs();
return dd_(this, false);
}
@Override
public DL<T> dd(String cdata) {
return dd().__(cdata).__();
}
}
private <T extends __> DT<T> dt_(T e, boolean inline) {
return new DT<T>("dt", e, opt(false, inline, false)); }
private <T extends __> DD<T> dd_(T e, boolean inline) {
return new DD<T>("dd", e, opt(false, inline, false)); }
public | DL |
java | quarkusio__quarkus | extensions/websockets-next/deployment/src/main/java/io/quarkus/websockets/next/deployment/WebSocketEndpointBuildItem.java | {
"start": 503,
"end": 2157
} | class ____ extends MultiBuildItem {
public final boolean isClient;
public final BeanInfo bean;
// The path is using Vertx syntax for path params, i.e. /foo/:bar
public final String path;
// @WebSocket#endpointId() or @WebSocketClient#clientId()
public final String id;
public final InboundProcessingMode inboundProcessingMode;
public final Callback onOpen;
public final Callback onTextMessage;
public final Callback onBinaryMessage;
public final Callback onPingMessage;
public final Callback onPongMessage;
public final Callback onClose;
public final List<Callback> onErrors;
WebSocketEndpointBuildItem(boolean isClient, BeanInfo bean, String path, String id,
InboundProcessingMode inboundProcessingMode,
Callback onOpen, Callback onTextMessage, Callback onBinaryMessage, Callback onPingMessage,
Callback onPongMessage, Callback onClose, List<Callback> onErrors) {
this.isClient = isClient;
this.bean = bean;
this.path = path;
this.id = id;
this.inboundProcessingMode = inboundProcessingMode;
this.onOpen = onOpen;
this.onTextMessage = onTextMessage;
this.onBinaryMessage = onBinaryMessage;
this.onPingMessage = onPingMessage;
this.onPongMessage = onPongMessage;
this.onClose = onClose;
this.onErrors = onErrors;
}
public boolean isClient() {
return isClient;
}
public boolean isServer() {
return !isClient;
}
public DotName beanClassName() {
return bean.getImplClazz().name();
}
}
| WebSocketEndpointBuildItem |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/spi/EvictEventListener.java | {
"start": 293,
"end": 482
} | interface ____ {
/**
* Handle the given evict event.
*
* @param event The evict event to be handled.
*/
void onEvict(EvictEvent event) throws HibernateException;
}
| EvictEventListener |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/ValidateTest.java | {
"start": 28732,
"end": 29382
} | class ____ {
@Test
void shouldNotThrowExceptionWhenValueIsInstanceOfClass() {
Validate.isInstanceOf(String.class, "hi", "Error %s=%s", "Name", "Value");
}
@Test
void shouldThrowIllegalArgumentExceptionWithGivenMessageWhenValueIsNotInstanceOfClass() {
final IllegalArgumentException ex = assertIllegalArgumentException(
() -> Validate.isInstanceOf(List.class, "hi", "Error %s=%s", "Name", "Value"));
assertEquals("Error Name=Value", ex.getMessage());
}
}
@Nested
final | WithMessageTemplate |
java | netty__netty | transport-native-io_uring/src/test/java/io/netty/channel/uring/IoUringSocketHalfClosedTest.java | {
"start": 1209,
"end": 2147
} | class ____ extends SocketHalfClosedTest {
@BeforeAll
public static void loadJNI() {
assumeTrue(IoUring.isAvailable());
}
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return IoUringSocketTestPermutation.INSTANCE.socket();
}
@Disabled
@Test
public void testAutoCloseFalseDoesShutdownOutput(TestInfo testInfo) throws Throwable {
// This test only works on Linux / BSD / MacOS as we assume some semantics that are not true for Windows.
Assumptions.assumeFalse(PlatformDependent.isWindows());
this.run(testInfo, new Runner<ServerBootstrap, Bootstrap>() {
public void run(ServerBootstrap serverBootstrap, Bootstrap bootstrap) throws Throwable {
testAutoCloseFalseDoesShutdownOutput(serverBootstrap, bootstrap);
}
});
}
}
| IoUringSocketHalfClosedTest |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/ConfigOrError.java | {
"start": 829,
"end": 1559
} | class ____<T> {
/**
* Returns a {@link ConfigOrError} for the successfully converted data object.
*/
static <T> ConfigOrError<T> fromConfig(T config) {
return new ConfigOrError<>(config);
}
/**
* Returns a {@link ConfigOrError} for the failure to convert the data object.
*/
static <T> ConfigOrError<T> fromError(String errorDetail) {
return new ConfigOrError<>(errorDetail);
}
final String errorDetail;
final T config;
private ConfigOrError(T config) {
this.config = checkNotNull(config, "config");
this.errorDetail = null;
}
private ConfigOrError(String errorDetail) {
this.config = null;
this.errorDetail = checkNotNull(errorDetail, "errorDetail");
}
}
| ConfigOrError |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java | {
"start": 1417,
"end": 4905
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(
new Route(GET, "/{index}/_termvectors"),
new Route(POST, "/{index}/_termvectors"),
new Route(GET, "/{index}/_termvectors/{id}"),
new Route(POST, "/{index}/_termvectors/{id}")
);
}
@Override
public String getName() {
return "document_term_vectors_action";
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
TermVectorsRequest termVectorsRequest = new TermVectorsRequest(request.param("index"), request.param("id"));
if (request.hasContentOrSourceParam()) {
try (XContentParser parser = request.contentOrSourceParamParser()) {
TermVectorsRequest.parseRequest(termVectorsRequest, parser, request.getRestApiVersion());
}
}
readURIParameters(termVectorsRequest, request);
return channel -> client.termVectors(termVectorsRequest, new RestToXContentListener<>(channel));
}
public static void readURIParameters(TermVectorsRequest termVectorsRequest, RestRequest request) {
String fields = request.param("fields");
addFieldStringsFromParameter(termVectorsRequest, fields);
termVectorsRequest.offsets(request.paramAsBoolean("offsets", termVectorsRequest.offsets()));
termVectorsRequest.positions(request.paramAsBoolean("positions", termVectorsRequest.positions()));
termVectorsRequest.payloads(request.paramAsBoolean("payloads", termVectorsRequest.payloads()));
termVectorsRequest.routing(request.param("routing"));
termVectorsRequest.realtime(request.paramAsBoolean("realtime", termVectorsRequest.realtime()));
termVectorsRequest.version(RestActions.parseVersion(request, termVectorsRequest.version()));
termVectorsRequest.versionType(VersionType.fromString(request.param("version_type"), termVectorsRequest.versionType()));
termVectorsRequest.preference(request.param("preference"));
termVectorsRequest.termStatistics(request.paramAsBoolean("termStatistics", termVectorsRequest.termStatistics()));
termVectorsRequest.termStatistics(request.paramAsBoolean("term_statistics", termVectorsRequest.termStatistics()));
termVectorsRequest.fieldStatistics(request.paramAsBoolean("fieldStatistics", termVectorsRequest.fieldStatistics()));
termVectorsRequest.fieldStatistics(request.paramAsBoolean("field_statistics", termVectorsRequest.fieldStatistics()));
}
public static void addFieldStringsFromParameter(TermVectorsRequest termVectorsRequest, String fields) {
Set<String> selectedFields = termVectorsRequest.selectedFields();
if (fields != null) {
String[] paramFieldStrings = Strings.commaDelimitedListToStringArray(fields);
for (String field : paramFieldStrings) {
if (selectedFields == null) {
selectedFields = new HashSet<>();
}
if (selectedFields.contains(field) == false) {
field = field.replaceAll("\\s", "");
selectedFields.add(field);
}
}
}
if (selectedFields != null) {
termVectorsRequest.selectedFields(selectedFields.toArray(new String[selectedFields.size()]));
}
}
}
| RestTermVectorsAction |
java | elastic__elasticsearch | x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/aggregations/support/AggregateMetricsValuesSourceType.java | {
"start": 1068,
"end": 3183
} | enum ____ implements ValuesSourceType {
AGGREGATE_METRIC() {
@Override
public RuntimeException getUnregisteredException(String message) {
return new UnsupportedAggregationOnDownsampledIndex(message);
}
@Override
public ValuesSource getEmpty() {
throw new IllegalArgumentException("Can't deal with unmapped AggregateMetricsValuesSource type " + this.value());
}
@Override
public ValuesSource getScript(AggregationScript.LeafFactory script, ValueType scriptValueType) {
throw AggregationErrors.valuesSourceDoesNotSupportScritps(this.value());
}
@Override
public ValuesSource getField(FieldContext fieldContext, AggregationScript.LeafFactory script) {
final IndexFieldData<?> indexFieldData = fieldContext.indexFieldData();
if ((indexFieldData instanceof IndexAggregateMetricDoubleFieldData) == false) {
throw new IllegalArgumentException(
"Expected aggregate_metric_double type on field ["
+ fieldContext.field()
+ "], but got ["
+ fieldContext.fieldType().typeName()
+ "]"
);
}
return new AggregateMetricsValuesSource.AggregateMetricDouble.Fielddata((IndexAggregateMetricDoubleFieldData) indexFieldData);
}
@Override
public ValuesSource replaceMissing(
ValuesSource valuesSource,
Object rawMissing,
DocValueFormat docValueFormat,
LongSupplier nowInMillis
) {
throw new IllegalArgumentException("Can't apply missing values on a " + valuesSource.getClass());
}
};
@Override
public String typeName() {
return value();
}
public static ValuesSourceType fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));
}
public String value() {
return name().toLowerCase(Locale.ROOT);
}
}
| AggregateMetricsValuesSourceType |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/MinWithRetractAggFunctionTest.java | {
"start": 6410,
"end": 7143
} | class ____
extends NumberMinWithRetractAggFunctionTestBase<Double> {
@Override
protected Double getMinValue() {
return -Double.MAX_VALUE / 2;
}
@Override
protected Double getMaxValue() {
return Double.MAX_VALUE / 2;
}
@Override
protected Double getValue(String v) {
return Double.valueOf(v);
}
@Override
protected AggregateFunction<Double, MinWithRetractAccumulator<Double>> getAggregator() {
return new MinWithRetractAggFunction<>(DataTypes.DOUBLE().getLogicalType());
}
}
/** Test for {@link BooleanType}. */
@Nested
final | DoubleMinWithRetractAggFunctionTest |
java | apache__spark | sql/api/src/main/java/org/apache/spark/sql/api/java/UDF11.java | {
"start": 981,
"end": 1179
} | interface ____<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R> extends Serializable {
R call(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11) throws Exception;
}
| UDF11 |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/watermark/Watermark.java | {
"start": 2089,
"end": 3460
} | class ____ extends StreamElement {
/** The watermark that signifies end-of-event-time. */
public static final Watermark MAX_WATERMARK = new Watermark(Long.MAX_VALUE);
/** The watermark that signifies is used before any actual watermark has been generated. */
public static final Watermark UNINITIALIZED = new Watermark(Long.MIN_VALUE);
// ------------------------------------------------------------------------
/** The timestamp of the watermark in milliseconds. */
protected final long timestamp;
/** Creates a new watermark with the given timestamp in milliseconds. */
public Watermark(long timestamp) {
this.timestamp = timestamp;
}
/** Returns the timestamp associated with this {@link Watermark} in milliseconds. */
public long getTimestamp() {
return timestamp;
}
// ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
return this == o
|| o != null
&& o.getClass() == this.getClass()
&& ((Watermark) o).timestamp == timestamp;
}
@Override
public int hashCode() {
return (int) (timestamp ^ (timestamp >>> 32));
}
@Override
public String toString() {
return "Watermark @ " + timestamp;
}
}
| Watermark |
java | apache__camel | components/camel-kubernetes/src/generated/java/org/apache/camel/component/kubernetes/service_accounts/KubernetesServiceAccountsEndpointUriFactory.java | {
"start": 537,
"end": 3438
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":masterUrl";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(22);
props.add("apiVersion");
props.add("caCertData");
props.add("caCertFile");
props.add("clientCertData");
props.add("clientCertFile");
props.add("clientKeyAlgo");
props.add("clientKeyData");
props.add("clientKeyFile");
props.add("clientKeyPassphrase");
props.add("connectionTimeout");
props.add("dnsDomain");
props.add("kubernetesClient");
props.add("lazyStartProducer");
props.add("masterUrl");
props.add("namespace");
props.add("oauthToken");
props.add("operation");
props.add("password");
props.add("portName");
props.add("portProtocol");
props.add("trustCerts");
props.add("username");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(12);
secretProps.add("caCertData");
secretProps.add("caCertFile");
secretProps.add("clientCertData");
secretProps.add("clientCertFile");
secretProps.add("clientKeyAlgo");
secretProps.add("clientKeyData");
secretProps.add("clientKeyFile");
secretProps.add("clientKeyPassphrase");
secretProps.add("oauthToken");
secretProps.add("password");
secretProps.add("trustCerts");
secretProps.add("username");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "kubernetes-service-accounts".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "masterUrl", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| KubernetesServiceAccountsEndpointUriFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/loading/multiLoad/MultiLoadLockingTest.java | {
"start": 14691,
"end": 15179
} | class ____ {
@Id
private Long id;
@Basic(optional = false)
@NaturalId
private String name;
protected Customer() {
}
public Customer(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Entity(name = "EntityWithAggregateId")
public static | Customer |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DoNotCallSuggesterTest.java | {
"start": 3458,
"end": 3833
} | class ____ {
public void foo() {
throw new RuntimeException();
}
}
""")
.doTest();
}
@Test
public void finalClass_publicFinalMethod_throwsAVariable() {
testHelper
.addSourceLines(
"Test.java",
"""
import java.io.IOException;
final | Test |
java | apache__camel | components/camel-kubernetes/src/generated/java/org/apache/camel/component/kubernetes/pods/KubernetesPodsEndpointConfigurer.java | {
"start": 742,
"end": 13263
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
KubernetesPodsEndpoint target = (KubernetesPodsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": target.getConfiguration().setApiVersion(property(camelContext, java.lang.String.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "cacertdata":
case "caCertData": target.getConfiguration().setCaCertData(property(camelContext, java.lang.String.class, value)); return true;
case "cacertfile":
case "caCertFile": target.getConfiguration().setCaCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertdata":
case "clientCertData": target.getConfiguration().setClientCertData(property(camelContext, java.lang.String.class, value)); return true;
case "clientcertfile":
case "clientCertFile": target.getConfiguration().setClientCertFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyalgo":
case "clientKeyAlgo": target.getConfiguration().setClientKeyAlgo(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeydata":
case "clientKeyData": target.getConfiguration().setClientKeyData(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeyfile":
case "clientKeyFile": target.getConfiguration().setClientKeyFile(property(camelContext, java.lang.String.class, value)); return true;
case "clientkeypassphrase":
case "clientKeyPassphrase": target.getConfiguration().setClientKeyPassphrase(property(camelContext, java.lang.String.class, value)); return true;
case "connectiontimeout":
case "connectionTimeout": target.getConfiguration().setConnectionTimeout(property(camelContext, java.lang.Integer.class, value)); return true;
case "crdgroup":
case "crdGroup": target.getConfiguration().setCrdGroup(property(camelContext, java.lang.String.class, value)); return true;
case "crdname":
case "crdName": target.getConfiguration().setCrdName(property(camelContext, java.lang.String.class, value)); return true;
case "crdplural":
case "crdPlural": target.getConfiguration().setCrdPlural(property(camelContext, java.lang.String.class, value)); return true;
case "crdscope":
case "crdScope": target.getConfiguration().setCrdScope(property(camelContext, java.lang.String.class, value)); return true;
case "crdversion":
case "crdVersion": target.getConfiguration().setCrdVersion(property(camelContext, java.lang.String.class, value)); return true;
case "dnsdomain":
case "dnsDomain": target.getConfiguration().setDnsDomain(property(camelContext, java.lang.String.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "kubernetesclient":
case "kubernetesClient": target.getConfiguration().setKubernetesClient(property(camelContext, io.fabric8.kubernetes.client.KubernetesClient.class, value)); return true;
case "labelkey":
case "labelKey": target.getConfiguration().setLabelKey(property(camelContext, java.lang.String.class, value)); return true;
case "labelvalue":
case "labelValue": target.getConfiguration().setLabelValue(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "namespace": target.getConfiguration().setNamespace(property(camelContext, java.lang.String.class, value)); return true;
case "oauthtoken":
case "oauthToken": target.getConfiguration().setOauthToken(property(camelContext, java.lang.String.class, value)); return true;
case "operation": target.getConfiguration().setOperation(property(camelContext, java.lang.String.class, value)); return true;
case "password": target.getConfiguration().setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "poolsize":
case "poolSize": target.getConfiguration().setPoolSize(property(camelContext, int.class, value)); return true;
case "portname":
case "portName": target.getConfiguration().setPortName(property(camelContext, java.lang.String.class, value)); return true;
case "portprotocol":
case "portProtocol": target.getConfiguration().setPortProtocol(property(camelContext, java.lang.String.class, value)); return true;
case "resourcename":
case "resourceName": target.getConfiguration().setResourceName(property(camelContext, java.lang.String.class, value)); return true;
case "trustcerts":
case "trustCerts": target.getConfiguration().setTrustCerts(property(camelContext, java.lang.Boolean.class, value)); return true;
case "username": target.getConfiguration().setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return java.lang.String.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "cacertdata":
case "caCertData": return java.lang.String.class;
case "cacertfile":
case "caCertFile": return java.lang.String.class;
case "clientcertdata":
case "clientCertData": return java.lang.String.class;
case "clientcertfile":
case "clientCertFile": return java.lang.String.class;
case "clientkeyalgo":
case "clientKeyAlgo": return java.lang.String.class;
case "clientkeydata":
case "clientKeyData": return java.lang.String.class;
case "clientkeyfile":
case "clientKeyFile": return java.lang.String.class;
case "clientkeypassphrase":
case "clientKeyPassphrase": return java.lang.String.class;
case "connectiontimeout":
case "connectionTimeout": return java.lang.Integer.class;
case "crdgroup":
case "crdGroup": return java.lang.String.class;
case "crdname":
case "crdName": return java.lang.String.class;
case "crdplural":
case "crdPlural": return java.lang.String.class;
case "crdscope":
case "crdScope": return java.lang.String.class;
case "crdversion":
case "crdVersion": return java.lang.String.class;
case "dnsdomain":
case "dnsDomain": return java.lang.String.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "kubernetesclient":
case "kubernetesClient": return io.fabric8.kubernetes.client.KubernetesClient.class;
case "labelkey":
case "labelKey": return java.lang.String.class;
case "labelvalue":
case "labelValue": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "namespace": return java.lang.String.class;
case "oauthtoken":
case "oauthToken": return java.lang.String.class;
case "operation": return java.lang.String.class;
case "password": return java.lang.String.class;
case "poolsize":
case "poolSize": return int.class;
case "portname":
case "portName": return java.lang.String.class;
case "portprotocol":
case "portProtocol": return java.lang.String.class;
case "resourcename":
case "resourceName": return java.lang.String.class;
case "trustcerts":
case "trustCerts": return java.lang.Boolean.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
KubernetesPodsEndpoint target = (KubernetesPodsEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return target.getConfiguration().getApiVersion();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "cacertdata":
case "caCertData": return target.getConfiguration().getCaCertData();
case "cacertfile":
case "caCertFile": return target.getConfiguration().getCaCertFile();
case "clientcertdata":
case "clientCertData": return target.getConfiguration().getClientCertData();
case "clientcertfile":
case "clientCertFile": return target.getConfiguration().getClientCertFile();
case "clientkeyalgo":
case "clientKeyAlgo": return target.getConfiguration().getClientKeyAlgo();
case "clientkeydata":
case "clientKeyData": return target.getConfiguration().getClientKeyData();
case "clientkeyfile":
case "clientKeyFile": return target.getConfiguration().getClientKeyFile();
case "clientkeypassphrase":
case "clientKeyPassphrase": return target.getConfiguration().getClientKeyPassphrase();
case "connectiontimeout":
case "connectionTimeout": return target.getConfiguration().getConnectionTimeout();
case "crdgroup":
case "crdGroup": return target.getConfiguration().getCrdGroup();
case "crdname":
case "crdName": return target.getConfiguration().getCrdName();
case "crdplural":
case "crdPlural": return target.getConfiguration().getCrdPlural();
case "crdscope":
case "crdScope": return target.getConfiguration().getCrdScope();
case "crdversion":
case "crdVersion": return target.getConfiguration().getCrdVersion();
case "dnsdomain":
case "dnsDomain": return target.getConfiguration().getDnsDomain();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "kubernetesclient":
case "kubernetesClient": return target.getConfiguration().getKubernetesClient();
case "labelkey":
case "labelKey": return target.getConfiguration().getLabelKey();
case "labelvalue":
case "labelValue": return target.getConfiguration().getLabelValue();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "namespace": return target.getConfiguration().getNamespace();
case "oauthtoken":
case "oauthToken": return target.getConfiguration().getOauthToken();
case "operation": return target.getConfiguration().getOperation();
case "password": return target.getConfiguration().getPassword();
case "poolsize":
case "poolSize": return target.getConfiguration().getPoolSize();
case "portname":
case "portName": return target.getConfiguration().getPortName();
case "portprotocol":
case "portProtocol": return target.getConfiguration().getPortProtocol();
case "resourcename":
case "resourceName": return target.getConfiguration().getResourceName();
case "trustcerts":
case "trustCerts": return target.getConfiguration().getTrustCerts();
case "username": return target.getConfiguration().getUsername();
default: return null;
}
}
}
| KubernetesPodsEndpointConfigurer |
java | quarkusio__quarkus | extensions/observability-devservices/common/src/main/java/io/quarkus/observability/common/ContainerConstants.java | {
"start": 55,
"end": 759
} | class ____ {
// Images
public static final String LGTM = "docker.io/grafana/otel-lgtm:0.11.17";
// Ports
public static final int GRAFANA_PORT = 3000;
public static final int OTEL_GRPC_EXPORTER_PORT = 4317;
public static final int OTEL_HTTP_EXPORTER_PORT = 4318;
public static final String OTEL_GRPC_PROTOCOL = "grpc";
public static final String OTEL_HTTP_PROTOCOL = "http/protobuf";
// Overrides
public static final int SCRAPING_INTERVAL = 10;
public static final String OTEL_METRIC_EXPORT_INTERVAL = "10s";
public static final String OTEL_BSP_SCHEDULE_DELAY = "3s";
public static final String OTEL_BLRP_SCHEDULE_DELAY = "1s";
}
| ContainerConstants |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/impl/consumer/RebalanceImpl.java | {
"start": 2254,
"end": 32173
} | class ____ {
protected static final Logger log = LoggerFactory.getLogger(RebalanceImpl.class);
protected final ConcurrentMap<MessageQueue, ProcessQueue> processQueueTable = new ConcurrentHashMap<>(64);
protected final ConcurrentMap<MessageQueue, PopProcessQueue> popProcessQueueTable = new ConcurrentHashMap<>(64);
protected final ConcurrentMap<String/* topic */, Set<MessageQueue>> topicSubscribeInfoTable =
new ConcurrentHashMap<>();
protected final ConcurrentMap<String /* topic */, SubscriptionData> subscriptionInner =
new ConcurrentHashMap<>();
protected String consumerGroup;
protected MessageModel messageModel;
protected AllocateMessageQueueStrategy allocateMessageQueueStrategy;
protected MQClientInstance mQClientFactory;
private static final int QUERY_ASSIGNMENT_TIMEOUT = 3000;
public RebalanceImpl(String consumerGroup, MessageModel messageModel,
AllocateMessageQueueStrategy allocateMessageQueueStrategy,
MQClientInstance mQClientFactory) {
this.consumerGroup = consumerGroup;
this.messageModel = messageModel;
this.allocateMessageQueueStrategy = allocateMessageQueueStrategy;
this.mQClientFactory = mQClientFactory;
}
public void unlock(final MessageQueue mq, final boolean oneway) {
FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(this.mQClientFactory.getBrokerNameFromMessageQueue(mq), MixAll.MASTER_ID, true);
if (findBrokerResult != null) {
UnlockBatchRequestBody requestBody = new UnlockBatchRequestBody();
requestBody.setConsumerGroup(this.consumerGroup);
requestBody.setClientId(this.mQClientFactory.getClientId());
requestBody.getMqSet().add(mq);
try {
this.mQClientFactory.getMQClientAPIImpl().unlockBatchMQ(findBrokerResult.getBrokerAddr(), requestBody, 1000, oneway);
log.warn("unlock messageQueue. group:{}, clientId:{}, mq:{}",
this.consumerGroup,
this.mQClientFactory.getClientId(),
mq);
} catch (Exception e) {
log.error("unlockBatchMQ exception, " + mq, e);
}
}
}
public void unlockAll(final boolean oneway) {
HashMap<String, Set<MessageQueue>> brokerMqs = this.buildProcessQueueTableByBrokerName();
for (final Map.Entry<String, Set<MessageQueue>> entry : brokerMqs.entrySet()) {
final String brokerName = entry.getKey();
final Set<MessageQueue> mqs = entry.getValue();
if (mqs.isEmpty()) {
continue;
}
FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, true);
if (findBrokerResult != null) {
UnlockBatchRequestBody requestBody = new UnlockBatchRequestBody();
requestBody.setConsumerGroup(this.consumerGroup);
requestBody.setClientId(this.mQClientFactory.getClientId());
requestBody.setMqSet(mqs);
try {
this.mQClientFactory.getMQClientAPIImpl().unlockBatchMQ(findBrokerResult.getBrokerAddr(), requestBody, 1000, oneway);
for (MessageQueue mq : mqs) {
ProcessQueue processQueue = this.processQueueTable.get(mq);
if (processQueue != null) {
processQueue.setLocked(false);
log.info("the message queue unlock OK, Group: {} {}", this.consumerGroup, mq);
}
}
} catch (Exception e) {
log.error("unlockBatchMQ exception, " + mqs, e);
}
}
}
}
private HashMap<String/* brokerName */, Set<MessageQueue>> buildProcessQueueTableByBrokerName() {
HashMap<String, Set<MessageQueue>> result = new HashMap<>();
for (Map.Entry<MessageQueue, ProcessQueue> entry : this.processQueueTable.entrySet()) {
MessageQueue mq = entry.getKey();
ProcessQueue pq = entry.getValue();
if (pq.isDropped()) {
continue;
}
String destBrokerName = this.mQClientFactory.getBrokerNameFromMessageQueue(mq);
Set<MessageQueue> mqs = result.get(destBrokerName);
if (null == mqs) {
mqs = new HashSet<>();
result.put(mq.getBrokerName(), mqs);
}
mqs.add(mq);
}
return result;
}
public boolean lock(final MessageQueue mq) {
FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(this.mQClientFactory.getBrokerNameFromMessageQueue(mq), MixAll.MASTER_ID, true);
if (findBrokerResult != null) {
LockBatchRequestBody requestBody = new LockBatchRequestBody();
requestBody.setConsumerGroup(this.consumerGroup);
requestBody.setClientId(this.mQClientFactory.getClientId());
requestBody.getMqSet().add(mq);
try {
Set<MessageQueue> lockedMq =
this.mQClientFactory.getMQClientAPIImpl().lockBatchMQ(findBrokerResult.getBrokerAddr(), requestBody, 1000);
for (MessageQueue mmqq : lockedMq) {
ProcessQueue processQueue = this.processQueueTable.get(mmqq);
if (processQueue != null) {
processQueue.setLocked(true);
processQueue.setLastLockTimestamp(System.currentTimeMillis());
}
}
boolean lockOK = lockedMq.contains(mq);
log.info("message queue lock {}, {} {}", lockOK ? "OK" : "Failed", this.consumerGroup, mq);
return lockOK;
} catch (Exception e) {
log.error("lockBatchMQ exception, " + mq, e);
}
}
return false;
}
public void lockAll() {
HashMap<String, Set<MessageQueue>> brokerMqs = this.buildProcessQueueTableByBrokerName();
Iterator<Entry<String, Set<MessageQueue>>> it = brokerMqs.entrySet().iterator();
while (it.hasNext()) {
Entry<String, Set<MessageQueue>> entry = it.next();
final String brokerName = entry.getKey();
final Set<MessageQueue> mqs = entry.getValue();
if (mqs.isEmpty()) {
continue;
}
FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, true);
if (findBrokerResult != null) {
LockBatchRequestBody requestBody = new LockBatchRequestBody();
requestBody.setConsumerGroup(this.consumerGroup);
requestBody.setClientId(this.mQClientFactory.getClientId());
requestBody.setMqSet(mqs);
try {
Set<MessageQueue> lockOKMQSet =
this.mQClientFactory.getMQClientAPIImpl().lockBatchMQ(findBrokerResult.getBrokerAddr(), requestBody, 1000);
for (MessageQueue mq : mqs) {
ProcessQueue processQueue = this.processQueueTable.get(mq);
if (processQueue != null) {
if (lockOKMQSet.contains(mq)) {
if (!processQueue.isLocked()) {
log.info("the message queue locked OK, Group: {} {}", this.consumerGroup, mq);
}
processQueue.setLocked(true);
processQueue.setLastLockTimestamp(System.currentTimeMillis());
} else {
processQueue.setLocked(false);
log.warn("the message queue locked Failed, Group: {} {}", this.consumerGroup, mq);
}
}
}
} catch (Exception e) {
log.error("lockBatchMQ exception, " + mqs, e);
}
}
}
}
public boolean clientRebalance(String topic) {
return true;
}
public boolean doRebalance(final boolean isOrder) {
boolean balanced = true;
Map<String, SubscriptionData> subTable = this.getSubscriptionInner();
if (subTable != null) {
for (final Map.Entry<String, SubscriptionData> entry : subTable.entrySet()) {
final String topic = entry.getKey();
try {
if (!clientRebalance(topic)) {
boolean result = this.getRebalanceResultFromBroker(topic, isOrder);
if (!result) {
balanced = false;
}
} else {
boolean result = this.rebalanceByTopic(topic, isOrder);
if (!result) {
balanced = false;
}
}
} catch (Throwable e) {
if (!topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
log.warn("rebalance Exception", e);
balanced = false;
}
}
}
}
this.truncateMessageQueueNotMyTopic();
return balanced;
}
public ConcurrentMap<String, SubscriptionData> getSubscriptionInner() {
return subscriptionInner;
}
private boolean rebalanceByTopic(final String topic, final boolean isOrder) {
boolean balanced = true;
switch (messageModel) {
case BROADCASTING: {
Set<MessageQueue> mqSet = this.topicSubscribeInfoTable.get(topic);
if (mqSet != null) {
boolean changed = this.updateProcessQueueTableInRebalance(topic, mqSet, false);
if (changed) {
this.messageQueueChanged(topic, mqSet, mqSet);
log.info("messageQueueChanged {} {} {} {}", consumerGroup, topic, mqSet, mqSet);
}
balanced = mqSet.equals(getWorkingMessageQueue(topic));
} else {
this.messageQueueChanged(topic, Collections.<MessageQueue>emptySet(), Collections.<MessageQueue>emptySet());
log.warn("doRebalance, {}, but the topic[{}] not exist.", consumerGroup, topic);
}
break;
}
case CLUSTERING: {
Set<MessageQueue> mqSet = this.topicSubscribeInfoTable.get(topic);
List<String> cidAll = this.mQClientFactory.findConsumerIdList(topic, consumerGroup);
if (null == mqSet) {
if (!topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
this.messageQueueChanged(topic, Collections.<MessageQueue>emptySet(), Collections.<MessageQueue>emptySet());
log.warn("doRebalance, {}, but the topic[{}] not exist.", consumerGroup, topic);
}
}
if (null == cidAll) {
log.warn("doRebalance, {} {}, get consumer id list failed", consumerGroup, topic);
}
if (mqSet != null && cidAll != null) {
List<MessageQueue> mqAll = new ArrayList<>();
mqAll.addAll(mqSet);
Collections.sort(mqAll);
Collections.sort(cidAll);
AllocateMessageQueueStrategy strategy = this.allocateMessageQueueStrategy;
List<MessageQueue> allocateResult = null;
try {
allocateResult = strategy.allocate(
this.consumerGroup,
this.mQClientFactory.getClientId(),
mqAll,
cidAll);
} catch (Throwable e) {
log.error("allocate message queue exception. strategy name: {}, ex: {}", strategy.getName(), e);
return false;
}
Set<MessageQueue> allocateResultSet = new HashSet<>();
if (allocateResult != null) {
allocateResultSet.addAll(allocateResult);
}
boolean changed = this.updateProcessQueueTableInRebalance(topic, allocateResultSet, isOrder);
if (changed) {
log.info(
"client rebalanced result changed. allocateMessageQueueStrategyName={}, group={}, topic={}, clientId={}, mqAllSize={}, cidAllSize={}, rebalanceResultSize={}, rebalanceResultSet={}",
strategy.getName(), consumerGroup, topic, this.mQClientFactory.getClientId(), mqSet.size(), cidAll.size(),
allocateResultSet.size(), allocateResultSet);
this.messageQueueChanged(topic, mqSet, allocateResultSet);
}
balanced = allocateResultSet.equals(getWorkingMessageQueue(topic));
}
break;
}
default:
break;
}
return balanced;
}
private boolean getRebalanceResultFromBroker(final String topic, final boolean isOrder) {
String strategyName = this.allocateMessageQueueStrategy.getName();
Set<MessageQueueAssignment> messageQueueAssignments;
try {
messageQueueAssignments = this.mQClientFactory.queryAssignment(topic, consumerGroup,
strategyName, messageModel, QUERY_ASSIGNMENT_TIMEOUT);
} catch (Exception e) {
log.error("allocate message queue exception. strategy name: {}, ex: {}", strategyName, e);
return false;
}
// null means invalid result, we should skip the update logic
if (messageQueueAssignments == null) {
return false;
}
Set<MessageQueue> mqSet = new HashSet<>();
for (MessageQueueAssignment messageQueueAssignment : messageQueueAssignments) {
if (messageQueueAssignment.getMessageQueue() != null) {
mqSet.add(messageQueueAssignment.getMessageQueue());
}
}
Set<MessageQueue> mqAll = null;
boolean changed = this.updateMessageQueueAssignment(topic, messageQueueAssignments, isOrder);
if (changed) {
log.info("broker rebalanced result changed. allocateMessageQueueStrategyName={}, group={}, topic={}, clientId={}, assignmentSet={}",
strategyName, consumerGroup, topic, this.mQClientFactory.getClientId(), messageQueueAssignments);
this.messageQueueChanged(topic, mqAll, mqSet);
}
return mqSet.equals(getWorkingMessageQueue(topic));
}
private Set<MessageQueue> getWorkingMessageQueue(String topic) {
Set<MessageQueue> queueSet = new HashSet<>();
for (Entry<MessageQueue, ProcessQueue> entry : this.processQueueTable.entrySet()) {
MessageQueue mq = entry.getKey();
ProcessQueue pq = entry.getValue();
if (mq.getTopic().equals(topic) && !pq.isDropped()) {
queueSet.add(mq);
}
}
for (Entry<MessageQueue, PopProcessQueue> entry : this.popProcessQueueTable.entrySet()) {
MessageQueue mq = entry.getKey();
PopProcessQueue pq = entry.getValue();
if (mq.getTopic().equals(topic) && !pq.isDropped()) {
queueSet.add(mq);
}
}
return queueSet;
}
private void truncateMessageQueueNotMyTopic() {
Map<String, SubscriptionData> subTable = this.getSubscriptionInner();
for (MessageQueue mq : this.processQueueTable.keySet()) {
if (!subTable.containsKey(mq.getTopic())) {
ProcessQueue pq = this.processQueueTable.remove(mq);
if (pq != null) {
pq.setDropped(true);
log.info("doRebalance, {}, truncateMessageQueueNotMyTopic remove unnecessary mq, {}", consumerGroup, mq);
}
}
}
for (MessageQueue mq : this.popProcessQueueTable.keySet()) {
if (!subTable.containsKey(mq.getTopic())) {
PopProcessQueue pq = this.popProcessQueueTable.remove(mq);
if (pq != null) {
pq.setDropped(true);
log.info("doRebalance, {}, truncateMessageQueueNotMyTopic remove unnecessary pop mq, {}", consumerGroup, mq);
}
}
}
}
private boolean updateProcessQueueTableInRebalance(final String topic, final Set<MessageQueue> mqSet,
final boolean needLockMq) {
boolean changed = false;
// drop process queues no longer belong me
HashMap<MessageQueue, ProcessQueue> removeQueueMap = new HashMap<>(this.processQueueTable.size());
Iterator<Entry<MessageQueue, ProcessQueue>> it = this.processQueueTable.entrySet().iterator();
while (it.hasNext()) {
Entry<MessageQueue, ProcessQueue> next = it.next();
MessageQueue mq = next.getKey();
ProcessQueue pq = next.getValue();
if (mq.getTopic().equals(topic)) {
if (!mqSet.contains(mq)) {
pq.setDropped(true);
removeQueueMap.put(mq, pq);
} else if (pq.isPullExpired() && this.consumeType() == ConsumeType.CONSUME_PASSIVELY) {
pq.setDropped(true);
removeQueueMap.put(mq, pq);
log.error("[BUG]doRebalance, {}, try remove unnecessary mq, {}, because pull is pause, so try to fixed it",
consumerGroup, mq);
}
}
}
// remove message queues no longer belong me
for (Entry<MessageQueue, ProcessQueue> entry : removeQueueMap.entrySet()) {
MessageQueue mq = entry.getKey();
ProcessQueue pq = entry.getValue();
if (this.removeUnnecessaryMessageQueue(mq, pq)) {
this.processQueueTable.remove(mq);
changed = true;
log.info("doRebalance, {}, remove unnecessary mq, {}", consumerGroup, mq);
}
}
// add new message queue
boolean allMQLocked = true;
List<PullRequest> pullRequestList = new ArrayList<>();
for (MessageQueue mq : mqSet) {
if (!this.processQueueTable.containsKey(mq)) {
if (needLockMq && !this.lock(mq)) {
log.warn("doRebalance, {}, add a new mq failed, {}, because lock failed", consumerGroup, mq);
allMQLocked = false;
continue;
}
this.removeDirtyOffset(mq);
ProcessQueue pq = createProcessQueue();
pq.setLocked(true);
long nextOffset = this.computePullFromWhere(mq);
if (nextOffset >= 0) {
ProcessQueue pre = this.processQueueTable.putIfAbsent(mq, pq);
if (pre != null) {
log.info("doRebalance, {}, mq already exists, {}", consumerGroup, mq);
} else {
log.info("doRebalance, {}, add a new mq, {}", consumerGroup, mq);
PullRequest pullRequest = new PullRequest();
pullRequest.setConsumerGroup(consumerGroup);
pullRequest.setNextOffset(nextOffset);
pullRequest.setMessageQueue(mq);
pullRequest.setProcessQueue(pq);
pullRequestList.add(pullRequest);
changed = true;
}
} else {
log.warn("doRebalance, {}, add new mq failed, {}", consumerGroup, mq);
}
}
}
if (!allMQLocked) {
mQClientFactory.rebalanceLater(500);
}
this.dispatchPullRequest(pullRequestList, 500);
return changed;
}
private boolean updateMessageQueueAssignment(final String topic, final Set<MessageQueueAssignment> assignments,
final boolean isOrder) {
boolean changed = false;
Map<MessageQueue, MessageQueueAssignment> mq2PushAssignment = new HashMap<>();
Map<MessageQueue, MessageQueueAssignment> mq2PopAssignment = new HashMap<>();
for (MessageQueueAssignment assignment : assignments) {
MessageQueue messageQueue = assignment.getMessageQueue();
if (messageQueue == null) {
continue;
}
if (MessageRequestMode.POP == assignment.getMode()) {
mq2PopAssignment.put(messageQueue, assignment);
} else {
mq2PushAssignment.put(messageQueue, assignment);
}
}
if (!topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
if (mq2PopAssignment.isEmpty() && !mq2PushAssignment.isEmpty()) {
//pop switch to push
//subscribe pop retry topic
try {
final String retryTopic = KeyBuilder.buildPopRetryTopic(topic, getConsumerGroup());
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(retryTopic, SubscriptionData.SUB_ALL);
getSubscriptionInner().put(retryTopic, subscriptionData);
} catch (Exception ignored) {
}
} else if (!mq2PopAssignment.isEmpty() && mq2PushAssignment.isEmpty()) {
//push switch to pop
//unsubscribe pop retry topic
try {
final String retryTopic = KeyBuilder.buildPopRetryTopic(topic, getConsumerGroup());
getSubscriptionInner().remove(retryTopic);
} catch (Exception ignored) {
}
}
}
{
// drop process queues no longer belong me
HashMap<MessageQueue, ProcessQueue> removeQueueMap = new HashMap<>(this.processQueueTable.size());
Iterator<Entry<MessageQueue, ProcessQueue>> it = this.processQueueTable.entrySet().iterator();
while (it.hasNext()) {
Entry<MessageQueue, ProcessQueue> next = it.next();
MessageQueue mq = next.getKey();
ProcessQueue pq = next.getValue();
if (mq.getTopic().equals(topic)) {
if (!mq2PushAssignment.containsKey(mq)) {
pq.setDropped(true);
removeQueueMap.put(mq, pq);
} else if (pq.isPullExpired() && this.consumeType() == ConsumeType.CONSUME_PASSIVELY) {
pq.setDropped(true);
removeQueueMap.put(mq, pq);
log.error("[BUG]doRebalance, {}, try remove unnecessary mq, {}, because pull is pause, so try to fixed it",
consumerGroup, mq);
}
}
}
// remove message queues no longer belong me
for (Entry<MessageQueue, ProcessQueue> entry : removeQueueMap.entrySet()) {
MessageQueue mq = entry.getKey();
ProcessQueue pq = entry.getValue();
if (this.removeUnnecessaryMessageQueue(mq, pq)) {
this.processQueueTable.remove(mq);
changed = true;
log.info("doRebalance, {}, remove unnecessary mq, {}", consumerGroup, mq);
}
}
}
{
HashMap<MessageQueue, PopProcessQueue> removeQueueMap = new HashMap<>(this.popProcessQueueTable.size());
Iterator<Entry<MessageQueue, PopProcessQueue>> it = this.popProcessQueueTable.entrySet().iterator();
while (it.hasNext()) {
Entry<MessageQueue, PopProcessQueue> next = it.next();
MessageQueue mq = next.getKey();
PopProcessQueue pq = next.getValue();
if (mq.getTopic().equals(topic)) {
if (!mq2PopAssignment.containsKey(mq)) {
//the queue is no longer your assignment
pq.setDropped(true);
removeQueueMap.put(mq, pq);
} else if (pq.isPullExpired() && this.consumeType() == ConsumeType.CONSUME_PASSIVELY) {
pq.setDropped(true);
removeQueueMap.put(mq, pq);
log.error("[BUG]doRebalance, {}, try remove unnecessary pop mq, {}, because pop is pause, so try to fixed it",
consumerGroup, mq);
}
}
}
// remove message queues no longer belong me
for (Entry<MessageQueue, PopProcessQueue> entry : removeQueueMap.entrySet()) {
MessageQueue mq = entry.getKey();
PopProcessQueue pq = entry.getValue();
if (this.removeUnnecessaryPopMessageQueue(mq, pq)) {
this.popProcessQueueTable.remove(mq);
changed = true;
log.info("doRebalance, {}, remove unnecessary pop mq, {}", consumerGroup, mq);
}
}
}
{
// add new message queue
boolean allMQLocked = true;
List<PullRequest> pullRequestList = new ArrayList<>();
for (MessageQueue mq : mq2PushAssignment.keySet()) {
if (!this.processQueueTable.containsKey(mq)) {
if (isOrder && !this.lock(mq)) {
log.warn("doRebalance, {}, add a new mq failed, {}, because lock failed", consumerGroup, mq);
allMQLocked = false;
continue;
}
this.removeDirtyOffset(mq);
ProcessQueue pq = createProcessQueue();
pq.setLocked(true);
long nextOffset = -1L;
try {
nextOffset = this.computePullFromWhereWithException(mq);
} catch (Exception e) {
log.info("doRebalance, {}, compute offset failed, {}", consumerGroup, mq);
continue;
}
if (nextOffset >= 0) {
ProcessQueue pre = this.processQueueTable.putIfAbsent(mq, pq);
if (pre != null) {
log.info("doRebalance, {}, mq already exists, {}", consumerGroup, mq);
} else {
log.info("doRebalance, {}, add a new mq, {}", consumerGroup, mq);
PullRequest pullRequest = new PullRequest();
pullRequest.setConsumerGroup(consumerGroup);
pullRequest.setNextOffset(nextOffset);
pullRequest.setMessageQueue(mq);
pullRequest.setProcessQueue(pq);
pullRequestList.add(pullRequest);
changed = true;
}
} else {
log.warn("doRebalance, {}, add new mq failed, {}", consumerGroup, mq);
}
}
}
if (!allMQLocked) {
mQClientFactory.rebalanceLater(500);
}
this.dispatchPullRequest(pullRequestList, 500);
}
{
// add new message queue
List<PopRequest> popRequestList = new ArrayList<>();
for (MessageQueue mq : mq2PopAssignment.keySet()) {
if (!this.popProcessQueueTable.containsKey(mq)) {
PopProcessQueue pq = createPopProcessQueue();
PopProcessQueue pre = this.popProcessQueueTable.putIfAbsent(mq, pq);
if (pre != null) {
log.info("doRebalance, {}, mq pop already exists, {}", consumerGroup, mq);
} else {
log.info("doRebalance, {}, add a new pop mq, {}", consumerGroup, mq);
PopRequest popRequest = new PopRequest();
popRequest.setTopic(topic);
popRequest.setConsumerGroup(consumerGroup);
popRequest.setMessageQueue(mq);
popRequest.setPopProcessQueue(pq);
popRequest.setInitMode(getConsumeInitMode());
popRequestList.add(popRequest);
changed = true;
}
}
}
this.dispatchPopPullRequest(popRequestList, 500);
}
return changed;
}
public abstract void messageQueueChanged(final String topic, final Set<MessageQueue> mqAll,
final Set<MessageQueue> mqDivided);
public abstract boolean removeUnnecessaryMessageQueue(final MessageQueue mq, final ProcessQueue pq);
public boolean removeUnnecessaryPopMessageQueue(final MessageQueue mq, final PopProcessQueue pq) {
return true;
}
public abstract ConsumeType consumeType();
public abstract void removeDirtyOffset(final MessageQueue mq);
/**
* When the network is unstable, using this | RebalanceImpl |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/server/header/CrossOriginEmbedderPolicyServerHttpHeadersWriter.java | {
"start": 2200,
"end": 2506
} | enum ____ {
UNSAFE_NONE("unsafe-none"),
REQUIRE_CORP("require-corp"),
CREDENTIALLESS("credentialless");
private final String policy;
CrossOriginEmbedderPolicy(String policy) {
this.policy = policy;
}
public String getPolicy() {
return this.policy;
}
}
}
| CrossOriginEmbedderPolicy |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/DetachedPreviousRowStateTest.java | {
"start": 4114,
"end": 4477
} | class ____ {
@Id
@GeneratedValue
private long id;
private String description;
@OneToOne(fetch = FetchType.LAZY)
private Product product;
public Description() {
}
public Description(Product product) {
this.product = product;
}
}
@Entity(name = "LocalizedDescription")
@Table(name = "localized_description_tbl")
public static | Description |
java | quarkusio__quarkus | extensions/smallrye-fault-tolerance/deployment/src/test/java/io/quarkus/smallrye/faulttolerance/test/asynchronous/additional/BlockingNonBlockingOnClassTest.java | {
"start": 449,
"end": 974
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(BlockingNonBlockingOnClassService.class))
.assertException(e -> {
assertEquals(DefinitionException.class, e.getClass());
assertTrue(e.getMessage().contains("Both @Blocking and @NonBlocking present"));
});
@Test
public void test() {
fail();
}
}
| BlockingNonBlockingOnClassTest |
java | elastic__elasticsearch | x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java | {
"start": 10447,
"end": 32608
} | class ____ extends FsRepository {
private static final String TYPE = "restart_before_listeners";
private final Runnable beforeResponseRunnable;
protected TestRestartBeforeListenersRepo(
ProjectId projectId,
RepositoryMetadata metadata,
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
BigArrays bigArrays,
RecoverySettings recoverySettings,
Runnable beforeResponseRunnable
) {
super(projectId, metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings);
this.beforeResponseRunnable = beforeResponseRunnable;
}
@Override
public void finalizeSnapshot(FinalizeSnapshotContext fsc) {
var newFinalizeContext = new FinalizeSnapshotContext(
false,
fsc.updatedShardGenerations(),
fsc.repositoryStateId(),
fsc.clusterMetadata(),
fsc.snapshotInfo(),
fsc.repositoryMetaVersion(),
fsc,
() -> {
// run the passed lambda before calling the usual callback
// this is where the cluster can be restarted before SLM is called back with the snapshotInfo
beforeResponseRunnable.run();
fsc.onDone();
}
);
super.finalizeSnapshot(newFinalizeContext);
}
}
@Before
public void clearRepoFinalizeRunnable() {
TestRestartBeforeListenersRepoPlugin.clearOnResponse();
}
/**
* Test that if there is a currently running snapshot it is not inferred to be a failure
*/
public void testCurrentlyRunningSnapshotNotRecordedAsFailure() throws Exception {
final String idxName = "test-idx";
final String repoName = "test-repo";
final String policyName = "test-policy";
internalCluster().startMasterOnlyNodes(1);
final String masterNode = internalCluster().getMasterName();
final String dataNode = internalCluster().startDataOnlyNode();
ensureStableCluster(2);
createRandomIndex(idxName, dataNode);
createRepository(repoName, TestDelayedRepo.TYPE);
createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName);
ensureGreen();
String snapshotA = executePolicy(masterNode, policyName);
logger.info("Created snapshot A: " + snapshotA);
// wait until snapshotA is registered before starting snapshotB
assertBusy(() -> assertRegistered(policyName, List.of(snapshotA)), 1, TimeUnit.MINUTES);
// create another snapshot while A is still running
String snapshotB = executePolicy(masterNode, policyName);
logger.info("Created snapshot B: " + snapshotB);
// wait until both snapshots are registered before allowing snapshotA to continue
assertBusy(() -> assertRegistered(policyName, List.of(snapshotA, snapshotB)), 1, TimeUnit.MINUTES);
// remove delay from snapshotA allowing it to finish
TestDelayedRepoPlugin.removeDelay();
waitForSnapshot(repoName, snapshotA);
waitForSnapshot(repoName, snapshotB);
assertBusy(() -> {
assertSnapshotSuccess(repoName, snapshotA);
assertSnapshotSuccess(repoName, snapshotB);
assertMetadata(policyName, 2, 0, 0);
assertRegistered(policyName, List.of());
}, 1, TimeUnit.MINUTES);
}
/**
* Test that after successful snapshot registered is empty
*/
public void testSuccessSnapshot() throws Exception {
final String idxName = "test-idx";
final String repoName = "test-repo";
final String policyName = "test-policy";
internalCluster().startMasterOnlyNodes(1);
final String masterNode = internalCluster().getMasterName();
final String dataNode = internalCluster().startDataOnlyNode();
ensureStableCluster(2);
createRandomIndex(idxName, dataNode);
createRepository(repoName, TestRestartBeforeListenersRepo.TYPE);
createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName);
ensureGreen();
String snapshotName = executePolicy(masterNode, policyName);
logger.info("Created snapshot: " + snapshotName);
waitForSnapshot(repoName, snapshotName);
assertBusy(() -> {
assertSnapshotSuccess(repoName, snapshotName);
assertMetadata(policyName, 1, 0, 0);
assertRegistered(policyName, List.of());
}, 1, TimeUnit.MINUTES);
}
/**
* Test that after a failure which fails stats uploads, then a success,
* registered snapshot from failure is added to invocationsSinceLastSuccess.
*/
public void testFailSnapshotFailStatsThenSuccessRecoverStats() throws Exception {
final String idxName = "test-idx";
final String repoName = "test-repo";
final String policyName = "test-policy";
internalCluster().startMasterOnlyNodes(1);
final String masterNode = internalCluster().getMasterName();
final String dataNode = internalCluster().startDataOnlyNode();
ensureStableCluster(2);
// Add network disruption so snapshot fails with PARTIAL status
NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT);
internalCluster().setDisruptionScheme(networkDisruption);
// wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back
var runDuringFinalize = new RunDuringFinalize();
TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable());
createRandomIndex(idxName, dataNode);
createRepository(repoName, TestRestartBeforeListenersRepo.TYPE);
createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName);
ensureGreen();
networkDisruption.startDisrupting();
String snapshotName = executePolicy(masterNode, policyName);
logger.info("Created snapshot: " + snapshotName);
// restart snapshot after snapshot finalize, but before SLM callback called
runDuringFinalize.awaitAndRun(() -> {
try {
internalCluster().restartNode(masterNode);
} catch (Exception e) {
throw new RuntimeException(e);
}
});
assertBusy(() -> {
assertSnapshotPartial(repoName, snapshotName);
assertMetadata(policyName, 0, 0, 0);
assertRegistered(policyName, List.of(snapshotName));
}, 1, TimeUnit.MINUTES);
awaitNoMoreRunningOperations();
ensureGreen();
// Now execute again, and succeed. The failure from the previous run will now be recorded.
TestRestartBeforeListenersRepoPlugin.clearOnResponse();
final String snapshotName2 = executePolicy(masterNode, policyName);
assertNotEquals(snapshotName, snapshotName2);
logger.info("Created snapshot: " + snapshotName2);
waitForSnapshot(repoName, snapshotName2);
assertBusy(() -> {
assertSnapshotSuccess(repoName, snapshotName2);
// Check stats, this time past failure should be accounted for
assertMetadata(policyName, 1, 1, 0);
assertRegistered(policyName, List.of());
}, 1, TimeUnit.MINUTES);
}
    /**
     * Test that a failure whose stats update was lost (master restarted after snapshot
     * finalize but before the SLM listener recorded it) is recovered later: once a
     * subsequent failed run successfully persists its stats, the earlier
     * registered-but-unrecorded failure is also counted, so both failures appear in the
     * failure total and in invocationsSinceLastSuccess.
     */
    public void testFailSnapshotFailStatsRecoverStats() throws Exception {
        final String idxName = "test-idx";
        final String repoName = "test-repo";
        final String policyName = "test-policy";
        internalCluster().startMasterOnlyNodes(1);
        final String masterNode = internalCluster().getMasterName();
        final String dataNode = internalCluster().startDataOnlyNode();
        ensureStableCluster(2);
        // Add network disruption so snapshot fails with PARTIAL status
        NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT);
        internalCluster().setDisruptionScheme(networkDisruption);
        // wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back
        var runDuringFinalize = new RunDuringFinalize();
        TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable());
        createRandomIndex(idxName, dataNode);
        createRepository(repoName, TestRestartBeforeListenersRepo.TYPE);
        createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName);
        awaitNoMoreRunningOperations();
        ensureGreen();
        networkDisruption.startDisrupting();
        String snapshotName = executePolicy(masterNode, policyName);
        logger.info("Created snapshot: " + snapshotName);
        // restart snapshot after snapshot finalize, but before SLM callback called
        runDuringFinalize.awaitAndRun(() -> {
            try {
                internalCluster().restartNode(masterNode);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        assertBusy(() -> {
            assertSnapshotPartial(repoName, snapshotName);
            // The stats update was lost with the restart, so all counters are still zero...
            assertMetadata(policyName, 0, 0, 0);
            // ...but the run is still registered, allowing later reconciliation.
            assertRegistered(policyName, List.of(snapshotName));
        }, 1, TimeUnit.MINUTES);
        awaitNoMoreRunningOperations();
        ensureGreen();
        // Now execute again, but don't fail the stat upload. The failure from the previous run will now be recorded.
        var runDuringFinalize2 = new RunDuringFinalize();
        TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize2.finalizeThreadRunnable());
        networkDisruption.startDisrupting();
        final String snapshotName2 = executePolicy(masterNode, policyName);
        assertNotEquals(snapshotName, snapshotName2);
        logger.info("Created snapshot: " + snapshotName2);
        runDuringFinalize2.awaitAndRun(networkDisruption::stopDisrupting);
        assertBusy(() -> {
            assertSnapshotPartial(repoName, snapshotName2);
            // Check metadata, this time past failure should be accounted for
            assertMetadata(policyName, 0, 2, 2);
            assertRegistered(policyName, List.of());
        }, 1, TimeUnit.MINUTES);
    }
    /**
     * Test that when the master restarts after a failed (PARTIAL) snapshot finalizes but
     * before the SLM stats listener runs, the update of invocationsSinceLastSuccess is
     * lost: the policy metadata still shows zero taken/failed/invocations afterwards.
     */
    public void testFailedSnapshotFailStats() throws Exception {
        final String idxName = "test-idx";
        final String repoName = "test-repo";
        final String policyName = "test-policy";
        internalCluster().startMasterOnlyNodes(1);
        final String masterNode = internalCluster().getMasterName();
        final String dataNode = internalCluster().startDataOnlyNode();
        ensureStableCluster(2);
        // Add network disruption so snapshot fails with PARTIAL status
        NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT);
        internalCluster().setDisruptionScheme(networkDisruption);
        // wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back
        var runDuringFinalize = new RunDuringFinalize();
        TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable());
        createRandomIndex(idxName, dataNode);
        createRepository(repoName, TestRestartBeforeListenersRepo.TYPE);
        createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName);
        ensureGreen();
        networkDisruption.startDisrupting();
        String snapshotName = executePolicy(masterNode, policyName);
        // restart snapshot after snapshot finalize, but before SLM callback called
        runDuringFinalize.awaitAndRun(() -> {
            try {
                internalCluster().restartNode(masterNode);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        assertBusy(() -> {
            assertSnapshotPartial(repoName, snapshotName);
            // The restart dropped the stats update, so nothing was recorded for this run.
            assertMetadata(policyName, 0, 0, 0);
        }, 1, TimeUnit.MINUTES);
    }
    /**
     * Confirm normal behavior for a failed (PARTIAL) snapshot whose stats update does
     * reach cluster state: both the failure counter and invocationsSinceLastSuccess
     * become 1.
     */
    public void testFailedSnapshotSubmitStats() throws Exception {
        final String idxName = "test-idx";
        final String repoName = "test-repo";
        final String policyName = "test-policy";
        internalCluster().startMasterOnlyNodes(1);
        final String masterNode = internalCluster().getMasterName();
        final String dataNode = internalCluster().startDataOnlyNode();
        ensureStableCluster(2);
        // Add network disruption so snapshot fails with PARTIAL status
        NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT);
        internalCluster().setDisruptionScheme(networkDisruption);
        // wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back
        var runDuringFinalize = new RunDuringFinalize();
        TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable());
        createRandomIndex(idxName, dataNode);
        createRepository(repoName, TestRestartBeforeListenersRepo.TYPE);
        createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName);
        ensureGreen();
        networkDisruption.startDisrupting();
        String snapshotName = executePolicy(masterNode, policyName);
        // wait for snapshot to complete and network disruption to stop
        runDuringFinalize.awaitAndRun(networkDisruption::stopDisrupting);
        assertBusy(() -> {
            assertSnapshotPartial(repoName, snapshotName);
            // No restart this time: the single failure is recorded normally.
            assertMetadata(policyName, 0, 1, 1);
        }, 1, TimeUnit.MINUTES);
    }
private void assertMetadata(String policyName, long taken, long failure, long invocationsSinceLastSuccess) {
var snapshotLifecycleMetadata = getSnapshotLifecycleMetadata();
var snapshotLifecyclePolicyMetadata = snapshotLifecycleMetadata.getSnapshotConfigurations().get(policyName);
assertStats(snapshotLifecycleMetadata, policyName, taken, failure);
if (taken > 0) {
assertNotNull(snapshotLifecyclePolicyMetadata.getLastSuccess());
} else {
assertNull(snapshotLifecyclePolicyMetadata.getLastSuccess());
}
if (failure > 0) {
assertNotNull(snapshotLifecyclePolicyMetadata.getLastFailure());
} else {
assertNull(snapshotLifecyclePolicyMetadata.getLastFailure());
}
assertEquals(invocationsSinceLastSuccess, snapshotLifecyclePolicyMetadata.getInvocationsSinceLastSuccess());
}
private SnapshotLifecycleMetadata getSnapshotLifecycleMetadata() {
final ClusterStateResponse clusterStateResponse = client().admin()
.cluster()
.state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT))
.actionGet();
ClusterState state = clusterStateResponse.getState();
return state.metadata().getProject().custom(SnapshotLifecycleMetadata.TYPE);
}
private RegisteredPolicySnapshots getRegisteredSnapshots() {
final ClusterStateResponse clusterStateResponse = client().admin()
.cluster()
.state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT))
.actionGet();
ClusterState state = clusterStateResponse.getState();
return state.metadata().getProject().custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY);
}
private SnapshotInfo getSnapshotInfo(String repository, String snapshot) {
GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repository)
.setSnapshots(snapshot)
.get();
return snapshotsStatusResponse.getSnapshots().get(0);
}
private SnapshotsStatusResponse getSnapshotStatus(String repo, String snapshotName) {
return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo).setSnapshots(snapshotName).get();
}
private void assertSnapshotSuccess(String repository, String snapshot) {
try {
SnapshotInfo snapshotInfo = getSnapshotInfo(repository, snapshot);
assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
assertEquals(1, snapshotInfo.successfulShards());
assertEquals(0, snapshotInfo.failedShards());
logger.info("Checked snapshot exists and is state SUCCESS");
} catch (SnapshotMissingException e) {
fail("expected a snapshot with name " + snapshot + " but it does yet not exist");
}
}
private void assertSnapshotPartial(String repository, String snapshot) {
try {
SnapshotInfo snapshotInfo = getSnapshotInfo(repository, snapshot);
assertEquals(SnapshotState.PARTIAL, snapshotInfo.state());
assertEquals(0, snapshotInfo.successfulShards());
assertEquals(1, snapshotInfo.failedShards());
logger.info("Checked snapshot exists and is state PARTIAL");
} catch (SnapshotMissingException e) {
fail("expected a snapshot with name " + snapshot + " but it does yet not exist");
}
}
private void assertStats(SnapshotLifecycleMetadata snapshotLifecycleMetadata, String policyName, long taken, long failed) {
var stats = snapshotLifecycleMetadata.getStats().getMetrics().get(policyName);
logger.info("stats: " + stats);
if (taken == 0 && failed == 0) {
assertTrue(stats == null || (stats.getSnapshotTakenCount() == 0 && stats.getSnapshotFailedCount() == 0));
} else {
assertNotNull(stats);
assertEquals(taken, stats.getSnapshotTakenCount());
assertEquals(failed, stats.getSnapshotFailedCount());
}
}
private void assertRegistered(String policyName, List<String> expected) {
var registered = getRegisteredSnapshots();
var policySnaps = registered.getSnapshotsByPolicy(policyName).stream().map(SnapshotId::getName).toList();
assertEquals(expected, policySnaps);
}
private void createRandomIndex(String idxName, String dataNodeName) throws InterruptedException {
Settings settings = indexSettings(1, 0).put("index.routing.allocation.require._name", dataNodeName).build();
createIndex(idxName, settings);
logger.info("--> indexing some data");
final int numdocs = randomIntBetween(10, 100);
IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
for (int i = 0; i < builders.length; i++) {
builders[i] = prepareIndex(idxName).setId(Integer.toString(i)).setSource("field1", "bar " + i);
}
indexRandom(true, builders);
indicesAdmin().refresh(new RefreshRequest(idxName)).actionGet();
}
private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, String indexPattern) {
Map<String, Object> snapConfig = new HashMap<>();
snapConfig.put("indices", Collections.singletonList(indexPattern));
snapConfig.put("ignore_unavailable", false);
snapConfig.put("partial", true);
SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(
policyName,
snapshotNamePattern,
schedule,
repoId,
snapConfig,
SnapshotRetentionConfiguration.EMPTY
);
PutSnapshotLifecycleAction.Request putLifecycle = new PutSnapshotLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
policyName,
policy
);
try {
client().execute(PutSnapshotLifecycleAction.INSTANCE, putLifecycle).get();
} catch (Exception e) {
logger.error("failed to create slm policy", e);
fail("failed to create policy " + policy + " got: " + e);
}
}
/**
* Execute the given policy and return the generated snapshot name
*/
private String executePolicy(String node, String policyId) throws ExecutionException, InterruptedException {
ExecuteSnapshotLifecycleAction.Request executeReq = new ExecuteSnapshotLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
policyId
);
ExecuteSnapshotLifecycleAction.Response resp = client(node).execute(ExecuteSnapshotLifecycleAction.INSTANCE, executeReq).get();
return resp.getSnapshotName();
}
private void waitForSnapshot(String repo, String snapshotName) throws Exception {
assertBusy(() -> {
try {
SnapshotsStatusResponse s = getSnapshotStatus(repo, snapshotName);
assertThat("expected a snapshot but none were returned", s.getSnapshots().size(), equalTo(1));
SnapshotStatus status = s.getSnapshots().get(0);
logger.info("--> waiting for snapshot {} to be completed, got: {}", snapshotName, status.getState());
assertThat(status.getState(), equalTo(SnapshotsInProgress.State.SUCCESS));
} catch (SnapshotMissingException e) {
logger.error("expected a snapshot but it was missing", e);
fail("expected a snapshot with name " + snapshotName + " but it does not exist");
}
});
}
/**
* The purpose of this | TestRestartBeforeListenersRepo |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java | {
"start": 2050,
"end": 37576
} | class ____ {
  // Tiny block size keeps multi-block test files small.
  private static final int BLOCKSIZE = 1024;
  private static final short REPLICATION = 3;
  // Fixed seed makes generated file contents deterministic across runs.
  private static final long seed = 0L;
  // Root directory under which all test paths are created.
  private static final Path dir = new Path("/TestQuotaByStorageType");
  private MiniDFSCluster cluster;
  private FSDirectory fsdir;
  private DistributedFileSystem dfs;
  private FSNamesystem fsn;
  protected static final Logger LOG =
      LoggerFactory.getLogger(TestQuotaByStorageType.class);
  /**
   * Start a 3-datanode mini cluster with a 1 KB block size; each datanode exposes one
   * SSD and one DISK volume without capacity limits.
   */
  @BeforeEach
  public void setUp() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    // Setup a 3-node cluster and configure
    // each node with 1 SSD and 1 DISK without capacity limitation
    cluster = new MiniDFSCluster
        .Builder(conf)
        .numDataNodes(REPLICATION)
        .storageTypes(new StorageType[]{StorageType.SSD, StorageType.DEFAULT})
        .build();
    cluster.waitActive();
    refreshClusterState();
  }
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
// Cluster state must be refreshed after each start/restart in the test
private void refreshClusterState() throws IOException{
fsdir = cluster.getNamesystem().getFSDirectory();
dfs = cluster.getFileSystem();
fsn = cluster.getNamesystem();
}
  /**
   * ONE_SSD policy: one replica lands on SSD, so one replica's worth of SSD space is
   * expected to be consumed.
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeWithFileCreateOneSSD() throws Exception {
    testQuotaByStorageTypeWithFileCreateCase(
        HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
        StorageType.SSD,
        (short)1);
  }
  /**
   * ALL_SSD policy: all 3 replicas land on SSD, so three replicas' worth of SSD space is
   * expected to be consumed.
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeWithFileCreateAllSSD() throws Exception {
    testQuotaByStorageTypeWithFileCreateCase(
        HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
        StorageType.SSD,
        (short)3);
  }
  /**
   * Shared case: create a 2.5-block file under a directory with the given storage policy
   * and an SSD quota of 10 blocks, then verify that the consumed space for
   * {@code storageType} equals the file length times {@code replication} (the number of
   * replicas the policy places on that storage type).
   */
  void testQuotaByStorageTypeWithFileCreateCase(
      String storagePolicy, StorageType storageType, short replication) throws Exception {
    final Path foo = new Path(dir, "foo");
    Path createdFile1 = new Path(foo, "created_file1.data");
    dfs.mkdirs(foo);
    // set storage policy on directory "foo" to storagePolicy
    dfs.setStoragePolicy(foo, storagePolicy);
    // set quota by storage type on directory "foo"
    dfs.setQuotaByStorageType(foo, storageType, BLOCKSIZE * 10);
    INode fnode = fsdir.getINode4Write(foo.toString());
    assertTrue(fnode.isDirectory());
    assertTrue(fnode.isQuotaSet());
    // Create file of size 2 * BLOCKSIZE under directory "foo"
    long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
    int bufLen = BLOCKSIZE / 16;
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
    // Verify space consumed and remaining quota
    long storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(storageType);
    assertEquals(file1Len * replication, storageTypeConsumed);
  }
  /**
   * Create a 2-block file under a ONE_SSD directory with an SSD quota, then append two
   * more blocks. Verify SSD consumption tracks the full file length and the content
   * summary reflects one SSD replica plus two DISK replicas per block.
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception {
    final Path foo = new Path(dir, "foo");
    Path createdFile1 = new Path(foo, "created_file1.data");
    dfs.mkdirs(foo);
    // set storage policy on directory "foo" to ONESSD
    dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // set quota by storage type on directory "foo"
    dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
    INode fnode = fsdir.getINode4Write(foo.toString());
    assertTrue(fnode.isDirectory());
    assertTrue(fnode.isQuotaSet());
    // Create file of size 2 * BLOCKSIZE under directory "foo"
    long file1Len = BLOCKSIZE * 2;
    int bufLen = BLOCKSIZE / 16;
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
    // Verify space consumed and remaining quota
    long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, ssdConsumed);
    // append several blocks
    int appendLen = BLOCKSIZE * 2;
    DFSTestUtil.appendFile(dfs, createdFile1, appendLen);
    file1Len += appendLen;
    ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, ssdConsumed);
    // ONE_SSD: total space = 3 replicas, of which 1 on SSD and 2 on DISK.
    ContentSummary cs = dfs.getContentSummary(foo);
    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
    assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
    assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
  }
  /**
   * Create a 2.5-block file under a ONE_SSD directory with an SSD quota, delete it, and
   * verify the per-storage-type consumed space drops back to zero (both in the quota
   * feature, in computeQuotaUsage, and in the content summary).
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception {
    final Path foo = new Path(dir, "foo");
    Path createdFile1 = new Path(foo, "created_file1.data");
    dfs.mkdirs(foo);
    dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // set quota by storage type on directory "foo"
    dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
    INode fnode = fsdir.getINode4Write(foo.toString());
    assertTrue(fnode.isDirectory());
    assertTrue(fnode.isQuotaSet());
    // Create file of size 2.5 * BLOCKSIZE under directory "foo"
    long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
    int bufLen = BLOCKSIZE / 16;
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
    // Verify space consumed and remaining quota
    long storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, storageTypeConsumed);
    // Delete file and verify the consumed space of the storage type is updated
    dfs.delete(createdFile1, false);
    storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(0, storageTypeConsumed);
    QuotaCounts counts = fnode.computeQuotaUsage(
        fsn.getBlockManager().getStoragePolicySuite(), true);
    assertEquals(0, counts.getTypeSpaces().get(StorageType.SSD),
        fnode.dumpTreeRecursively().toString());
    ContentSummary cs = dfs.getContentSummary(foo);
    assertEquals(cs.getSpaceConsumed(), 0);
    assertEquals(cs.getTypeConsumed(StorageType.SSD), 0);
    assertEquals(cs.getTypeConsumed(StorageType.DISK), 0);
  }
@Test
@Timeout(value = 60)
public void testQuotaByStorageTypeWithFileCreateRename() throws Exception {
final Path foo = new Path(dir, "foo");
dfs.mkdirs(foo);
Path createdFile1foo = new Path(foo, "created_file1.data");
final Path bar = new Path(dir, "bar");
dfs.mkdirs(bar);
Path createdFile1bar = new Path(bar, "created_file1.data");
// set storage policy on directory "foo" and "bar" to ONESSD
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setStoragePolicy(bar, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on directory "foo"
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
dfs.setQuotaByStorageType(bar, StorageType.SSD, BLOCKSIZE * 2);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
// Create file of size 3 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 3;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1foo, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify space consumed and remaining quota
long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// move file from foo to bar
try {
dfs.rename(createdFile1foo, createdFile1bar);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
/**
* Test if the quota can be correctly updated for create file even
* QuotaByStorageTypeExceededException is thrown
*/
@Test
@Timeout(value = 60)
public void testQuotaByStorageTypeExceptionWithFileCreate() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
// Create the 1st file of size 2 * BLOCKSIZE under directory "foo" and expect no exception
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, currentSSDConsumed);
// Create the 2nd file of size 1.5 * BLOCKSIZE under directory "foo" and expect no exception
Path createdFile2 = new Path(foo, "created_file2.data");
long file2Len = BLOCKSIZE + BLOCKSIZE / 2;
DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, REPLICATION, seed);
currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len + file2Len, currentSSDConsumed);
// Create the 3rd file of size BLOCKSIZE under directory "foo" and expect quota exceeded exception
Path createdFile3 = new Path(foo, "created_file3.data");
long file3Len = BLOCKSIZE;
try {
DFSTestUtil.createFile(dfs, createdFile3, bufLen, file3Len, BLOCKSIZE, REPLICATION, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len + file2Len, currentSSDConsumed);
}
}
  /**
   * No storage-type quota on parent or child: file creation succeeds and SSD usage is
   * tracked by the root directory's quota feature instead.
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeParentOffChildOff() throws Exception {
    final Path parent = new Path(dir, "parent");
    final Path child = new Path(parent, "child");
    dfs.mkdirs(parent);
    dfs.mkdirs(child);
    dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // Create file of size 2.5 * BLOCKSIZE under child directory.
    // Since both parent and child directory do not have SSD quota set,
    // expect succeed without exception
    Path createdFile1 = new Path(child, "created_file1.data");
    long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
    int bufLen = BLOCKSIZE / 16;
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    // Verify SSD usage at the root level as both parent/child don't have DirectoryWithQuotaFeature
    INode fnode = fsdir.getINode4Write("/");
    long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, ssdConsumed);
  }
@Test
@Timeout(value = 60)
public void testQuotaByStorageTypeParentOffChildOn() throws Exception {
final Path parent = new Path(dir, "parent");
final Path child = new Path(parent, "child");
dfs.mkdirs(parent);
dfs.mkdirs(child);
dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(child, StorageType.SSD, 2 * BLOCKSIZE);
// Create file of size 2.5 * BLOCKSIZE under child directory
// Since child directory have SSD quota of 2 * BLOCKSIZE,
// expect an exception when creating files under child directory.
Path createdFile1 = new Path(child, "created_file1.data");
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
try {
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
REPLICATION, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
@Test
@Timeout(value = 60)
public void testQuotaByStorageTypeParentOnChildOff() throws Exception {
short replication = 1;
final Path parent = new Path(dir, "parent");
final Path child = new Path(parent, "child");
dfs.mkdirs(parent);
dfs.mkdirs(child);
dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(parent, StorageType.SSD, 3 * BLOCKSIZE);
// Create file of size 2.5 * BLOCKSIZE under child directory
// Verify parent Quota applies
Path createdFile1 = new Path(child, "created_file1.data");
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
replication, seed);
INode fnode = fsdir.getINode4Write(parent.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, currentSSDConsumed);
// Create the 2nd file of size BLOCKSIZE under child directory and expect quota exceeded exception
Path createdFile2 = new Path(child, "created_file2.data");
long file2Len = BLOCKSIZE;
try {
DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, replication, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, currentSSDConsumed);
}
}
@Test
@Timeout(value = 60)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
final Path parent = new Path(dir, "parent");
final Path child = new Path(parent, "child");
dfs.mkdirs(parent);
dfs.mkdirs(child);
dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);
// Create file of size 2.5 * BLOCKSIZE under child directory
// Verify parent Quota applies
Path createdFile1 = new Path(child, "created_file1.data");
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
try {
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
REPLICATION, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
  /**
   * Both traditional space quota and the storage type quota for SSD are set and
   * not exceeded: create and delete succeed, and namespace/storage-space counters are
   * updated consistently in both the quota feature and computeQuotaUsage.
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception {
    final Path foo = new Path(dir, "foo");
    dfs.mkdirs(foo);
    dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
    dfs.setQuota(foo, Long.MAX_VALUE - 1, REPLICATION * BLOCKSIZE * 10);
    INode fnode = fsdir.getINode4Write(foo.toString());
    assertTrue(fnode.isDirectory());
    assertTrue(fnode.isQuotaSet());
    Path createdFile = new Path(foo, "created_file.data");
    long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
    DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
        fileLen, BLOCKSIZE, REPLICATION, seed);
    // namespace = directory "foo" + the new file
    QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed();
    assertEquals(2, cnt.getNameSpace());
    assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
    dfs.delete(createdFile, true);
    // after delete only the directory itself remains in the namespace count
    QuotaCounts cntAfterDelete = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed();
    assertEquals(1, cntAfterDelete.getNameSpace());
    assertEquals(0, cntAfterDelete.getStorageSpace());
    // Validate the computeQuotaUsage()
    QuotaCounts counts = fnode.computeQuotaUsage(
        fsn.getBlockManager().getStoragePolicySuite(), true);
    assertEquals(1, counts.getNameSpace(), fnode.dumpTreeRecursively().toString());
    assertEquals(0, counts.getStorageSpace(), fnode.dumpTreeRecursively().toString());
  }
  /**
   * Both traditional space quota and the storage type quota for SSD are set and
   * exceeded. Expect DSQuotaExceededException to be thrown, as the traditional
   * space quota is checked before the storage type quota.
   * Args: (storageSpaceQuotaInBlocks, ssdQuotaInBlocks, testFileLenInBlocks, replication).
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeAndTraditionalQuotaException1()
      throws Exception {
    testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
        4 * REPLICATION, 4, 5, REPLICATION);
  }
  /**
   * Both traditional space quota and the storage type quota for SSD are set;
   * the SSD quota is exceeded but the traditional space quota is not.
   * Args: (storageSpaceQuotaInBlocks, ssdQuotaInBlocks, testFileLenInBlocks, replication).
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeAndTraditionalQuotaException2()
      throws Exception {
    testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
        5 * REPLICATION, 4, 5, REPLICATION);
  }
  /**
   * Both traditional space quota and the storage type quota for SSD are set;
   * the traditional space quota is exceeded but the SSD quota is not.
   * Args: (storageSpaceQuotaInBlocks, ssdQuotaInBlocks, testFileLenInBlocks, replication).
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeAndTraditionalQuotaException3()
      throws Exception {
    testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
        4 * REPLICATION, 5, 5, REPLICATION);
  }
private void testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
long storageSpaceQuotaInBlocks, long ssdQuotaInBlocks,
long testFileLenInBlocks, short replication) throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path testDir = new Path(dir, METHOD_NAME);
dfs.mkdirs(testDir);
dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
final long ssdQuota = BLOCKSIZE * ssdQuotaInBlocks;
final long storageSpaceQuota = BLOCKSIZE * storageSpaceQuotaInBlocks;
dfs.setQuota(testDir, Long.MAX_VALUE - 1, storageSpaceQuota);
dfs.setQuotaByStorageType(testDir, StorageType.SSD, ssdQuota);
INode testDirNode = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
Path createdFile = new Path(testDir, "created_file.data");
long fileLen = testFileLenInBlocks * BLOCKSIZE;
try {
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
fileLen, BLOCKSIZE, replication, seed);
fail("Should have failed with DSQuotaExceededException or " +
"QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
long currentSSDConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(Math.min(ssdQuota, storageSpaceQuota / replication), currentSSDConsumed);
}
}
  /**
   * Verify snapshot interaction with storage-type quotas: a snapshot pins the deleted
   * file's SSD usage against the directory until the snapshot itself is deleted, after
   * which the usage is fully reclaimed.
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeWithSnapshot() throws Exception {
    final Path sub1 = new Path(dir, "Sub1");
    dfs.mkdirs(sub1);
    // Setup ONE_SSD policy and SSD quota of 4 * BLOCKSIZE on sub1
    dfs.setStoragePolicy(sub1, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    dfs.setQuotaByStorageType(sub1, StorageType.SSD, 4 * BLOCKSIZE);
    INode sub1Node = fsdir.getINode4Write(sub1.toString());
    assertTrue(sub1Node.isDirectory());
    assertTrue(sub1Node.isQuotaSet());
    // Create file1 of size 2 * BLOCKSIZE under sub1
    Path file1 = new Path(sub1, "file1");
    long file1Len = 2 * BLOCKSIZE;
    DFSTestUtil.createFile(dfs, file1, file1Len, REPLICATION, seed);
    // Create snapshot on sub1 named s1
    SnapshotTestHelper.createSnapshot(dfs, sub1, "s1");
    // Verify sub1 SSD usage is unchanged after creating snapshot s1
    long ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, ssdConsumed);
    // Delete file1
    dfs.delete(file1, false);
    // Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
    ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, ssdConsumed);
    QuotaCounts counts1 = sub1Node.computeQuotaUsage(
        fsn.getBlockManager().getStoragePolicySuite(), true);
    assertEquals(file1Len, counts1.getTypeSpaces().get(StorageType.SSD),
        sub1Node.dumpTreeRecursively().toString());
    ContentSummary cs1 = dfs.getContentSummary(sub1);
    assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION);
    assertEquals(cs1.getTypeConsumed(StorageType.SSD), file1Len);
    assertEquals(cs1.getTypeConsumed(StorageType.DISK), file1Len * 2);
    // Delete the snapshot s1
    dfs.deleteSnapshot(sub1, "s1");
    // Verify sub1 SSD usage is fully reclaimed and changed to 0
    ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(0, ssdConsumed);
    QuotaCounts counts2 = sub1Node.computeQuotaUsage(
        fsn.getBlockManager().getStoragePolicySuite(), true);
    assertEquals(0, counts2.getTypeSpaces().get(StorageType.SSD),
        sub1Node.dumpTreeRecursively().toString());
    ContentSummary cs2 = dfs.getContentSummary(sub1);
    assertEquals(cs2.getSpaceConsumed(), 0);
    assertEquals(cs2.getTypeConsumed(StorageType.SSD), 0);
    assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0);
  }
  /**
   * Create a 2-block file under a ONE_SSD directory with an SSD quota, truncate it to one
   * block, and verify SSD consumption and the content summary shrink accordingly.
   */
  @Test
  @Timeout(value = 60)
  public void testQuotaByStorageTypeWithFileCreateTruncate() throws Exception {
    final Path foo = new Path(dir, "foo");
    Path createdFile1 = new Path(foo, "created_file1.data");
    dfs.mkdirs(foo);
    // set storage policy on directory "foo" to ONESSD
    dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // set quota by storage type on directory "foo"
    dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
    INode fnode = fsdir.getINode4Write(foo.toString());
    assertTrue(fnode.isDirectory());
    assertTrue(fnode.isQuotaSet());
    // Create file of size 2 * BLOCKSIZE under directory "foo"
    long file1Len = BLOCKSIZE * 2;
    int bufLen = BLOCKSIZE / 16;
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
    // Verify SSD consumed before truncate
    long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, ssdConsumed);
    // Truncate file to 1 * BLOCKSIZE
    int newFile1Len = BLOCKSIZE;
    dfs.truncate(createdFile1, newFile1Len);
    // Verify SSD consumed after truncate
    ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(newFile1Len, ssdConsumed);
    // ONE_SSD: total space = 3 replicas, of which 1 on SSD and 2 on DISK.
    ContentSummary cs = dfs.getContentSummary(foo);
    assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION);
    assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len);
    assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2);
  }
@Test
public void testQuotaByStorageTypePersistenceInEditLog() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path testDir = new Path(dir, METHOD_NAME);
Path createdFile1 = new Path(testDir, "created_file1.data");
dfs.mkdirs(testDir);
// set storage policy on testDir to ONESSD
dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on testDir
final long SSD_QUOTA = BLOCKSIZE * 4;
dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
INode testDirNode = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under testDir
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify SSD consumed before namenode restart
long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// Restart namenode to make sure the editlog is correct
cluster.restartNameNode(true);
refreshClusterState();
INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
// Verify quota is still set
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
for (StorageType t: StorageType.getTypesSupportingQuota()) {
if (t != StorageType.SSD) {
assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
}
}
long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
@Test
public void testQuotaByStorageTypePersistenceInFsImage() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path testDir = new Path(dir, METHOD_NAME);
Path createdFile1 = new Path(testDir, "created_file1.data");
dfs.mkdirs(testDir);
// set storage policy on testDir to ONESSD
dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on testDir
final long SSD_QUOTA = BLOCKSIZE * 4;
dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
INode testDirNode = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under testDir
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify SSD consumed before namenode restart
long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// Restart the namenode with checkpoint to make sure fsImage is correct
dfs.setSafeMode(SafeModeAction.ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.LEAVE);
cluster.restartNameNode(true);
refreshClusterState();
INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
for (StorageType t: StorageType.getTypesSupportingQuota()) {
if (t != StorageType.SSD) {
assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
}
}
long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
@Test
@Timeout(value = 60)
public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
// set storage policy on directory "foo" to ONESSD
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(!fnode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify getContentSummary without any quota set
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
@Test
@Timeout(value = 60)
public void testContentSummaryWithoutStoragePolicy() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(!fnode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify getContentSummary without any quota set
// Expect no type quota and usage information available
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
for (StorageType t : StorageType.values()) {
assertEquals(cs.getTypeConsumed(t), 0);
assertEquals(cs.getTypeQuota(t), -1);
}
}
/**
* Tests space quota for storage policy = WARM.
*/
@Test
public void testStorageSpaceQuotaWithWarmPolicy() throws IOException {
final Path testDir = new Path(dir,
GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(testDir));
/* set policy to HOT */
dfs.setStoragePolicy(testDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
/* init space quota */
final long storageSpaceQuota = BLOCKSIZE * 6;
final long storageTypeSpaceQuota = BLOCKSIZE * 1;
/* set space quota */
dfs.setQuota(testDir, HdfsConstants.QUOTA_DONT_SET, storageSpaceQuota);
/* init vars */
Path createdFile;
final long fileLen = BLOCKSIZE;
/**
* create one file with 3 replicas, REPLICATION * BLOCKSIZE go to DISK due
* to HOT policy
*/
createdFile = new Path(testDir, "file1.data");
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE,
REPLICATION, seed);
assertTrue(dfs.exists(createdFile));
assertTrue(dfs.isFile(createdFile));
/* set space quota for DISK */
dfs.setQuotaByStorageType(testDir, StorageType.DISK, storageTypeSpaceQuota);
/* set policy to WARM */
dfs.setStoragePolicy(testDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
/* create another file with 3 replicas */
try {
createdFile = new Path(testDir, "file2.data");
/**
* This will fail since quota on DISK is 1 block but space consumed on
* DISK is already 3 blocks due to the first file creation.
*/
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen,
BLOCKSIZE, REPLICATION, seed);
fail("should fail on QuotaByStorageTypeExceededException");
} catch (QuotaByStorageTypeExceededException e) {
LOG.info("Got expected exception ", e);
assertThat(e.toString())
.contains("Quota by storage type",
"DISK on path",
testDir.toString());
}
}
/**
* Tests if changing replication factor results in copying file as quota
* doesn't exceed.
*/
@Test
@Timeout(value = 30)
public void testStorageSpaceQuotaWithRepFactor() throws IOException {
final Path testDir = new Path(dir,
GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(testDir));
final long storageSpaceQuota = BLOCKSIZE * 2;
/* set policy to HOT */
dfs.setStoragePolicy(testDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
/* set space quota */
dfs.setQuota(testDir, HdfsConstants.QUOTA_DONT_SET, storageSpaceQuota);
/* init vars */
Path createdFile = null;
final long fileLen = BLOCKSIZE;
try {
/* create one file with 3 replicas */
createdFile = new Path(testDir, "file1.data");
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen,
BLOCKSIZE, REPLICATION, seed);
fail("should fail on DSQuotaExceededException");
} catch (DSQuotaExceededException e) {
LOG.info("Got expected exception ", e);
assertThat(e.toString())
.contains("DiskSpace quota", testDir.toString());
}
/* try creating file again with 2 replicas */
createdFile = new Path(testDir, "file2.data");
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE,
(short) 2, seed);
assertTrue(dfs.exists(createdFile));
assertTrue(dfs.isFile(createdFile));
}
/**
* Tests if clearing quota per heterogeneous storage doesn't result in
* clearing quota for another storage.
*
* @throws IOException
*/
@Test
@Timeout(value = 30)
public void testStorageSpaceQuotaPerQuotaClear() throws IOException {
final Path testDir = new Path(dir,
GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(testDir));
final long diskSpaceQuota = BLOCKSIZE * 1;
final long ssdSpaceQuota = BLOCKSIZE * 2;
/* set space quota */
dfs.setQuotaByStorageType(testDir, StorageType.DISK, diskSpaceQuota);
dfs.setQuotaByStorageType(testDir, StorageType.SSD, ssdSpaceQuota);
final INode testDirNode = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
/* verify space quota by storage type */
assertEquals(diskSpaceQuota,
testDirNode.asDirectory().getDirectoryWithQuotaFeature().getQuota()
.getTypeSpace(StorageType.DISK));
assertEquals(ssdSpaceQuota,
testDirNode.asDirectory().getDirectoryWithQuotaFeature().getQuota()
.getTypeSpace(StorageType.SSD));
/* clear DISK space quota */
dfs.setQuotaByStorageType(
testDir,
StorageType.DISK,
HdfsConstants.QUOTA_RESET);
/* verify space quota by storage type after clearing DISK's */
assertEquals(-1,
testDirNode.asDirectory().getDirectoryWithQuotaFeature().getQuota()
.getTypeSpace(StorageType.DISK));
assertEquals(ssdSpaceQuota,
testDirNode.asDirectory().getDirectoryWithQuotaFeature().getQuota()
.getTypeSpace(StorageType.SSD));
}
}
| TestQuotaByStorageType |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/graph/util/StreamNodeUpdateRequestInfo.java | {
"start": 1090,
"end": 1758
} | class ____ {
private final Integer nodeId;
// Null means it does not request to change the typeSerializersIn.
@Nullable private TypeSerializer<?>[] typeSerializersIn;
public StreamNodeUpdateRequestInfo(Integer nodeId) {
this.nodeId = nodeId;
}
public StreamNodeUpdateRequestInfo withTypeSerializersIn(
TypeSerializer<?>[] typeSerializersIn) {
this.typeSerializersIn = typeSerializersIn;
return this;
}
public Integer getNodeId() {
return nodeId;
}
@Nullable
public TypeSerializer<?>[] getTypeSerializersIn() {
return typeSerializersIn;
}
}
| StreamNodeUpdateRequestInfo |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java | {
"start": 1233,
"end": 2207
} | class ____ extends
MultipartUploaderBuilderImpl<S3AMultipartUploader, S3AMultipartUploaderBuilder> {
private final WriteOperations writeOperations;
private final StoreContext context;
private final S3AMultipartUploaderStatistics statistics;
public S3AMultipartUploaderBuilder(
@Nonnull final S3AFileSystem fileSystem,
@Nonnull final WriteOperations writeOperations,
@Nonnull final StoreContext context,
@Nonnull final Path p,
@Nonnull final S3AMultipartUploaderStatistics statistics) {
super(fileSystem, p);
this.writeOperations = writeOperations;
this.context = context;
this.statistics = statistics;
}
@Override
public S3AMultipartUploaderBuilder getThisBuilder() {
return this;
}
@Override
public S3AMultipartUploader build()
throws IllegalArgumentException, IOException {
return new S3AMultipartUploader(this, writeOperations, context, statistics);
}
}
| S3AMultipartUploaderBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/notfound/ignore/NotFoundIgnoreManyToOneTest.java | {
"start": 1469,
"end": 10822
} | class ____ {
@Test
@JiraKey( "HHH-15060" )
public void testProxyCoin(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
// Coin#1 has the broken fk
final Coin proxy = session.byId( Coin.class ).getReference( 1 );
assertThat( proxy ).isNotNull();
Hibernate.initialize( proxy );
assertThat( Hibernate.isInitialized( proxy ) ).isTrue();
assertThat( proxy.getCurrency() ).isNull();
} );
}
@Test
@JiraKey( "HHH-15060" )
public void testProxyCurrency(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
// Currency#1 does not exist
final Currency proxy = session.byId( Currency.class ).getReference( 1 );
try {
Hibernate.initialize( proxy );
Assertions.fail( "Expecting ObjectNotFoundException" );
}
catch (ObjectNotFoundException expected) {
assertThat( expected.getEntityName() ).endsWith( "Currency" );
assertThat( expected.getIdentifier() ).isEqualTo( 1 );
}
} );
}
@Test
@JiraKey( "HHH-15060" )
public void testGet(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( (session) -> {
session.get( Coin.class, 2 );
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Coin " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Currency " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " inner " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " cross " );
} );
statementInspector.clear();
scope.inTransaction( (session) -> {
final Coin coin = session.get( Coin.class, 1 );
assertThat( coin.getCurrency() ).isNull();
// technically we could use a subsequent-select rather than a join...
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " inner " );
} );
}
@Test
@JiraKey( "HHH-15060" )
public void testQueryImplicitPathDereferencePredicateBaseline(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( (session) -> {
final String hql = "select c from Coin c where c.currency.name = 'Euro'";
final List<Coin> coins = session.createSelectionQuery( hql, Coin.class ).getResultList();
assertThat( coins ).hasSize( 0 );
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " left " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " cross " );
} );
}
@Test
@JiraKey( "HHH-15060" )
public void testQueryImplicitPathDereferencePredicateBaseline2(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( (session) -> {
final String hql = "select c.id from Coin c where c.currency.id = 1";
final List<Integer> coins = session.createSelectionQuery( hql, Integer.class ).getResultList();
assertThat( coins ).isEmpty();
// technically we could use a subsequent-select rather than a join...
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " left " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " cross " );
} );
}
/**
* Baseline for {@link #testQueryImplicitPathDereferencePredicate}. Ultimately, we want
* SQL generated there to behave exactly the same as this query
*/
@Test
@JiraKey( "HHH-15060" )
public void testQueryImplicitPathDereferencePredicateBaseline3(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( (session) -> {
final String hql = "select c from Coin c join fetch c.currency c2 where c2.name = 'USD'";
final List<Coin> coins = session.createQuery( hql, Coin.class ).getResultList();
assertThat( coins ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Coin " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Currency " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " left " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " cross " );
} );
statementInspector.clear();
scope.inTransaction( (session) -> {
final String hql = "select c from Coin c join fetch c.currency c2 where c2.id = 1";
final List<Coin> coins = session.createQuery( hql, Coin.class ).getResultList();
assertThat( coins ).hasSize( 0 );
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Coin " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Currency " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " left " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " cross " );
} );
}
@Test
@JiraKey( "HHH-15060" )
public void testQueryImplicitPathDereferencePredicate(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( (session) -> {
final String hql = "select c from Coin c where c.currency.id = 1";
final List<Coin> coins = session.createSelectionQuery( hql, Coin.class ).getResultList();
assertThat( coins ).isEmpty();
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " left " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " cross " );
} );
}
@Test
@JiraKey( "HHH-15060" )
public void testQueryOwnerSelection(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
final String hql = "select c from Coin c where c.id = 1";
final Coin coin = session.createQuery( hql, Coin.class ).uniqueResult();
assertThat( coin ).isNotNull();
assertThat( Hibernate.isPropertyInitialized( coin, "currency" ) ).isTrue();
assertThat( Hibernate.isInitialized( coin.getCurrency() ) ).isTrue();
assertThat( coin.getCurrency() ).isNull();
} );
scope.inTransaction( (session) -> {
final String hql = "select c from Coin c where c.id = 2";
final Coin coin = session.createQuery( hql, Coin.class ).uniqueResult();
assertThat( Hibernate.isPropertyInitialized( coin, "currency" ) ).isTrue();
assertThat( Hibernate.isInitialized( coin.getCurrency() ) ).isTrue();
} );
}
@Test
@JiraKey( "HHH-15060" )
public void testQueryAssociationSelection(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
statementInspector.clear();
scope.inTransaction( (session) -> {
final String hql = "select c.currency from Coin c where c.id = 1";
final List<Currency> currencies = session.createSelectionQuery( hql, Currency.class ).getResultList();
assertThat( currencies ).hasSize( 0 );
assertThat( statementInspector.getSqlQueries() ).hasSize( 1 );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Coin " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " Currency " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).contains( " join " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " left " );
assertThat( statementInspector.getSqlQueries().get( 0 ) ).doesNotContain( " cross " );
} );
}
@BeforeEach
protected void prepareTestData(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
Currency euro = new Currency( 1, "Euro" );
Coin fiveC = new Coin( 1, "Five cents", euro );
session.persist( euro );
session.persist( fiveC );
Currency usd = new Currency( 2, "USD" );
Coin penny = new Coin( 2, "Penny", usd );
session.persist( usd );
session.persist( penny );
} );
scope.inTransaction( (session) -> {
session.createMutationQuery( "delete Currency where id = 1" ).executeUpdate();
} );
}
@AfterEach
protected void dropTestData(SessionFactoryScope scope) throws Exception {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity(name = "Coin")
public static | NotFoundIgnoreManyToOneTest |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalSerde.java | {
"start": 941,
"end": 1142
} | interface ____ {@link KafkaPrincipal} for the purpose of inter-broker forwarding.
* Any serialization/deserialization failure should raise a {@link SerializationException} to be consistent.
*/
public | for |
java | google__guice | extensions/grapher/src/com/google/inject/grapher/Node.java | {
"start": 798,
"end": 1822
} | class ____ {
/**
* When set to true, the source object is ignored in {@link #equals} and {@link #hashCode}. Only
* used in tests.
*/
static boolean ignoreSourceInComparisons = false;
private final NodeId id;
private final Object source;
protected Node(NodeId id, Object source) {
this.id = id;
this.source = source;
}
public NodeId getId() {
return id;
}
public Object getSource() {
return source;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Node)) {
return false;
}
Node other = (Node) obj;
return Objects.equal(id, other.id)
&& (ignoreSourceInComparisons || Objects.equal(source, other.source));
}
@Override
public int hashCode() {
return ignoreSourceInComparisons ? id.hashCode() : Objects.hashCode(id, source);
}
/**
* Returns a copy of the node with a new ID.
*
* @param id new ID of the node
* @return copy of the node with a new ID
*/
public abstract Node copy(NodeId id);
}
| Node |
java | netty__netty | buffer/src/main/java/io/netty/buffer/AdaptivePoolingAllocator.java | {
"start": 47934,
"end": 54558
} | class ____ implements ChunkInfo {
protected final AbstractByteBuf delegate;
protected Magazine magazine;
private final AdaptivePoolingAllocator allocator;
private final ChunkReleasePredicate chunkReleasePredicate;
// Always populate the refCnt field, so HotSpot doesn't emit `null` checks.
// This is safe to do even on native-image.
private final RefCnt refCnt = new RefCnt();
private final int capacity;
private final boolean pooled;
protected int allocatedBytes;
Chunk() {
// Constructor only used by the MAGAZINE_FREED sentinel.
delegate = null;
magazine = null;
allocator = null;
chunkReleasePredicate = null;
capacity = 0;
pooled = false;
}
Chunk(AbstractByteBuf delegate, Magazine magazine, boolean pooled,
ChunkReleasePredicate chunkReleasePredicate) {
this.delegate = delegate;
this.pooled = pooled;
capacity = delegate.capacity();
attachToMagazine(magazine);
// We need the top-level allocator so ByteBuf.capacity(int) can call reallocate()
allocator = magazine.group.allocator;
this.chunkReleasePredicate = chunkReleasePredicate;
if (PlatformDependent.isJfrEnabled() && AllocateChunkEvent.isEventEnabled()) {
AllocateChunkEvent event = new AllocateChunkEvent();
if (event.shouldCommit()) {
event.fill(this, AdaptiveByteBufAllocator.class);
event.pooled = pooled;
event.threadLocal = magazine.allocationLock == null;
event.commit();
}
}
}
Magazine currentMagazine() {
return magazine;
}
void detachFromMagazine() {
if (magazine != null) {
magazine = null;
}
}
void attachToMagazine(Magazine magazine) {
assert this.magazine == null;
this.magazine = magazine;
}
/**
* Called when a magazine is done using this chunk, probably because it was emptied.
*/
boolean releaseFromMagazine() {
return release();
}
/**
* Called when a ByteBuf is done using its allocation in this chunk.
*/
boolean releaseSegment(int ignoredSegmentId) {
return release();
}
private void retain() {
RefCnt.retain(refCnt);
}
protected boolean release() {
boolean deallocate = RefCnt.release(refCnt);
if (deallocate) {
deallocate();
}
return deallocate;
}
protected void deallocate() {
Magazine mag = magazine;
int chunkSize = delegate.capacity();
if (!pooled || chunkReleasePredicate.shouldReleaseChunk(chunkSize) || mag == null) {
// Drop the chunk if the parent allocator is closed,
// or if the chunk deviates too much from the preferred chunk size.
detachFromMagazine();
onRelease();
allocator.chunkRegistry.remove(this);
delegate.release();
} else {
RefCnt.resetRefCnt(refCnt);
delegate.setIndex(0, 0);
allocatedBytes = 0;
if (!mag.trySetNextInLine(this)) {
// As this Chunk does not belong to the mag anymore we need to decrease the used memory .
detachFromMagazine();
if (!mag.offerToQueue(this)) {
// The central queue is full. Ensure we release again as we previously did use resetRefCnt()
// which did increase the reference count by 1.
boolean released = RefCnt.release(refCnt);
onRelease();
allocator.chunkRegistry.remove(this);
delegate.release();
assert released;
} else {
onReturn(false);
}
} else {
onReturn(true);
}
}
}
private void onReturn(boolean returnedToMagazine) {
if (PlatformDependent.isJfrEnabled() && ReturnChunkEvent.isEventEnabled()) {
ReturnChunkEvent event = new ReturnChunkEvent();
if (event.shouldCommit()) {
event.fill(this, AdaptiveByteBufAllocator.class);
event.returnedToMagazine = returnedToMagazine;
event.commit();
}
}
}
private void onRelease() {
if (PlatformDependent.isJfrEnabled() && FreeChunkEvent.isEventEnabled()) {
FreeChunkEvent event = new FreeChunkEvent();
if (event.shouldCommit()) {
event.fill(this, AdaptiveByteBufAllocator.class);
event.pooled = pooled;
event.commit();
}
}
}
public void readInitInto(AdaptiveByteBuf buf, int size, int startingCapacity, int maxCapacity) {
int startIndex = allocatedBytes;
allocatedBytes = startIndex + startingCapacity;
Chunk chunk = this;
chunk.retain();
try {
buf.init(delegate, chunk, 0, 0, startIndex, size, startingCapacity, maxCapacity);
chunk = null;
} finally {
if (chunk != null) {
// If chunk is not null we know that buf.init(...) failed and so we need to manually release
// the chunk again as we retained it before calling buf.init(...). Beside this we also need to
// restore the old allocatedBytes value.
allocatedBytes = startIndex;
chunk.release();
}
}
}
public int remainingCapacity() {
return capacity - allocatedBytes;
}
@Override
public int capacity() {
return capacity;
}
@Override
public boolean isDirect() {
return delegate.isDirect();
}
@Override
public long memoryAddress() {
return delegate._memoryAddress();
}
}
private static final | Chunk |
java | google__error-prone | core/src/test/java/com/google/errorprone/ErrorProneJavaCompilerTest.java | {
"start": 18739,
"end": 19052
} | class ____ {
String s = "old-value";
}
""");
}
@Test
public void patchAllWithDisableAllChecks() throws IOException {
JavaFileObject fileObject =
createOnDiskFileObject(
"StringConstantWrapper.java",
"""
| StringConstantWrapper |
java | netty__netty | handler/src/test/java/io/netty/handler/ssl/CipherSuiteConverterTest.java | {
"start": 1123,
"end": 27144
} | class ____ {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(CipherSuiteConverterTest.class);
@Test
public void testJ2OMappings() throws Exception {
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "ECDHE-ECDSA-AES128-SHA256");
testJ2OMapping("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "ECDHE-RSA-AES128-SHA256");
testJ2OMapping("TLS_RSA_WITH_AES_128_CBC_SHA256", "AES128-SHA256");
testJ2OMapping("TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256", "ECDH-ECDSA-AES128-SHA256");
testJ2OMapping("TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256", "ECDH-RSA-AES128-SHA256");
testJ2OMapping("TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", "DHE-RSA-AES128-SHA256");
testJ2OMapping("TLS_DHE_DSS_WITH_AES_128_CBC_SHA256", "DHE-DSS-AES128-SHA256");
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "ECDHE-ECDSA-AES128-SHA");
testJ2OMapping("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "ECDHE-RSA-AES128-SHA");
testJ2OMapping("TLS_RSA_WITH_AES_128_CBC_SHA", "AES128-SHA");
testJ2OMapping("TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA", "ECDH-ECDSA-AES128-SHA");
testJ2OMapping("TLS_ECDH_RSA_WITH_AES_128_CBC_SHA", "ECDH-RSA-AES128-SHA");
testJ2OMapping("TLS_DHE_RSA_WITH_AES_128_CBC_SHA", "DHE-RSA-AES128-SHA");
testJ2OMapping("TLS_DHE_DSS_WITH_AES_128_CBC_SHA", "DHE-DSS-AES128-SHA");
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "ECDHE-ECDSA-AES128-GCM-SHA256");
testJ2OMapping("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "ECDHE-RSA-AES128-GCM-SHA256");
testJ2OMapping("TLS_RSA_WITH_AES_128_GCM_SHA256", "AES128-GCM-SHA256");
testJ2OMapping("TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256", "ECDH-ECDSA-AES128-GCM-SHA256");
testJ2OMapping("TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256", "ECDH-RSA-AES128-GCM-SHA256");
testJ2OMapping("TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", "DHE-RSA-AES128-GCM-SHA256");
testJ2OMapping("TLS_DHE_DSS_WITH_AES_128_GCM_SHA256", "DHE-DSS-AES128-GCM-SHA256");
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", "ECDHE-ECDSA-DES-CBC3-SHA");
testJ2OMapping("TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", "ECDHE-RSA-DES-CBC3-SHA");
testJ2OMapping("SSL_RSA_WITH_3DES_EDE_CBC_SHA", "DES-CBC3-SHA");
testJ2OMapping("TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA", "ECDH-ECDSA-DES-CBC3-SHA");
testJ2OMapping("TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA", "ECDH-RSA-DES-CBC3-SHA");
testJ2OMapping("SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA", "DHE-RSA-DES-CBC3-SHA");
testJ2OMapping("SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA", "DHE-DSS-DES-CBC3-SHA");
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", "ECDHE-ECDSA-RC4-SHA");
testJ2OMapping("TLS_ECDHE_RSA_WITH_RC4_128_SHA", "ECDHE-RSA-RC4-SHA");
testJ2OMapping("SSL_RSA_WITH_RC4_128_SHA", "RC4-SHA");
testJ2OMapping("TLS_ECDH_ECDSA_WITH_RC4_128_SHA", "ECDH-ECDSA-RC4-SHA");
testJ2OMapping("TLS_ECDH_RSA_WITH_RC4_128_SHA", "ECDH-RSA-RC4-SHA");
testJ2OMapping("SSL_RSA_WITH_RC4_128_MD5", "RC4-MD5");
testJ2OMapping("TLS_DH_anon_WITH_AES_128_GCM_SHA256", "ADH-AES128-GCM-SHA256");
testJ2OMapping("TLS_DH_anon_WITH_AES_128_CBC_SHA256", "ADH-AES128-SHA256");
testJ2OMapping("TLS_ECDH_anon_WITH_AES_128_CBC_SHA", "AECDH-AES128-SHA");
testJ2OMapping("TLS_DH_anon_WITH_AES_128_CBC_SHA", "ADH-AES128-SHA");
testJ2OMapping("TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA", "AECDH-DES-CBC3-SHA");
testJ2OMapping("SSL_DH_anon_WITH_3DES_EDE_CBC_SHA", "ADH-DES-CBC3-SHA");
testJ2OMapping("TLS_ECDH_anon_WITH_RC4_128_SHA", "AECDH-RC4-SHA");
testJ2OMapping("SSL_DH_anon_WITH_RC4_128_MD5", "ADH-RC4-MD5");
testJ2OMapping("SSL_RSA_WITH_DES_CBC_SHA", "DES-CBC-SHA");
testJ2OMapping("SSL_DHE_RSA_WITH_DES_CBC_SHA", "DHE-RSA-DES-CBC-SHA");
testJ2OMapping("SSL_DHE_DSS_WITH_DES_CBC_SHA", "DHE-DSS-DES-CBC-SHA");
testJ2OMapping("SSL_DH_anon_WITH_DES_CBC_SHA", "ADH-DES-CBC-SHA");
testJ2OMapping("SSL_RSA_EXPORT_WITH_DES40_CBC_SHA", "EXP-DES-CBC-SHA");
testJ2OMapping("SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA", "EXP-DHE-RSA-DES-CBC-SHA");
testJ2OMapping("SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA", "EXP-DHE-DSS-DES-CBC-SHA");
testJ2OMapping("SSL_DH_anon_EXPORT_WITH_DES40_CBC_SHA", "EXP-ADH-DES-CBC-SHA");
testJ2OMapping("SSL_RSA_EXPORT_WITH_RC4_40_MD5", "EXP-RC4-MD5");
testJ2OMapping("SSL_DH_anon_EXPORT_WITH_RC4_40_MD5", "EXP-ADH-RC4-MD5");
testJ2OMapping("TLS_RSA_WITH_NULL_SHA256", "NULL-SHA256");
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_NULL_SHA", "ECDHE-ECDSA-NULL-SHA");
testJ2OMapping("TLS_ECDHE_RSA_WITH_NULL_SHA", "ECDHE-RSA-NULL-SHA");
testJ2OMapping("SSL_RSA_WITH_NULL_SHA", "NULL-SHA");
testJ2OMapping("TLS_ECDH_ECDSA_WITH_NULL_SHA", "ECDH-ECDSA-NULL-SHA");
testJ2OMapping("TLS_ECDH_RSA_WITH_NULL_SHA", "ECDH-RSA-NULL-SHA");
testJ2OMapping("TLS_ECDH_anon_WITH_NULL_SHA", "AECDH-NULL-SHA");
testJ2OMapping("SSL_RSA_WITH_NULL_MD5", "NULL-MD5");
testJ2OMapping("TLS_KRB5_WITH_3DES_EDE_CBC_SHA", "KRB5-DES-CBC3-SHA");
testJ2OMapping("TLS_KRB5_WITH_3DES_EDE_CBC_MD5", "KRB5-DES-CBC3-MD5");
testJ2OMapping("TLS_KRB5_WITH_RC4_128_SHA", "KRB5-RC4-SHA");
testJ2OMapping("TLS_KRB5_WITH_RC4_128_MD5", "KRB5-RC4-MD5");
testJ2OMapping("TLS_KRB5_WITH_DES_CBC_SHA", "KRB5-DES-CBC-SHA");
testJ2OMapping("TLS_KRB5_WITH_DES_CBC_MD5", "KRB5-DES-CBC-MD5");
testJ2OMapping("TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA", "EXP-KRB5-DES-CBC-SHA");
testJ2OMapping("TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5", "EXP-KRB5-DES-CBC-MD5");
testJ2OMapping("TLS_KRB5_EXPORT_WITH_RC4_40_SHA", "EXP-KRB5-RC4-SHA");
testJ2OMapping("TLS_KRB5_EXPORT_WITH_RC4_40_MD5", "EXP-KRB5-RC4-MD5");
testJ2OMapping("SSL_RSA_EXPORT_WITH_RC2_CBC_40_MD5", "EXP-RC2-CBC-MD5");
testJ2OMapping("TLS_DHE_DSS_WITH_AES_256_CBC_SHA", "DHE-DSS-AES256-SHA");
testJ2OMapping("TLS_DHE_RSA_WITH_AES_256_CBC_SHA", "DHE-RSA-AES256-SHA");
testJ2OMapping("TLS_DH_anon_WITH_AES_256_CBC_SHA", "ADH-AES256-SHA");
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "ECDHE-ECDSA-AES256-SHA");
testJ2OMapping("TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "ECDHE-RSA-AES256-SHA");
testJ2OMapping("TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA", "ECDH-ECDSA-AES256-SHA");
testJ2OMapping("TLS_ECDH_RSA_WITH_AES_256_CBC_SHA", "ECDH-RSA-AES256-SHA");
testJ2OMapping("TLS_ECDH_anon_WITH_AES_256_CBC_SHA", "AECDH-AES256-SHA");
testJ2OMapping("TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5", "EXP-KRB5-RC2-CBC-MD5");
testJ2OMapping("TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA", "EXP-KRB5-RC2-CBC-SHA");
testJ2OMapping("TLS_RSA_WITH_AES_256_CBC_SHA", "AES256-SHA");
// For historical reasons the CHACHA20 ciphers do not follow OpenSSL's custom naming
// convention and omits the HMAC algorithm portion of the name.
testJ2OMapping("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-RSA-CHACHA20-POLY1305");
testJ2OMapping("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-ECDSA-CHACHA20-POLY1305");
testJ2OMapping("TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "DHE-RSA-CHACHA20-POLY1305");
testJ2OMapping("TLS_PSK_WITH_CHACHA20_POLY1305_SHA256", "PSK-CHACHA20-POLY1305");
testJ2OMapping("TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-PSK-CHACHA20-POLY1305");
testJ2OMapping("TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256", "DHE-PSK-CHACHA20-POLY1305");
testJ2OMapping("TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256", "RSA-PSK-CHACHA20-POLY1305");
testJ2OMapping("TLS_AES_128_GCM_SHA256", "TLS_AES_128_GCM_SHA256");
testJ2OMapping("TLS_AES_256_GCM_SHA384", "TLS_AES_256_GCM_SHA384");
testJ2OMapping("TLS_CHACHA20_POLY1305_SHA256", "TLS_CHACHA20_POLY1305_SHA256");
}
private static void testJ2OMapping(String javaCipherSuite, String openSslCipherSuite) {
final String actual = CipherSuiteConverter.toOpenSslUncached(javaCipherSuite, false);
logger.info("{} => {}", javaCipherSuite, actual);
assertEquals(openSslCipherSuite, actual);
}
@Test
public void testO2JMappings() throws Exception {
testO2JMapping("ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "ECDHE-ECDSA-AES128-SHA256");
testO2JMapping("ECDHE_RSA_WITH_AES_128_CBC_SHA256", "ECDHE-RSA-AES128-SHA256");
testO2JMapping("RSA_WITH_AES_128_CBC_SHA256", "AES128-SHA256");
testO2JMapping("ECDH_ECDSA_WITH_AES_128_CBC_SHA256", "ECDH-ECDSA-AES128-SHA256");
testO2JMapping("ECDH_RSA_WITH_AES_128_CBC_SHA256", "ECDH-RSA-AES128-SHA256");
testO2JMapping("DHE_RSA_WITH_AES_128_CBC_SHA256", "DHE-RSA-AES128-SHA256");
testO2JMapping("DHE_DSS_WITH_AES_128_CBC_SHA256", "DHE-DSS-AES128-SHA256");
testO2JMapping("ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "ECDHE-ECDSA-AES128-SHA");
testO2JMapping("ECDHE_RSA_WITH_AES_128_CBC_SHA", "ECDHE-RSA-AES128-SHA");
testO2JMapping("RSA_WITH_AES_128_CBC_SHA", "AES128-SHA");
testO2JMapping("ECDH_ECDSA_WITH_AES_128_CBC_SHA", "ECDH-ECDSA-AES128-SHA");
testO2JMapping("ECDH_RSA_WITH_AES_128_CBC_SHA", "ECDH-RSA-AES128-SHA");
testO2JMapping("DHE_RSA_WITH_AES_128_CBC_SHA", "DHE-RSA-AES128-SHA");
testO2JMapping("DHE_DSS_WITH_AES_128_CBC_SHA", "DHE-DSS-AES128-SHA");
testO2JMapping("ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "ECDHE-ECDSA-AES128-GCM-SHA256");
testO2JMapping("ECDHE_RSA_WITH_AES_128_GCM_SHA256", "ECDHE-RSA-AES128-GCM-SHA256");
testO2JMapping("RSA_WITH_AES_128_GCM_SHA256", "AES128-GCM-SHA256");
testO2JMapping("ECDH_ECDSA_WITH_AES_128_GCM_SHA256", "ECDH-ECDSA-AES128-GCM-SHA256");
testO2JMapping("ECDH_RSA_WITH_AES_128_GCM_SHA256", "ECDH-RSA-AES128-GCM-SHA256");
testO2JMapping("DHE_RSA_WITH_AES_128_GCM_SHA256", "DHE-RSA-AES128-GCM-SHA256");
testO2JMapping("DHE_DSS_WITH_AES_128_GCM_SHA256", "DHE-DSS-AES128-GCM-SHA256");
testO2JMapping("ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", "ECDHE-ECDSA-DES-CBC3-SHA");
testO2JMapping("ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", "ECDHE-RSA-DES-CBC3-SHA");
testO2JMapping("RSA_WITH_3DES_EDE_CBC_SHA", "DES-CBC3-SHA");
testO2JMapping("ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA", "ECDH-ECDSA-DES-CBC3-SHA");
testO2JMapping("ECDH_RSA_WITH_3DES_EDE_CBC_SHA", "ECDH-RSA-DES-CBC3-SHA");
testO2JMapping("DHE_RSA_WITH_3DES_EDE_CBC_SHA", "DHE-RSA-DES-CBC3-SHA");
testO2JMapping("DHE_DSS_WITH_3DES_EDE_CBC_SHA", "DHE-DSS-DES-CBC3-SHA");
testO2JMapping("ECDHE_ECDSA_WITH_RC4_128_SHA", "ECDHE-ECDSA-RC4-SHA");
testO2JMapping("ECDHE_RSA_WITH_RC4_128_SHA", "ECDHE-RSA-RC4-SHA");
testO2JMapping("RSA_WITH_RC4_128_SHA", "RC4-SHA");
testO2JMapping("ECDH_ECDSA_WITH_RC4_128_SHA", "ECDH-ECDSA-RC4-SHA");
testO2JMapping("ECDH_RSA_WITH_RC4_128_SHA", "ECDH-RSA-RC4-SHA");
testO2JMapping("RSA_WITH_RC4_128_MD5", "RC4-MD5");
testO2JMapping("DH_anon_WITH_AES_128_GCM_SHA256", "ADH-AES128-GCM-SHA256");
testO2JMapping("DH_anon_WITH_AES_128_CBC_SHA256", "ADH-AES128-SHA256");
testO2JMapping("ECDH_anon_WITH_AES_128_CBC_SHA", "AECDH-AES128-SHA");
testO2JMapping("DH_anon_WITH_AES_128_CBC_SHA", "ADH-AES128-SHA");
testO2JMapping("ECDH_anon_WITH_3DES_EDE_CBC_SHA", "AECDH-DES-CBC3-SHA");
testO2JMapping("DH_anon_WITH_3DES_EDE_CBC_SHA", "ADH-DES-CBC3-SHA");
testO2JMapping("ECDH_anon_WITH_RC4_128_SHA", "AECDH-RC4-SHA");
testO2JMapping("DH_anon_WITH_RC4_128_MD5", "ADH-RC4-MD5");
testO2JMapping("RSA_WITH_DES_CBC_SHA", "DES-CBC-SHA");
testO2JMapping("DHE_RSA_WITH_DES_CBC_SHA", "DHE-RSA-DES-CBC-SHA");
testO2JMapping("DHE_DSS_WITH_DES_CBC_SHA", "DHE-DSS-DES-CBC-SHA");
testO2JMapping("DH_anon_WITH_DES_CBC_SHA", "ADH-DES-CBC-SHA");
testO2JMapping("RSA_EXPORT_WITH_DES_CBC_40_SHA", "EXP-DES-CBC-SHA");
testO2JMapping("DHE_RSA_EXPORT_WITH_DES_CBC_40_SHA", "EXP-DHE-RSA-DES-CBC-SHA");
testO2JMapping("DHE_DSS_EXPORT_WITH_DES_CBC_40_SHA", "EXP-DHE-DSS-DES-CBC-SHA");
testO2JMapping("DH_anon_EXPORT_WITH_DES_CBC_40_SHA", "EXP-ADH-DES-CBC-SHA");
testO2JMapping("RSA_EXPORT_WITH_RC4_40_MD5", "EXP-RC4-MD5");
testO2JMapping("DH_anon_EXPORT_WITH_RC4_40_MD5", "EXP-ADH-RC4-MD5");
testO2JMapping("RSA_WITH_NULL_SHA256", "NULL-SHA256");
testO2JMapping("ECDHE_ECDSA_WITH_NULL_SHA", "ECDHE-ECDSA-NULL-SHA");
testO2JMapping("ECDHE_RSA_WITH_NULL_SHA", "ECDHE-RSA-NULL-SHA");
testO2JMapping("RSA_WITH_NULL_SHA", "NULL-SHA");
testO2JMapping("ECDH_ECDSA_WITH_NULL_SHA", "ECDH-ECDSA-NULL-SHA");
testO2JMapping("ECDH_RSA_WITH_NULL_SHA", "ECDH-RSA-NULL-SHA");
testO2JMapping("ECDH_anon_WITH_NULL_SHA", "AECDH-NULL-SHA");
testO2JMapping("RSA_WITH_NULL_MD5", "NULL-MD5");
testO2JMapping("KRB5_WITH_3DES_EDE_CBC_SHA", "KRB5-DES-CBC3-SHA");
testO2JMapping("KRB5_WITH_3DES_EDE_CBC_MD5", "KRB5-DES-CBC3-MD5");
testO2JMapping("KRB5_WITH_RC4_128_SHA", "KRB5-RC4-SHA");
testO2JMapping("KRB5_WITH_RC4_128_MD5", "KRB5-RC4-MD5");
testO2JMapping("KRB5_WITH_DES_CBC_SHA", "KRB5-DES-CBC-SHA");
testO2JMapping("KRB5_WITH_DES_CBC_MD5", "KRB5-DES-CBC-MD5");
testO2JMapping("KRB5_EXPORT_WITH_DES_CBC_40_SHA", "EXP-KRB5-DES-CBC-SHA");
testO2JMapping("KRB5_EXPORT_WITH_DES_CBC_40_MD5", "EXP-KRB5-DES-CBC-MD5");
testO2JMapping("KRB5_EXPORT_WITH_RC4_40_SHA", "EXP-KRB5-RC4-SHA");
testO2JMapping("KRB5_EXPORT_WITH_RC4_40_MD5", "EXP-KRB5-RC4-MD5");
testO2JMapping("RSA_EXPORT_WITH_RC2_CBC_40_MD5", "EXP-RC2-CBC-MD5");
testO2JMapping("DHE_DSS_WITH_AES_256_CBC_SHA", "DHE-DSS-AES256-SHA");
testO2JMapping("DHE_RSA_WITH_AES_256_CBC_SHA", "DHE-RSA-AES256-SHA");
testO2JMapping("DH_anon_WITH_AES_256_CBC_SHA", "ADH-AES256-SHA");
testO2JMapping("ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "ECDHE-ECDSA-AES256-SHA");
testO2JMapping("ECDHE_RSA_WITH_AES_256_CBC_SHA", "ECDHE-RSA-AES256-SHA");
testO2JMapping("ECDH_ECDSA_WITH_AES_256_CBC_SHA", "ECDH-ECDSA-AES256-SHA");
testO2JMapping("ECDH_RSA_WITH_AES_256_CBC_SHA", "ECDH-RSA-AES256-SHA");
testO2JMapping("ECDH_anon_WITH_AES_256_CBC_SHA", "AECDH-AES256-SHA");
testO2JMapping("KRB5_EXPORT_WITH_RC2_CBC_40_MD5", "EXP-KRB5-RC2-CBC-MD5");
testO2JMapping("KRB5_EXPORT_WITH_RC2_CBC_40_SHA", "EXP-KRB5-RC2-CBC-SHA");
testO2JMapping("RSA_WITH_AES_256_CBC_SHA", "AES256-SHA");
// Test the known mappings that actually do not exist in Java
testO2JMapping("EDH_DSS_WITH_3DES_EDE_CBC_SHA", "EDH-DSS-DES-CBC3-SHA");
testO2JMapping("RSA_WITH_SEED_SHA", "SEED-SHA");
testO2JMapping("RSA_WITH_CAMELLIA128_SHA", "CAMELLIA128-SHA");
testO2JMapping("RSA_WITH_IDEA_CBC_SHA", "IDEA-CBC-SHA");
testO2JMapping("PSK_WITH_AES_128_CBC_SHA", "PSK-AES128-CBC-SHA");
testO2JMapping("PSK_WITH_3DES_EDE_CBC_SHA", "PSK-3DES-EDE-CBC-SHA");
testO2JMapping("KRB5_WITH_IDEA_CBC_SHA", "KRB5-IDEA-CBC-SHA");
testO2JMapping("KRB5_WITH_IDEA_CBC_MD5", "KRB5-IDEA-CBC-MD5");
testO2JMapping("PSK_WITH_RC4_128_SHA", "PSK-RC4-SHA");
testO2JMapping("ECDHE_RSA_WITH_AES_256_GCM_SHA384", "ECDHE-RSA-AES256-GCM-SHA384");
testO2JMapping("ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384");
testO2JMapping("ECDHE_RSA_WITH_AES_256_CBC_SHA384", "ECDHE-RSA-AES256-SHA384");
testO2JMapping("ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "ECDHE-ECDSA-AES256-SHA384");
testO2JMapping("DHE_DSS_WITH_AES_256_GCM_SHA384", "DHE-DSS-AES256-GCM-SHA384");
testO2JMapping("DHE_RSA_WITH_AES_256_GCM_SHA384", "DHE-RSA-AES256-GCM-SHA384");
testO2JMapping("DHE_RSA_WITH_AES_256_CBC_SHA256", "DHE-RSA-AES256-SHA256");
testO2JMapping("DHE_DSS_WITH_AES_256_CBC_SHA256", "DHE-DSS-AES256-SHA256");
testO2JMapping("DHE_RSA_WITH_CAMELLIA256_SHA", "DHE-RSA-CAMELLIA256-SHA");
testO2JMapping("DHE_DSS_WITH_CAMELLIA256_SHA", "DHE-DSS-CAMELLIA256-SHA");
testO2JMapping("ECDH_RSA_WITH_AES_256_GCM_SHA384", "ECDH-RSA-AES256-GCM-SHA384");
testO2JMapping("ECDH_ECDSA_WITH_AES_256_GCM_SHA384", "ECDH-ECDSA-AES256-GCM-SHA384");
testO2JMapping("ECDH_RSA_WITH_AES_256_CBC_SHA384", "ECDH-RSA-AES256-SHA384");
testO2JMapping("ECDH_ECDSA_WITH_AES_256_CBC_SHA384", "ECDH-ECDSA-AES256-SHA384");
testO2JMapping("RSA_WITH_AES_256_GCM_SHA384", "AES256-GCM-SHA384");
testO2JMapping("RSA_WITH_AES_256_CBC_SHA256", "AES256-SHA256");
testO2JMapping("RSA_WITH_CAMELLIA256_SHA", "CAMELLIA256-SHA");
testO2JMapping("PSK_WITH_AES_256_CBC_SHA", "PSK-AES256-CBC-SHA");
testO2JMapping("DHE_RSA_WITH_SEED_SHA", "DHE-RSA-SEED-SHA");
testO2JMapping("DHE_DSS_WITH_SEED_SHA", "DHE-DSS-SEED-SHA");
testO2JMapping("DHE_RSA_WITH_CAMELLIA128_SHA", "DHE-RSA-CAMELLIA128-SHA");
testO2JMapping("DHE_DSS_WITH_CAMELLIA128_SHA", "DHE-DSS-CAMELLIA128-SHA");
testO2JMapping("EDH_RSA_WITH_3DES_EDE_CBC_SHA", "EDH-RSA-DES-CBC3-SHA");
testO2JMapping("SRP_DSS_WITH_AES_256_CBC_SHA", "SRP-DSS-AES-256-CBC-SHA");
testO2JMapping("SRP_RSA_WITH_AES_256_CBC_SHA", "SRP-RSA-AES-256-CBC-SHA");
testO2JMapping("SRP_WITH_AES_256_CBC_SHA", "SRP-AES-256-CBC-SHA");
testO2JMapping("DH_anon_WITH_AES_256_GCM_SHA384", "ADH-AES256-GCM-SHA384");
testO2JMapping("DH_anon_WITH_AES_256_CBC_SHA256", "ADH-AES256-SHA256");
testO2JMapping("DH_anon_WITH_CAMELLIA256_SHA", "ADH-CAMELLIA256-SHA");
testO2JMapping("SRP_DSS_WITH_AES_128_CBC_SHA", "SRP-DSS-AES-128-CBC-SHA");
testO2JMapping("SRP_RSA_WITH_AES_128_CBC_SHA", "SRP-RSA-AES-128-CBC-SHA");
testO2JMapping("SRP_WITH_AES_128_CBC_SHA", "SRP-AES-128-CBC-SHA");
testO2JMapping("DH_anon_WITH_SEED_SHA", "ADH-SEED-SHA");
testO2JMapping("DH_anon_WITH_CAMELLIA128_SHA", "ADH-CAMELLIA128-SHA");
testO2JMapping("RSA_WITH_RC2_CBC_MD5", "RC2-CBC-MD5");
testO2JMapping("SRP_DSS_WITH_3DES_EDE_CBC_SHA", "SRP-DSS-3DES-EDE-CBC-SHA");
testO2JMapping("SRP_RSA_WITH_3DES_EDE_CBC_SHA", "SRP-RSA-3DES-EDE-CBC-SHA");
testO2JMapping("SRP_WITH_3DES_EDE_CBC_SHA", "SRP-3DES-EDE-CBC-SHA");
testO2JMapping("RSA_WITH_3DES_EDE_CBC_MD5", "DES-CBC3-MD5");
testO2JMapping("EDH_RSA_WITH_DES_CBC_SHA", "EDH-RSA-DES-CBC-SHA");
testO2JMapping("EDH_DSS_WITH_DES_CBC_SHA", "EDH-DSS-DES-CBC-SHA");
testO2JMapping("RSA_WITH_DES_CBC_MD5", "DES-CBC-MD5");
testO2JMapping("EDH_RSA_EXPORT_WITH_DES_CBC_40_SHA", "EXP-EDH-RSA-DES-CBC-SHA");
testO2JMapping("EDH_DSS_EXPORT_WITH_DES_CBC_40_SHA", "EXP-EDH-DSS-DES-CBC-SHA");
// For historical reasons the CHACHA20 ciphers do not follow OpenSSL's custom naming
// convention and omits the HMAC algorithm portion of the name.
testO2JMapping("ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-RSA-CHACHA20-POLY1305");
testO2JMapping("ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-ECDSA-CHACHA20-POLY1305");
testO2JMapping("DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "DHE-RSA-CHACHA20-POLY1305");
testO2JMapping("PSK_WITH_CHACHA20_POLY1305_SHA256", "PSK-CHACHA20-POLY1305");
testO2JMapping("ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", "ECDHE-PSK-CHACHA20-POLY1305");
testO2JMapping("DHE_PSK_WITH_CHACHA20_POLY1305_SHA256", "DHE-PSK-CHACHA20-POLY1305");
testO2JMapping("RSA_PSK_WITH_CHACHA20_POLY1305_SHA256", "RSA-PSK-CHACHA20-POLY1305");
}
private static void testO2JMapping(String javaCipherSuite, String openSslCipherSuite) {
final String actual = CipherSuiteConverter.toJavaUncached(openSslCipherSuite);
logger.info("{} => {}", openSslCipherSuite, actual);
assertEquals(javaCipherSuite, actual);
}
@Test
public void testCachedJ2OMappings() {
testCachedJ2OMapping("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "ECDHE-ECDSA-AES128-SHA256");
}
@Test
public void testUnknownOpenSSLCiphersToJava() {
testUnknownOpenSSLCiphersToJava("(NONE)");
testUnknownOpenSSLCiphersToJava("unknown");
testUnknownOpenSSLCiphersToJava("");
}
@Test
public void testUnknownJavaCiphersToOpenSSL() {
testUnknownJavaCiphersToOpenSSL("(NONE)");
testUnknownJavaCiphersToOpenSSL("unknown");
testUnknownJavaCiphersToOpenSSL("");
}
private static void testUnknownOpenSSLCiphersToJava(String openSslCipherSuite) {
CipherSuiteConverter.clearCache();
assertNull(CipherSuiteConverter.toJava(openSslCipherSuite, "TLS"));
assertNull(CipherSuiteConverter.toJava(openSslCipherSuite, "SSL"));
}
private static void testUnknownJavaCiphersToOpenSSL(String javaCipherSuite) {
CipherSuiteConverter.clearCache();
assertNull(CipherSuiteConverter.toOpenSsl(javaCipherSuite, false));
assertNull(CipherSuiteConverter.toOpenSsl(javaCipherSuite, true));
}
private static void testCachedJ2OMapping(String javaCipherSuite, String openSslCipherSuite) {
CipherSuiteConverter.clearCache();
// For TLSv1.3 this should make no diffierence if boringSSL is true or false
final String actual1 = CipherSuiteConverter.toOpenSsl(javaCipherSuite, false);
assertEquals(openSslCipherSuite, actual1);
final String actual2 = CipherSuiteConverter.toOpenSsl(javaCipherSuite, true);
assertEquals(actual1, actual2);
// Ensure that the cache entries have been created.
assertTrue(CipherSuiteConverter.isJ2OCached(javaCipherSuite, actual1));
assertTrue(CipherSuiteConverter.isO2JCached(actual1, "", javaCipherSuite.substring(4)));
assertTrue(CipherSuiteConverter.isO2JCached(actual1, "SSL", "SSL_" + javaCipherSuite.substring(4)));
assertTrue(CipherSuiteConverter.isO2JCached(actual1, "TLS", "TLS_" + javaCipherSuite.substring(4)));
final String actual3 = CipherSuiteConverter.toOpenSsl(javaCipherSuite, false);
assertEquals(openSslCipherSuite, actual3);
// Test if the returned cipher strings are identical,
// so that the TLS sessions with the same cipher suite do not create many strings.
assertSame(actual1, actual3);
}
@Test
public void testCachedO2JMappings() {
testCachedO2JMapping("ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "ECDHE-ECDSA-AES128-SHA256");
}
private static void testCachedO2JMapping(String javaCipherSuite, String openSslCipherSuite) {
CipherSuiteConverter.clearCache();
final String tlsExpected = "TLS_" + javaCipherSuite;
final String sslExpected = "SSL_" + javaCipherSuite;
final String tlsActual1 = CipherSuiteConverter.toJava(openSslCipherSuite, "TLS");
final String sslActual1 = CipherSuiteConverter.toJava(openSslCipherSuite, "SSL");
assertEquals(tlsExpected, tlsActual1);
assertEquals(sslExpected, sslActual1);
// Ensure that the cache entries have been created.
assertTrue(CipherSuiteConverter.isO2JCached(openSslCipherSuite, "", javaCipherSuite));
assertTrue(CipherSuiteConverter.isO2JCached(openSslCipherSuite, "SSL", sslExpected));
assertTrue(CipherSuiteConverter.isO2JCached(openSslCipherSuite, "TLS", tlsExpected));
assertTrue(CipherSuiteConverter.isJ2OCached(tlsExpected, openSslCipherSuite));
assertTrue(CipherSuiteConverter.isJ2OCached(sslExpected, openSslCipherSuite));
final String tlsActual2 = CipherSuiteConverter.toJava(openSslCipherSuite, "TLS");
final String sslActual2 = CipherSuiteConverter.toJava(openSslCipherSuite, "SSL");
assertEquals(tlsExpected, tlsActual2);
assertEquals(sslExpected, sslActual2);
// Test if the returned cipher strings are identical,
// so that the TLS sessions with the same cipher suite do not create many strings.
assertSame(tlsActual1, tlsActual2);
assertSame(sslActual1, sslActual2);
}
@Test
public void testTlsv13Mappings() {
CipherSuiteConverter.clearCache();
assertEquals("TLS_AES_128_GCM_SHA256",
CipherSuiteConverter.toJava("TLS_AES_128_GCM_SHA256", "TLS"));
assertNull(CipherSuiteConverter.toJava("TLS_AES_128_GCM_SHA256", "SSL"));
assertEquals("TLS_AES_256_GCM_SHA384",
CipherSuiteConverter.toJava("TLS_AES_256_GCM_SHA384", "TLS"));
assertNull(CipherSuiteConverter.toJava("TLS_AES_256_GCM_SHA384", "SSL"));
assertEquals("TLS_CHACHA20_POLY1305_SHA256",
CipherSuiteConverter.toJava("TLS_CHACHA20_POLY1305_SHA256", "TLS"));
assertNull(CipherSuiteConverter.toJava("TLS_CHACHA20_POLY1305_SHA256", "SSL"));
// BoringSSL use different cipher naming then OpenSSL so we need to test for both
assertEquals("TLS_AES_128_GCM_SHA256",
CipherSuiteConverter.toOpenSsl("TLS_AES_128_GCM_SHA256", false));
assertEquals("TLS_AES_256_GCM_SHA384",
CipherSuiteConverter.toOpenSsl("TLS_AES_256_GCM_SHA384", false));
assertEquals("TLS_CHACHA20_POLY1305_SHA256",
CipherSuiteConverter.toOpenSsl("TLS_CHACHA20_POLY1305_SHA256", false));
assertEquals("AEAD-AES128-GCM-SHA256",
CipherSuiteConverter.toOpenSsl("TLS_AES_128_GCM_SHA256", true));
assertEquals("AEAD-AES256-GCM-SHA384",
CipherSuiteConverter.toOpenSsl("TLS_AES_256_GCM_SHA384", true));
assertEquals("AEAD-CHACHA20-POLY1305-SHA256",
CipherSuiteConverter.toOpenSsl("TLS_CHACHA20_POLY1305_SHA256", true));
}
}
| CipherSuiteConverterTest |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/java/org/springframework/boot/gradle/testkit/PluginClasspathGradleBuild.java | {
"start": 2253,
"end": 7076
} | class ____ extends GradleBuild {
private boolean kotlin;
public PluginClasspathGradleBuild(BuildOutput buildOutput) {
super(buildOutput);
}
public PluginClasspathGradleBuild(BuildOutput buildOutput, Dsl dsl) {
super(buildOutput, dsl);
}
public PluginClasspathGradleBuild kotlin() {
this.kotlin = true;
return this;
}
@Override
public GradleRunner prepareRunner(String... arguments) throws IOException {
return super.prepareRunner(arguments).withPluginClasspath(pluginClasspath());
}
private List<File> pluginClasspath() {
List<File> classpath = new ArrayList<>();
classpath.add(new File("bin/main"));
classpath.add(new File("build/classes/java/main"));
classpath.add(new File("build/resources/main"));
classpath.add(new File(pathOfJarContaining(Layers.class)));
classpath.add(new File(pathOfJarContaining(ClassVisitor.class)));
classpath.add(new File(pathOfJarContaining(DependencyManagementPlugin.class)));
if (this.kotlin) {
classpath.add(new File(pathOfJarContaining("org.jetbrains.kotlin.cli.common.PropertiesKt")));
classpath.add(new File(pathOfJarContaining(KotlinProject.class)));
classpath.add(new File(pathOfJarContaining(KotlinToolingVersion.class)));
classpath.add(new File(pathOfJarContaining("org.jetbrains.kotlin.build.report.metrics.BuildTime")));
classpath.add(new File(pathOfJarContaining("org.jetbrains.kotlin.buildtools.api.CompilationService")));
classpath.add(new File(pathOfJarContaining("org.jetbrains.kotlin.daemon.client.KotlinCompilerClient")));
classpath.add(new File(pathOfJarContaining("org.jetbrains.kotlin.konan.library.KonanLibrary")));
classpath.add(new File(pathOfJarContaining(KotlinCompilerPluginSupportPlugin.class)));
classpath.add(new File(pathOfJarContaining(LanguageSettings.class)));
classpath.add(new File(pathOfJarContaining(BuildUidService.class)));
}
classpath.add(new File(pathOfJarContaining("org.apache.commons.lang3.ArrayFill")));
classpath.add(new File(pathOfJarContaining("org.apache.commons.io.Charsets")));
classpath.add(new File(pathOfJarContaining(ArchiveEntry.class)));
classpath.add(new File(pathOfJarContaining(BuildRequest.class)));
classpath.add(new File(pathOfJarContaining(HttpClientConnectionManager.class)));
classpath.add(new File(pathOfJarContaining(HttpRequest.class)));
classpath.add(new File(pathOfJarContaining(HttpVersionPolicy.class)));
classpath.add(new File(pathOfJarContaining(JacksonModule.class)));
classpath.add(new File(pathOfJarContaining(JsonParser.class)));
classpath.add(new File(pathOfJarContaining("com.github.openjson.JSONObject")));
classpath.add(new File(pathOfJarContaining(JsonView.class)));
classpath.add(new File(pathOfJarContaining(Platform.class)));
classpath.add(new File(pathOfJarContaining(Toml.class)));
classpath.add(new File(pathOfJarContaining(Lexer.class)));
classpath.add(new File(pathOfJarContaining("org.graalvm.buildtools.gradle.NativeImagePlugin")));
classpath.add(new File(pathOfJarContaining("org.graalvm.reachability.GraalVMReachabilityMetadataRepository")));
classpath.add(new File(pathOfJarContaining("org.graalvm.buildtools.utils.SharedConstants")));
// Cyclonedx dependencies
classpath.add(new File(pathOfJarContaining(CyclonedxPlugin.class)));
classpath.add(new File(pathOfJarContaining("com.ctc.wstx.api.WriterConfig")));
classpath.add(new File(pathOfJarContaining("com.fasterxml.jackson.core.Versioned")));
classpath.add(new File(pathOfJarContaining("com.fasterxml.jackson.databind.JsonSerializer")));
classpath.add(new File(pathOfJarContaining("com.fasterxml.jackson.dataformat.xml.ser.ToXmlGenerator")));
classpath.add(new File(pathOfJarContaining("com.github.packageurl.MalformedPackageURLException")));
classpath.add(new File(pathOfJarContaining("com.google.common.collect.ImmutableMap")));
classpath.add(new File(pathOfJarContaining("com.networknt.schema.resource.SchemaMappers")));
classpath.add(new File(pathOfJarContaining("org.apache.commons.collections4.CollectionUtils")));
classpath.add(new File(pathOfJarContaining("org.apache.maven.model.building.ModelBuildingException")));
classpath.add(new File(pathOfJarContaining("org.codehaus.plexus.util.xml.pull.XmlPullParserException")));
classpath.add(new File(pathOfJarContaining("org.codehaus.stax2.ri.Stax2WriterAdapter")));
classpath.add(new File(pathOfJarContaining("org.cyclonedx.model.ExternalReference")));
return classpath;
}
private String pathOfJarContaining(String className) {
try {
return pathOfJarContaining(Class.forName(className));
}
catch (ClassNotFoundException ex) {
throw new IllegalArgumentException(ex);
}
}
private String pathOfJarContaining(Class<?> type) {
return type.getProtectionDomain().getCodeSource().getLocation().getPath();
}
}
| PluginClasspathGradleBuild |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/cluster/ClusteredRoutePolicyFactoryTest.java | {
"start": 1445,
"end": 5119
} | class ____ extends ContextTestSupport {
private TestClusterService cs;
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
cs = new TestClusterService("my-cluster-service");
context.addService(cs);
ClusteredRoutePolicyFactory factory = ClusteredRoutePolicyFactory.forNamespace("my-ns");
context.addRoutePolicyFactory(factory);
return context;
}
@Test
public void testClusteredRoutePolicyFactory() throws Exception {
// route is stopped as we are not leader yet
assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("foo"));
MockEndpoint mock = getMockEndpoint("mock:foo");
mock.expectedBodiesReceived("Hello Foo");
cs.getView().setLeader(true);
template.sendBody("seda:foo", "Hello Foo");
assertMockEndpointsSatisfied();
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
}
@Test
public void testClusteredRoutePolicyFactoryAddRoute() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("seda:bar").routeId("bar")
.to("mock:bar");
}
});
// route is stopped as we are not leader yet
assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("bar"));
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello Foo");
getMockEndpoint("mock:bar").expectedBodiesReceived("Hello Bar");
cs.getView().setLeader(true);
template.sendBody("seda:foo", "Hello Foo");
template.sendBody("seda:bar", "Hello Bar");
assertMockEndpointsSatisfied();
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("bar"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("seda:foo").routeId("foo")
.to("mock:foo");
}
};
}
@Test
public void testClusteredRoutePolicyFactoryAddRouteAlreadyLeader() throws Exception {
cs.getView().setLeader(true);
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("seda:bar").routeId("bar")
.to("mock:bar");
}
});
// route is started as we are leader
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("bar"));
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello Foo");
getMockEndpoint("mock:bar").expectedBodiesReceived("Hello Bar");
template.sendBody("seda:foo", "Hello Foo");
template.sendBody("seda:bar", "Hello Bar");
assertMockEndpointsSatisfied();
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("foo"));
assertEquals(ServiceStatus.Started, context.getRouteController().getRouteStatus("bar"));
}
// *********************************
// Helpers
// *********************************
private static | ClusteredRoutePolicyFactoryTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/PatternMatchingInstanceofTest.java | {
"start": 12277,
"end": 12692
} | class ____ {
void test(Object o) {
// No finding here, but also no crash.
if (o instanceof Foo(int x, int y)) {}
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void newVariableNotInstantlyAssigned_pleasantFix() {
helper
.addInputLines(
"Test.java",
"""
| Test |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/configuration/internal/metadata/reader/AuditedPropertiesReader.java | {
"start": 18191,
"end": 19129
} | class ____.
final String embeddedName = propertiesGroupMapping.get( attributeName );
if ( !auditedPropertiesHolder.contains( embeddedName ) ) {
// Manage properties mapped within <properties> tag.
final Value propertyValue = persistentPropertiesSource.getProperty( embeddedName ).getValue();
this.addFromPropertiesGroup(
embeddedName,
memberDetails,
accessType,
(Component) propertyValue,
allClassAudited
);
}
}
}
private void addFromPropertiesGroup(
String embeddedName,
MemberDetails memberDetails,
String accessType,
Component propertyValue,
Audited allClassAudited) {
final ComponentAuditingData componentData = new ComponentAuditingData();
final boolean isAudited = fillPropertyData( memberDetails, componentData, accessType, allClassAudited );
if ( isAudited ) {
// EntityPersister.getPropertyNames() returns name of embedded component instead of | field |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/ratelimit/AbstractRateLimiter.java | {
"start": 1095,
"end": 2733
} | class ____<C> extends AbstractStatefulConfigurable<C>
implements RateLimiter<C>, ApplicationListener<FilterArgsEvent> {
private String configurationPropertyName;
private @Nullable ConfigurationService configurationService;
protected AbstractRateLimiter(Class<C> configClass, String configurationPropertyName,
@Nullable ConfigurationService configurationService) {
super(configClass);
this.configurationPropertyName = configurationPropertyName;
this.configurationService = configurationService;
}
protected String getConfigurationPropertyName() {
return configurationPropertyName;
}
protected void setConfigurationService(ConfigurationService configurationService) {
this.configurationService = configurationService;
}
@Override
public void onApplicationEvent(FilterArgsEvent event) {
Map<String, Object> args = event.getArgs();
if (args.isEmpty() || !hasRelevantKey(args)) {
return;
}
String routeId = event.getRouteId();
C routeConfig = newConfig();
if (this.configurationService != null) {
this.configurationService.with(routeConfig)
.name(this.configurationPropertyName)
.normalizedProperties(args)
.bind();
}
getConfig().put(routeId, routeConfig);
}
private boolean hasRelevantKey(Map<String, Object> args) {
return args.keySet().stream().anyMatch(key -> key.startsWith(configurationPropertyName + "."));
}
@Override
public String toString() {
return new ToStringCreator(this).append("configurationPropertyName", configurationPropertyName)
.append("config", getConfig())
.append("configClass", getConfigClass())
.toString();
}
}
| AbstractRateLimiter |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/response/ExceptionInWriterTest.java | {
"start": 1774,
"end": 2070
} | class ____ {
private final String message;
public Greeting(String message) {
this.message = message;
}
public String getMessage() {
return message;
}
}
@Provider
@Produces(MediaType.TEXT_PLAIN)
public static | Greeting |
java | google__error-prone | test_helpers/src/test/java/com/google/errorprone/BugCheckerRefactoringTestHelperTest.java | {
"start": 3175,
"end": 3554
} | class ____ {
public Object foo() {
Integer i = 1 + 2;
return i;
}
}
""")
.doTest());
}
@Test
public void replaceTextMatch() {
helper
.addInputLines(
"in/Test.java",
"""
public | Test |
java | google__dagger | javatests/dagger/internal/codegen/ModuleFactoryGeneratorTest.java | {
"start": 7392,
"end": 8020
} | class ____<A> {}");
daggerCompiler(moduleFile)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining("Modules with type parameters must be abstract")
.onSource(moduleFile)
.onLine(6);
});
}
@Test public void provideOverriddenByNoProvide() {
Source parent =
CompilerTests.javaSource(
"test.Parent",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"",
"@Module",
" | TestModule |
java | grpc__grpc-java | protobuf-lite/src/main/java/io/grpc/protobuf/lite/ProtoInputStream.java | {
"start": 1027,
"end": 3352
} | class ____ extends InputStream implements Drainable, KnownLength {
// ProtoInputStream is first initialized with a *message*. *partial* is initially null.
// Once there has been a read operation on this stream, *message* is serialized to *partial* and
// set to null.
@Nullable private MessageLite message;
private final Parser<?> parser;
@Nullable private ByteArrayInputStream partial;
ProtoInputStream(MessageLite message, Parser<?> parser) {
this.message = message;
this.parser = parser;
}
@Override
public int drainTo(OutputStream target) throws IOException {
int written;
if (message != null) {
written = message.getSerializedSize();
message.writeTo(target);
message = null;
} else if (partial != null) {
written = (int) ProtoLiteUtils.copy(partial, target);
partial = null;
} else {
written = 0;
}
return written;
}
@Override
public int read() {
if (message != null) {
partial = new ByteArrayInputStream(message.toByteArray());
message = null;
}
if (partial != null) {
return partial.read();
}
return -1;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (message != null) {
int size = message.getSerializedSize();
if (size == 0) {
message = null;
partial = null;
return -1;
}
if (len >= size) {
// This is the only case that is zero-copy.
CodedOutputStream stream = CodedOutputStream.newInstance(b, off, size);
message.writeTo(stream);
stream.flush();
stream.checkNoSpaceLeft();
message = null;
partial = null;
return size;
}
partial = new ByteArrayInputStream(message.toByteArray());
message = null;
}
if (partial != null) {
return partial.read(b, off, len);
}
return -1;
}
@Override
public int available() {
if (message != null) {
return message.getSerializedSize();
} else if (partial != null) {
return partial.available();
}
return 0;
}
MessageLite message() {
if (message == null) {
throw new IllegalStateException("message not available");
}
return message;
}
Parser<?> parser() {
return parser;
}
}
| ProtoInputStream |
java | elastic__elasticsearch | qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeSearchableSnapshotIndexCompatibilityIT.java | {
"start": 889,
"end": 6259
} | class ____ extends RollingUpgradeIndexCompatibilityTestCase {
static {
clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial")
.setting("xpack.searchable.snapshot.shared_cache.size", "16MB")
.setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB");
}
public RollingUpgradeSearchableSnapshotIndexCompatibilityIT(List<Version> nodesVersion) {
super(nodesVersion);
}
/**
* Creates an index and a snapshot on N-2, then mounts the snapshot during rolling upgrades.
*/
public void testMountSearchableSnapshot() throws Exception {
final String repository = suffix("repository");
final String snapshot = suffix("snapshot");
final String index = suffix("index-rolling-upgrade");
final var mountedIndex = suffix("index-rolling-upgrade-mounted");
final int numDocs = 3145;
if (isFullyUpgradedTo(VERSION_MINUS_2)) {
logger.debug("--> registering repository [{}]", repository);
registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings());
logger.debug("--> creating index [{}]", index);
createIndex(
client(),
index,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
indexDocs(index, numDocs);
logger.debug("--> creating snapshot [{}]", snapshot);
createSnapshot(client(), repository, snapshot, true);
logger.debug("--> deleting index [{}]", index);
deleteIndex(index);
return;
}
boolean success = false;
try {
logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex);
mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex);
ensureGreen(mountedIndex);
updateRandomIndexSettings(mountedIndex);
updateRandomMappings(mountedIndex);
assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), mountedIndex, numDocs);
logger.debug("--> closing mounted index [{}]", mountedIndex);
closeIndex(mountedIndex);
ensureGreen(mountedIndex);
logger.debug("--> re-opening index [{}]", mountedIndex);
openIndex(mountedIndex);
ensureGreen(mountedIndex);
logger.debug("--> deleting mounted index [{}]", mountedIndex);
deleteIndex(mountedIndex);
success = true;
} finally {
if (success == false) {
try {
client().performRequest(new Request("DELETE", "/" + mountedIndex));
} catch (ResponseException e) {
logger.warn("Failed to delete mounted index [" + mountedIndex + ']', e);
}
}
}
}
/**
* Creates an index and a snapshot on N-2, mounts the snapshot and ensures it remains searchable during rolling upgrades.
*/
public void testSearchableSnapshotUpgrade() throws Exception {
final String mountedIndex = suffix("index-rolling-upgraded-mounted");
final String repository = suffix("repository");
final String snapshot = suffix("snapshot");
final String index = suffix("index-rolling-upgraded");
final int numDocs = 2143;
if (isFullyUpgradedTo(VERSION_MINUS_2)) {
logger.debug("--> registering repository [{}]", repository);
registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings());
logger.debug("--> creating index [{}]", index);
createIndex(
client(),
index,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
indexDocs(index, numDocs);
logger.debug("--> creating snapshot [{}]", snapshot);
createSnapshot(client(), repository, snapshot, true);
logger.debug("--> deleting index [{}]", index);
deleteIndex(index);
logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex);
mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex);
}
ensureGreen(mountedIndex);
if (isIndexClosed(mountedIndex)) {
logger.debug("--> re-opening index [{}] after upgrade", mountedIndex);
openIndex(mountedIndex);
ensureGreen(mountedIndex);
}
updateRandomIndexSettings(mountedIndex);
updateRandomMappings(mountedIndex);
assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), mountedIndex, numDocs);
if (randomBoolean()) {
logger.debug("--> random closing of index [{}] before upgrade", mountedIndex);
closeIndex(mountedIndex);
ensureGreen(mountedIndex);
}
}
}
| RollingUpgradeSearchableSnapshotIndexCompatibilityIT |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/TimestampsAndWatermarksOperatorTest.java | {
"start": 17467,
"end": 17944
} | class ____
implements WatermarkGenerator<Tuple2<Boolean, Long>>, Serializable {
@Override
public void onEvent(
Tuple2<Boolean, Long> event, long eventTimestamp, WatermarkOutput output) {
if (event.f0) {
output.emitWatermark(new Watermark(event.f1));
}
}
@Override
public void onPeriodicEmit(WatermarkOutput output) {}
}
private static | PunctuatedWatermarkGenerator |
java | quarkusio__quarkus | test-framework/observability/src/main/java/io/quarkus/observability/test/utils/QueryResult.java | {
"start": 996,
"end": 1391
} | class ____ {
public Metric metric;
public List<String> value;
// getters and setters
@Override
public String toString() {
return "ResultItem{" +
"metric=" + metric +
", value=" + value +
'}';
}
}
@JsonIgnoreProperties(ignoreUnknown = true)
public static | ResultItem |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableCollectSingle.java | {
"start": 2290,
"end": 4211
} | class ____<T, U> implements FlowableSubscriber<T>, Disposable {
final SingleObserver<? super U> downstream;
final BiConsumer<? super U, ? super T> collector;
final U u;
Subscription upstream;
boolean done;
CollectSubscriber(SingleObserver<? super U> actual, U u, BiConsumer<? super U, ? super T> collector) {
this.downstream = actual;
this.collector = collector;
this.u = u;
}
@Override
public void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
s.request(Long.MAX_VALUE);
}
}
@Override
public void onNext(T t) {
if (done) {
return;
}
try {
collector.accept(u, t);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
upstream.cancel();
onError(e);
}
}
@Override
public void onError(Throwable t) {
if (done) {
RxJavaPlugins.onError(t);
return;
}
done = true;
upstream = SubscriptionHelper.CANCELLED;
downstream.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
upstream = SubscriptionHelper.CANCELLED;
downstream.onSuccess(u);
}
@Override
public void dispose() {
upstream.cancel();
upstream = SubscriptionHelper.CANCELLED;
}
@Override
public boolean isDisposed() {
return upstream == SubscriptionHelper.CANCELLED;
}
}
}
| CollectSubscriber |
java | apache__camel | components/camel-kubernetes/src/test/java/org/apache/camel/component/kubernetes/producer/KubernetesCustomResourcesProducerTest.java | {
"start": 2331,
"end": 12396
} | class ____ extends KubernetesTestSupport {
private static String githubSourceString;
KubernetesMockServer server;
NamespacedKubernetesClient client;
@BeforeAll
public static void readResource() throws IOException {
try (InputStream stream = KubernetesCustomResourcesProducerTest.class.getResourceAsStream("sample-cr.json")) {
githubSourceString = IOHelper.loadText(stream);
}
}
@BindToRegistry("kubernetesClient")
public KubernetesClient getClient() {
return client;
}
private String setupGithubSourceList() {
GenericKubernetesResourceList list = new GenericKubernetesResourceList();
list.getItems().add(Serialization.unmarshal(githubSourceString, GenericKubernetesResource.class));
return Serialization.asJson(list);
}
@Test
@Order(1)
void createTest() {
server.expect().post().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources")
.andReturn(200, githubSourceString).once();
server.expect().delete().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources/samplecr")
.andReturn(200, githubSourceString).once();
Exchange ex = template.request("direct:createCustomResource", exchange -> {
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE_NAME, "samplecr");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "testnamespace");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_NAME, "githubsources.sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, "sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, "Namespaced");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, "v1alpha1");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, "githubsources");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE, githubSourceString);
});
assertFalse(ex.isFailed());
assertNull(ex.getException());
assertNotNull(ex.getMessage());
assertNotNull(ex.getMessage().getBody());
}
@Test
@Order(0)
void updateTest() {
server.expect().get().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources/samplecr")
.andReturn(200, githubSourceString).once();
server.expect().put().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources/samplecr")
.andReturn(200, githubSourceString).once();
Exchange ex = template.request("direct:updateCustomResource", exchange -> {
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE_NAME, "samplecr");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "testnamespace");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_NAME, "githubsources.sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, "sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, "Namespaced");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, "v1alpha1");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, "githubsources");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE, githubSourceString);
});
assertFalse(ex.isFailed());
assertNull(ex.getException());
assertNotNull(ex.getMessage());
assertNotNull(ex.getMessage().getBody());
}
@Test
@Order(2)
void listTest() throws Exception {
server.expect().get().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources")
.andReturn(200, setupGithubSourceList()).once();
Exchange ex = template.request("direct:listCustomResources", exchange -> {
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "testnamespace");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_NAME, "githubsources.sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, "sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, "Namespaced");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, "v1alpha1");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, "githubsources");
});
assertFalse(ex.isFailed());
assertNull(ex.getException());
List<?> result = ex.getMessage().getBody(List.class);
assertEquals(1, result.size());
}
@Test
@Order(3)
void listByLabelsTest() throws Exception {
server.expect().get()
.withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources?labelSelector="
+ toUrlEncoded("key1=value1,key2=value2"))
.andReturn(200, setupGithubSourceList()).once();
Exchange ex = template.request("direct:listCustomResourcesByLabels", exchange -> {
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "testnamespace");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_NAME, "githubsources.sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, "sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, "Namespaced");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, "v1alpha1");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, "githubsources");
Map<String, String> labels = new HashMap<>();
labels.put("key1", "value1");
labels.put("key2", "value2");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_LABELS, labels);
});
assertFalse(ex.isFailed());
assertNull(ex.getException());
List<?> result = ex.getMessage().getBody(List.class);
assertEquals(1, result.size());
}
@Test
@Order(4)
void deleteTest() {
server.expect().post().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources")
.andReturn(200, githubSourceString).once();
server.expect().delete().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources/samplecr")
.andReturn(200, githubSourceString).once();
Exchange ex3 = template.request("direct:deleteCustomResource", exchange -> {
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_INSTANCE_NAME, "samplecr");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "testnamespace");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_NAME, "githubsources.sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, "sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, "Namespaced");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, "v1alpha1");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, "githubsources");
});
assertNotNull(ex3.getMessage());
assertTrue(ex3.getMessage().getHeader(KubernetesConstants.KUBERNETES_DELETE_RESULT, Boolean.class));
}
@Test
@Order(5)
void testListNotFound() {
server.expect().get().withPath("/apis/sources.knative.dev/v1alpha1/namespaces/testnamespace/githubsources")
.andReturn(404, "").once();
Exchange ex4 = template.request("direct:listCustomResources", exchange -> {
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_NAMESPACE_NAME, "testnamespace");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_NAME, "githubsources.sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_GROUP, "sources.knative.dev");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_SCOPE, "Namespaced");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_VERSION, "v1alpha1");
exchange.getIn().setHeader(KubernetesConstants.KUBERNETES_CRD_PLURAL, "githubsources");
});
assertNotNull(ex4.getMessage());
assertNull(ex4.getMessage().getBody());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:listCustomResources").toF(
"kubernetes-custom-resources:///?kubernetesClient=#kubernetesClient&operation=listCustomResources");
from("direct:listCustomResourcesByLabels").toF(
"kubernetes-custom-resources:///?kubernetesClient=#kubernetesClient&operation=listCustomResourcesByLabels");
from("direct:deleteCustomResource").toF(
"kubernetes-custom-resources:///?kubernetesClient=#kubernetesClient&operation=deleteCustomResource");
from("direct:createCustomResource").toF(
"kubernetes-custom-resources:///?kubernetesClient=#kubernetesClient&operation=createCustomResource");
from("direct:updateCustomResource").toF(
"kubernetes-custom-resources:///?kubernetesClient=#kubernetesClient&operation=updateCustomResource");
}
};
}
}
| KubernetesCustomResourcesProducerTest |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/BasicOAuthBearerTokenTest.java | {
"start": 1340,
"end": 3383
} | class ____ {
@Test
public void basic() {
OAuthBearerToken token = new BasicOAuthBearerToken("not.valid.token",
Collections.emptySet(),
0L,
"jdoe",
0L);
assertEquals("not.valid.token", token.value());
assertTrue(token.scope().isEmpty());
assertEquals(0L, token.lifetimeMs());
assertEquals("jdoe", token.principalName());
assertEquals(0L, token.startTimeMs());
}
@Test
public void negativeLifetime() {
OAuthBearerToken token = new BasicOAuthBearerToken("not.valid.token",
Collections.emptySet(),
-1L,
"jdoe",
0L);
assertEquals("not.valid.token", token.value());
assertTrue(token.scope().isEmpty());
assertEquals(-1L, token.lifetimeMs());
assertEquals("jdoe", token.principalName());
assertEquals(0L, token.startTimeMs());
}
@Test
public void noErrorIfModifyScope() {
// Start with a basic set created by the caller.
SortedSet<String> callerSet = new TreeSet<>(Arrays.asList("a", "b", "c"));
OAuthBearerToken token = new BasicOAuthBearerToken("not.valid.token",
callerSet,
0L,
"jdoe",
0L);
// Make sure it all looks good
assertNotNull(token.scope());
assertEquals(3, token.scope().size());
// Add a value to the caller's set and note that it changes the token's scope set.
// Make sure to make it read-only when it's passed in.
callerSet.add("d");
assertTrue(token.scope().contains("d"));
// Similarly, removing a value from the caller's will affect the token's scope set.
// Make sure to make it read-only when it's passed in.
callerSet.remove("c");
assertFalse(token.scope().contains("c"));
// Ensure that attempting to change the token's scope set directly will not throw any error.
token.scope().clear();
}
}
| BasicOAuthBearerTokenTest |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/builders/HttpConfigurationTests.java | {
"start": 2781,
"end": 4484
} | class ____ {
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired
private MockMvc mockMvc;
@Test
public void configureWhenAddFilterUnregisteredThenThrowsBeanCreationException() {
assertThatExceptionOfType(BeanCreationException.class)
.isThrownBy(() -> this.spring.register(UnregisteredFilterConfig.class).autowire())
.withMessageContaining("The Filter class " + UnregisteredFilter.class.getName()
+ " does not have a registered order and cannot be added without a specified order."
+ " Consider using addFilterBefore or addFilterAfter instead.");
}
// https://github.com/spring-projects/spring-security-javaconfig/issues/104
@Test
public void configureWhenAddFilterCasAuthenticationFilterThenFilterAdded() throws Exception {
CasAuthenticationFilterConfig.CAS_AUTHENTICATION_FILTER = spy(new CasAuthenticationFilter());
this.spring.register(CasAuthenticationFilterConfig.class).autowire();
this.mockMvc.perform(get("/"));
verify(CasAuthenticationFilterConfig.CAS_AUTHENTICATION_FILTER).doFilter(any(ServletRequest.class),
any(ServletResponse.class), any(FilterChain.class));
}
@Test
public void configureWhenConfigIsRequestMatchersJavadocThenAuthorizationApplied() throws Exception {
this.spring.register(RequestMatcherRegistryConfigs.class).autowire();
this.mockMvc.perform(get("/oauth/a")).andExpect(status().isUnauthorized());
this.mockMvc.perform(get("/oauth/b")).andExpect(status().isUnauthorized());
this.mockMvc.perform(get("/api/a")).andExpect(status().isUnauthorized());
this.mockMvc.perform(get("/api/b")).andExpect(status().isUnauthorized());
}
@Configuration
@EnableWebSecurity
static | HttpConfigurationTests |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/ai/model/mcp/registry/Icon.java | {
"start": 4179,
"end": 5155
} | enum ____ {
/**
* Light theme.
*/
LIGHT("light"),
/**
* Dark theme.
*/
DARK("dark");
private final String value;
/**
* Constructor.
*
* @param value value
*/
Theme(String value) {
this.value = value;
}
/**
* Get value.
*
* @return value
*/
@JsonValue
public String getValue() {
return value;
}
/**
* Create from value.
*
* @param value value
* @return Theme
*/
@JsonCreator
public static Theme fromValue(String value) {
for (Theme t : Theme.values()) {
if (t.value.equalsIgnoreCase(value)) {
return t;
}
}
throw new IllegalArgumentException("Unknown theme: " + value);
}
}
} | Theme |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/read/Scan.java | {
"start": 2050,
"end": 2625
} | interface ____ {
/**
* Returns the actual schema of this data source scan, which may be different from the physical
* schema of the underlying storage, as column pruning or other optimizations may happen.
*/
StructType readSchema();
/**
* A description string of this scan, which may includes information like: what filters are
* configured for this scan, what's the value of some important options like path, etc. The
* description doesn't need to include {@link #readSchema()}, as Spark already knows it.
* <p>
* By default this returns the | Scan |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/blockloader/GeoPointFieldBlockLoaderTests.java | {
"start": 956,
"end": 8480
} | class ____ extends BlockLoaderTestCase {
public GeoPointFieldBlockLoaderTests(BlockLoaderTestCase.Params params) {
super("geo_point", params);
}
@Override
@SuppressWarnings("unchecked")
protected Object expected(Map<String, Object> fieldMapping, Object values, TestContext testContext) {
var nullValue = switch (fieldMapping.get("null_value")) {
case String s -> convert(s, null, false);
case null -> null;
default -> throw new IllegalStateException("Unexpected null_value format");
};
// read from doc_values
boolean preferToLoadFromDocValues = params.preference() == MappedFieldType.FieldExtractPreference.DOC_VALUES;
boolean noPreference = params.preference() == MappedFieldType.FieldExtractPreference.NONE;
if (hasDocValues(fieldMapping, true)) {
if (preferToLoadFromDocValues) {
return longValues(values, nullValue, testContext.isMultifield());
} else if (noPreference && params.syntheticSource()) {
return bytesRefWkbValues(values, nullValue, testContext.isMultifield());
}
}
// stored source is used
if (params.syntheticSource() == false) {
return exactValuesFromSource(values, nullValue, false);
}
// Usually implementation of block loader from source adjusts values read from source
// so that they look the same as doc_values would (like reducing precision).
// geo_point does not do that and because of that we need to handle all these cases below.
// If we are reading from stored source or fallback synthetic source we get the same exact data as source.
// But if we are using "normal" synthetic source we get lesser precision data from doc_values.
// That is unless "synthetic_source_keep" forces fallback synthetic source again.
if (testContext.forceFallbackSyntheticSource()) {
return exactValuesFromSource(values, nullValue, false);
}
String syntheticSourceKeep = (String) fieldMapping.getOrDefault("synthetic_source_keep", "none");
if (syntheticSourceKeep.equals("all")) {
return exactValuesFromSource(values, nullValue, false);
}
// synthetic source and doc_values are present
if (hasDocValues(fieldMapping, true)) {
return bytesRefWkbValues(values, nullValue, false);
}
// synthetic source is enabled, but no doc_values are present, so fallback to ignored source
return exactValuesFromSource(values, nullValue, false);
}
/**
* Use when values are stored as points encoded as longs.
*/
@SuppressWarnings("unchecked")
private Object longValues(Object values, GeoPoint nullValue, boolean needsMultifieldAdjustment) {
if (values instanceof List<?> == false) {
var point = convert(values, nullValue, needsMultifieldAdjustment);
return point != null ? point.getEncoded() : null;
}
var resultList = ((List<Object>) values).stream()
.map(v -> convert(v, nullValue, needsMultifieldAdjustment))
.filter(Objects::nonNull)
.map(GeoPoint::getEncoded)
.sorted()
.toList();
return maybeFoldList(resultList);
}
/**
* Use when values are stored as WKB encoded points.
*/
@SuppressWarnings("unchecked")
private Object bytesRefWkbValues(Object values, GeoPoint nullValue, boolean needsMultifieldAdjustment) {
if (values instanceof List<?> == false) {
return toWKB(normalize(convert(values, nullValue, needsMultifieldAdjustment)));
}
var resultList = ((List<Object>) values).stream()
.map(v -> convert(v, nullValue, needsMultifieldAdjustment))
.filter(Objects::nonNull)
.sorted(Comparator.comparingLong(GeoPoint::getEncoded))
.map(p -> toWKB(normalize(p)))
.toList();
return maybeFoldList(resultList);
}
@SuppressWarnings("unchecked")
private Object exactValuesFromSource(Object value, GeoPoint nullValue, boolean needsMultifieldAdjustment) {
if (value instanceof List<?> == false) {
return toWKB(convert(value, nullValue, needsMultifieldAdjustment));
}
var resultList = ((List<Object>) value).stream()
.map(v -> convert(v, nullValue, needsMultifieldAdjustment))
.filter(Objects::nonNull)
.map(this::toWKB)
.toList();
return maybeFoldList(resultList);
}
@SuppressWarnings("unchecked")
private GeoPoint convert(Object value, GeoPoint nullValue, boolean needsMultifieldAdjustment) {
if (value == null) {
if (nullValue == null) {
return null;
}
return possiblyAdjustMultifieldValue(nullValue, needsMultifieldAdjustment);
}
if (value instanceof String s) {
try {
return possiblyAdjustMultifieldValue(new GeoPoint(s), needsMultifieldAdjustment);
} catch (Exception e) {
return null;
}
}
if (value instanceof Map<?, ?> m) {
if (m.get("type") != null) {
var coordinates = (List<Double>) m.get("coordinates");
// Order is GeoJSON is lon,lat
return possiblyAdjustMultifieldValue(new GeoPoint(coordinates.get(1), coordinates.get(0)), needsMultifieldAdjustment);
} else {
return possiblyAdjustMultifieldValue(new GeoPoint((Double) m.get("lat"), (Double) m.get("lon")), needsMultifieldAdjustment);
}
}
// Malformed values are excluded
return null;
}
private GeoPoint possiblyAdjustMultifieldValue(GeoPoint point, boolean isMultifield) {
// geo_point multifields are parsed from a geohash representation of the original point (GeoPointFieldMapper#index)
// and it's not exact.
// So if this is a multifield we need another adjustment here.
// Note that this does not apply to block loader from source because in this case we parse raw original values.
// Same thing happens with synthetic source since it is generated from the parent field data that didn't go through multi field
// parsing logic.
if (isMultifield) {
return point.resetFromString(point.geohash());
}
return point;
}
/**
* Normalizes the given point by forcing it to be encoded and then decoded, similarly to how actual block loaders work when they read
* values. During encoding/decoding, some precision may be lost, so the lat/lon coordinates may change. Without this, the point returned
* by the block loader will be ever so slightly different from the original point. This will cause the tests to fail. This method
* exists to essentially mimic what happens to the point when it gets stored and then later loaded back.
*/
private GeoPoint normalize(GeoPoint point) {
if (point == null) {
return null;
}
return point.resetFromEncoded(point.getEncoded());
}
private BytesRef toWKB(GeoPoint point) {
if (point == null) {
return null;
}
return new BytesRef(WellKnownBinary.toWKB(new Point(point.getX(), point.getY()), ByteOrder.LITTLE_ENDIAN));
}
}
| GeoPointFieldBlockLoaderTests |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/assumptions/BDDAssumptionsTest.java | {
"start": 16663,
"end": 17101
} | class ____ {
private final Yoda[] actual = { new Yoda(), new Yoda() };
@Test
void should_run_test_when_assumption_passes() {
thenCode(() -> given(actual).isNotEmpty()).doesNotThrowAnyException();
}
@Test
void should_ignore_test_when_assumption_fails() {
expectAssumptionNotMetException(() -> given(actual).isNullOrEmpty());
}
}
@Nested
| BDDAssumptions_given_array_T_Test |
java | netty__netty | microbench/src/main/java/io/netty/microbench/stomp/StompEncoderBenchmark.java | {
"start": 2017,
"end": 3987
} | class ____ extends AbstractMicrobenchmark {
private StompSubframeEncoder stompEncoder;
private ByteBuf content;
private StompFrame stompFrame;
private ChannelHandlerContext context;
@Param({ "true", "false" })
public boolean pooledAllocator;
@Param({ "true", "false" })
public boolean voidPromise;
@Param
public ExampleStompHeadersSubframe.HeadersType headersType;
@Param({ "0", "100", "1000" })
public int contentLength;
@Setup(Level.Trial)
public void setup() {
byte[] bytes = new byte[contentLength];
ThreadLocalRandom.current().nextBytes(bytes);
content = Unpooled.wrappedBuffer(bytes);
ByteBuf testContent = Unpooled.unreleasableBuffer(content.asReadOnly());
StompHeadersSubframe headersSubframe = ExampleStompHeadersSubframe.EXAMPLES.get(headersType);
stompFrame = new DefaultStompFrame(headersSubframe.command(), testContent);
stompFrame.headers().setAll(headersSubframe.headers());
stompEncoder = new StompSubframeEncoder();
context = new EmbeddedChannelWriteReleaseHandlerContext(
pooledAllocator? PooledByteBufAllocator.DEFAULT : UnpooledByteBufAllocator.DEFAULT, stompEncoder) {
@Override
protected void handleException(Throwable t) {
handleUnexpectedException(t);
}
};
}
@TearDown(Level.Trial)
public void teardown() {
content.release();
content = null;
}
@Benchmark
public void writeStompFrame() throws Exception {
stompEncoder.write(context, stompFrame.retain(), newPromise());
}
private ChannelPromise newPromise() {
return voidPromise? context.voidPromise() : context.newPromise();
}
@Override
protected ChainedOptionsBuilder newOptionsBuilder() throws Exception {
return super.newOptionsBuilder().addProfiler(GCProfiler.class);
}
}
| StompEncoderBenchmark |
java | elastic__elasticsearch | libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java | {
"start": 1045,
"end": 2129
} | class ____ {
// main method to be used by gradle build plugin
public static void main(String[] args) throws IOException {
List<ClassReader> classReaders = ClassReaders.ofClassPath();
Map<String, Map<String, String>> namedComponentsMap = scanForNamedClasses(classReaders);
Path outputFile = Path.of(args[0]);
NamedComponentScanner.writeToFile(namedComponentsMap, outputFile);
}
// scope for testing
public static void writeToFile(Map<String, Map<String, String>> namedComponentsMap, Path outputFile) throws IOException {
Files.createDirectories(outputFile.getParent());
try (OutputStream outputStream = Files.newOutputStream(outputFile)) {
try (XContentBuilder namedComponents = XContentFactory.jsonBuilder(outputStream)) {
namedComponents.startObject();
for (Map.Entry<String, Map<String, String>> extensibleToComponents : namedComponentsMap.entrySet()) {
namedComponents.startObject(extensibleToComponents.getKey());// extensible | NamedComponentScanner |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/util/ReflectionUtilsTests.java | {
"start": 81097,
"end": 81357
} | class ____ extends SuperclassWithFieldAndFieldFromInterface
implements InterfaceWithField {
static final String subPath = "sub";
}
}
// -------------------------------------------------------------------------
| SubclassWithFieldAndFieldFromInterface |
java | google__guava | android/guava/src/com/google/common/collect/MapMakerInternalMap.java | {
"start": 23901,
"end": 24519
} | class ____<K, V, E extends InternalEntry<K, V, E>>
extends WeakReference<K> implements InternalEntry<K, V, E> {
final int hash;
AbstractWeakKeyEntry(ReferenceQueue<K> queue, K key, int hash) {
super(key, queue);
this.hash = hash;
}
@Override
public final K getKey() {
return get();
}
@Override
public final int getHash() {
return hash;
}
@Override
public @Nullable E getNext() {
return null;
}
}
/** Concrete implementation of {@link InternalEntry} for weak keys and {@link Dummy} values. */
private static | AbstractWeakKeyEntry |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/parse/HelpOperationParseStrategy.java | {
"start": 1072,
"end": 1551
} | class ____ extends AbstractRegexParseStrategy {
static final HelpOperationParseStrategy INSTANCE = new HelpOperationParseStrategy();
private HelpOperationParseStrategy() {
super(Pattern.compile("HELP\\s*;?", DEFAULT_PATTERN_FLAGS));
}
@Override
public Operation convert(String statement) {
return new HelpOperation();
}
@Override
public String[] getHints() {
return new String[] {"HELP"};
}
}
| HelpOperationParseStrategy |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/EnrichNoCacheTest.java | {
"start": 1209,
"end": 3261
} | class ____ extends ContextTestSupport {
@Test
public void testNoCache() throws Exception {
assertEquals(1, context.getEndpointRegistry().size());
sendBody("foo", "mock:x");
sendBody("foo", "mock:y");
sendBody("foo", "mock:z");
sendBody("bar", "mock:x");
sendBody("bar", "mock:y");
sendBody("bar", "mock:z");
// make sure its using an empty producer cache as the cache is disabled
List<Processor> list = getProcessors("foo");
Enricher ep = (Enricher) list.get(0);
assertNotNull(ep);
assertEquals(-1, ep.getCacheSize());
// check no additional endpoints added as cache was disabled
assertEquals(1, context.getEndpointRegistry().size());
// now send again with mocks which then add endpoints
MockEndpoint x = getMockEndpoint("mock:x");
MockEndpoint y = getMockEndpoint("mock:y");
MockEndpoint z = getMockEndpoint("mock:z");
x.expectedBodiesReceived("foo", "bar");
y.expectedBodiesReceived("foo", "bar");
z.expectedBodiesReceived("foo", "bar");
assertEquals(4, context.getEndpointRegistry().size());
sendBody("foo", "mock:x");
sendBody("foo", "mock:y");
sendBody("foo", "mock:z");
sendBody("bar", "mock:x");
sendBody("bar", "mock:y");
sendBody("bar", "mock:z");
// should not register as new endpoint so we keep at 4
sendBody("dummy", "mock:dummy");
assertMockEndpointsSatisfied();
assertEquals(4, context.getEndpointRegistry().size());
}
protected void sendBody(String body, String uri) {
template.sendBodyAndHeader("direct:a", body, "myHeader", uri);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:a")
.enrich().header("myHeader").cacheSize(-1).end().id("foo");
}
};
}
}
| EnrichNoCacheTest |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java | {
"start": 3255,
"end": 4540
} | class ____ implements CallbackHandler {
private final AuthMethods authMethod;
PlainServerCallbackHandler(String authMethodStr) throws AuthenticationException {
authMethod = AuthMethods.getValidAuthMethod(authMethodStr);
}
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
String username = null;
String password = null;
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback nc = (NameCallback) callback;
username = nc.getName();
} else if (callback instanceof PasswordCallback) {
PasswordCallback pc = (PasswordCallback) callback;
password = new String(pc.getPassword());
} else if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else {
throw new UnsupportedCallbackException(callback);
}
}
PasswdAuthenticationProvider provider =
AuthenticationProviderFactory.getAuthenticationProvider(authMethod);
provider.Authenticate(username, password);
if (ac != null) {
ac.setAuthorized(true);
}
}
}
public static | PlainServerCallbackHandler |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/sagemaker/model/SageMakerConfiguration.java | {
"start": 724,
"end": 1425
} | class ____ implements CheckedSupplier<Map<String, SettingsConfiguration>, RuntimeException> {
private final SageMakerSchemas schemas;
public SageMakerConfiguration(SageMakerSchemas schemas) {
this.schemas = schemas;
}
@Override
public Map<String, SettingsConfiguration> get() {
return Stream.of(
AwsSecretSettings.configuration(schemas.supportedTaskTypes()),
SageMakerServiceSettings.configuration(schemas.supportedTaskTypes()),
SageMakerTaskSettings.configuration(schemas.supportedTaskTypes())
).flatMap(Function.identity()).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
}
| SageMakerConfiguration |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheCacheableTestFixtures.java | {
"start": 2043,
"end": 2167
} | interface ____ extends WorkParameters {
ConfigurableFileCollection getClasspath();
}
abstract static | Parameters |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/masterreplica/MasterReplicaSentinelSslIntegrationTests.java | {
"start": 1110,
"end": 2714
} | class ____ extends TestSupport {
private final ClientResources clientResources;
private static File truststoreFile;
private static Map<Integer, Integer> portMap = new HashMap<>();
static {
portMap.put(26379, 26822);
portMap.put(6482, 8443);
portMap.put(6483, 8444);
}
@Inject
MasterReplicaSentinelSslIntegrationTests(ClientResources clientResources) {
this.clientResources = clientResources.mutate()
.socketAddressResolver(MappingSocketAddressResolver.create(DnsResolvers.UNRESOLVED, hostAndPort -> {
int port = hostAndPort.getPort();
if (portMap.containsKey(port)) {
return HostAndPort.of(hostAndPort.getHostText(), portMap.get(port));
}
return hostAndPort;
})).build();
}
@Test
void testMasterReplicaSentinelBasic() {
RedisClient client = RedisClient.create(clientResources);
RedisURI redisURI = RedisURI.create("rediss-sentinel://" + TestSettings.host() + ":26379?sentinelMasterId=mymaster");
redisURI.setVerifyPeer(false);
StatefulRedisMasterReplicaConnection<String, String> connection = MasterReplica.connect(client, StringCodec.UTF8,
redisURI);
connection.setReadFrom(ReadFrom.REPLICA);
connection.sync().set(key, value);
connection.sync().set(key, value);
connection.sync().get(key);
connection.close();
FastShutdown.shutdown(client);
}
}
| MasterReplicaSentinelSslIntegrationTests |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/JpqlQueryBuilder.java | {
"start": 22858,
"end": 23217
} | class ____ extends RenderContext {
ConstructorContext(RenderContext rootContext) {
super(rootContext.aliases);
}
@Override
public boolean isConstructorContext() {
return true;
}
}
/**
* An origin that is used to select data from. selection origins are used with paths to define where a path is
* anchored.
*/
public | ConstructorContext |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/method/configuration/PrePostReactiveMethodSecurityConfigurationTests.java | {
"start": 23506,
"end": 24502
} | class ____ {
@RequireRole(role = "#role")
Mono<Boolean> hasRole(String role) {
return Mono.just(true);
}
@RequireRole(role = "'USER'")
Mono<Boolean> hasUserRole() {
return Mono.just(true);
}
@PreAuthorize("hasRole({role})")
Mono<Void> placeholdersOnlyResolvedByMetaAnnotations() {
return Mono.empty();
}
@HasClaim(claim = "message:read", roles = { "'ADMIN'" })
Mono<String> readMessage() {
return Mono.just("message");
}
@ResultStartsWith("dave")
Mono<String> startsWithDave(String value) {
return Mono.just(value);
}
@ParameterContains("dave")
Flux<String> parametersContainDave(Flux<String> list) {
return list;
}
@ResultContains("dave")
Flux<String> resultsContainDave(Flux<String> list) {
return list;
}
@RestrictedAccess(entityClass = EntityClass.class)
Mono<String> getIdPath(String id) {
return Mono.just(id);
}
}
@Retention(RetentionPolicy.RUNTIME)
@PreAuthorize("hasRole({idPath})")
@ | MetaAnnotationService |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java | {
"start": 1120,
"end": 3199
} | class ____ extends HandledTransportAction<GetCategoriesAction.Request, GetCategoriesAction.Response> {
private final JobResultsProvider jobResultsProvider;
private final Client client;
private final JobManager jobManager;
private final ClusterService clusterService;
@Inject
public TransportGetCategoriesAction(
TransportService transportService,
ActionFilters actionFilters,
JobResultsProvider jobResultsProvider,
Client client,
JobManager jobManager,
ClusterService clusterService
) {
super(
GetCategoriesAction.NAME,
transportService,
actionFilters,
GetCategoriesAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.jobResultsProvider = jobResultsProvider;
this.client = client;
this.jobManager = jobManager;
this.clusterService = clusterService;
}
@Override
protected void doExecute(Task task, GetCategoriesAction.Request request, ActionListener<GetCategoriesAction.Response> listener) {
TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId());
jobManager.jobExists(request.getJobId(), parentTaskId, ActionListener.wrap(jobExists -> {
Integer from = request.getPageParams() != null ? request.getPageParams().getFrom() : null;
Integer size = request.getPageParams() != null ? request.getPageParams().getSize() : null;
jobResultsProvider.categoryDefinitions(
request.getJobId(),
request.getCategoryId(),
request.getPartitionFieldValue(),
true,
from,
size,
r -> listener.onResponse(new GetCategoriesAction.Response(r)),
listener::onFailure,
(CancellableTask) task,
parentTaskId,
new ParentTaskAssigningClient(client, parentTaskId)
);
}, listener::onFailure));
}
}
| TransportGetCategoriesAction |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/validation/BoundInterceptorFinalTest.java | {
"start": 414,
"end": 876
} | class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(Unproxyable.class, Simple.class, SimpleInterceptor.class).shouldFail().build();
@Test
public void testFailure() {
Throwable error = container.getFailure();
assertNotNull(error);
assertTrue(error instanceof DeploymentException);
}
@Dependent
@Simple
static final | BoundInterceptorFinalTest |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/ReferenceBean.java | {
"start": 5284,
"end": 9075
} | interface ____ name
*/
// 'interfaceName' field for compatible with seata-1.4.0:
// io.seata.rm.tcc.remoting.parser.DubboRemotingParser#getServiceDesc()
private String interfaceName;
// proxy style
private String proxy;
// from annotation attributes
private Map<String, Object> referenceProps;
// from xml bean definition
private MutablePropertyValues propertyValues;
// actual reference config
private volatile ReferenceConfig referenceConfig;
// ReferenceBeanManager
private ReferenceBeanManager referenceBeanManager;
// Registration sources of this reference, may be xml file or annotation location
private List<Map<String, Object>> sources = new ArrayList<>();
public ReferenceBean() {
super();
}
public ReferenceBean(Map<String, Object> referenceProps) {
this.referenceProps = referenceProps;
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) {
this.applicationContext = applicationContext;
}
@Override
public void setBeanClassLoader(ClassLoader classLoader) {
this.beanClassLoader = classLoader;
}
@Override
public void setBeanName(String name) {
this.setId(name);
}
/**
* Create bean instance.
*
* <p></p>
* Why we need a lazy proxy?
* <p>
* <p/>
* When Spring searches beans by type, if Spring cannot determine the type of a factory bean, it may try to initialize it.
* The ReferenceBean is also a FactoryBean.
* <br/>
* (This has already been resolved by decorating the BeanDefinition: {@link DubboBeanDefinitionParser#configReferenceBean})
* <p>
* <p/>
* In addition, if some ReferenceBeans are dependent on beans that are initialized very early,
* and dubbo config beans are not ready yet, there will be many unexpected problems if initializing the dubbo reference immediately.
* <p>
* <p/>
* When it is initialized, only a lazy proxy object will be created,
* and dubbo reference-related resources will not be initialized.
* <br/>
* In this way, the influence of Spring is eliminated, and the dubbo configuration initialization is controllable.
*
* @see DubboConfigBeanInitializer
* @see ReferenceBeanManager#initReferenceBean(ReferenceBean)
* @see DubboBeanDefinitionParser#configReferenceBean
*/
@Override
public T getObject() {
if (lazyProxy == null) {
createLazyProxy();
}
return (T) lazyProxy;
}
@Override
public Class<?> getObjectType() {
return getInterfaceClass();
}
@Override
@Parameter(excluded = true)
public boolean isSingleton() {
return true;
}
@Override
public void afterPropertiesSet() throws Exception {
ConfigurableListableBeanFactory beanFactory = getBeanFactory();
// pre init xml reference bean or @DubboReference annotation
Assert.notEmptyString(getId(), "The id of ReferenceBean cannot be empty");
BeanDefinition beanDefinition = beanFactory.getBeanDefinition(getId());
if (AotWithSpringDetector.useGeneratedArtifacts()) {
this.interfaceClass =
(Class<?>) beanDefinition.getPropertyValues().get(ReferenceAttributes.INTERFACE_CLASS);
this.interfaceName = (String) beanDefinition.getPropertyValues().get(ReferenceAttributes.INTERFACE_NAME);
} else {
this.interfaceClass = (Class<?>) beanDefinition.getAttribute(ReferenceAttributes.INTERFACE_CLASS);
this.interfaceName = (String) beanDefinition.getAttribute(ReferenceAttributes.INTERFACE_NAME);
}
Assert.notNull(this.interfaceClass, "The | class |
java | netty__netty | example/src/main/java/io/netty/example/mqtt/heartBeat/MqttHeartBeatBroker.java | {
"start": 1282,
"end": 2506
} | class ____ {
private MqttHeartBeatBroker() {
}
public static void main(String[] args) throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
try {
ServerBootstrap b = new ServerBootstrap();
b.group(group);
b.option(ChannelOption.SO_BACKLOG, 1024);
b.channel(NioServerSocketChannel.class);
b.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast("encoder", MqttEncoder.INSTANCE);
ch.pipeline().addLast("decoder", new MqttDecoder());
ch.pipeline().addLast("heartBeatHandler", new IdleStateHandler(45, 0, 0, TimeUnit.SECONDS));
ch.pipeline().addLast("handler", MqttHeartBeatBrokerHandler.INSTANCE);
}
});
ChannelFuture f = b.bind(1883).sync();
System.out.println("Broker initiated...");
f.channel().closeFuture().sync();
} finally {
group.shutdownGracefully();
}
}
}
| MqttHeartBeatBroker |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/asm/ClassWriter.java | {
"start": 7018,
"end": 7075
} | class ____.
*
* @return the bytecode of the | writer |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/descriptor/ClassSource.java | {
"start": 2372,
"end": 3927
} | class ____; must not be {@code null} or blank
* @param filePosition the position in the source file; may be {@code null}
*/
public static ClassSource from(String className, @Nullable FilePosition filePosition) {
return new ClassSource(className, filePosition);
}
/**
* Create a new {@code ClassSource} using the supplied {@linkplain Class class}.
*
* @param javaClass the Java class; must not be {@code null}
*/
public static ClassSource from(Class<?> javaClass) {
return new ClassSource(javaClass);
}
/**
* Create a new {@code ClassSource} using the supplied {@linkplain Class class}
* and {@linkplain FilePosition file position}.
*
* @param javaClass the Java class; must not be {@code null}
* @param filePosition the position in the Java source file; may be {@code null}
*/
public static ClassSource from(Class<?> javaClass, FilePosition filePosition) {
return new ClassSource(javaClass, filePosition);
}
/**
* Create a new {@code ClassSource} from the supplied {@link URI}.
*
* <p>URIs should be formatted as {@code class:fully.qualified.class.Name}.
* The {@linkplain URI#getQuery() query} component of the {@code URI}, if
* present, will be used to retrieve the {@link FilePosition} via
* {@link FilePosition#fromQuery(String)}. For example, line 42 and column
* 13 can be referenced in class {@code org.example.MyType} via the following
* URI: {@code class:com.example.MyType?line=42&column=13}. The URI fragment,
* if present, will be ignored.
*
* @param uri the {@code URI} for the | name |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/cli/AbstractConnectCliTest.java | {
"start": 5466,
"end": 6767
} | class ____ extends AbstractConnectCli<Herder, DistributedConfig> {
TestConnectCli() {
super();
}
@Override
protected String usage() {
return "test";
}
@Override
protected Herder createHerder(DistributedConfig config, String workerId, Plugins plugins,
ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy,
RestServer restServer,
RestClient restClient) {
// Reaching createHerder means createConfig succeeded, indicating correct order was maintained
throw new ExpectedException();
}
@Override
protected DistributedConfig createConfig(Map<String, String> workerProps) {
DistributedConfig config = new DistributedConfig(workerProps);
// Mock kafkaClusterId() to avoid connecting to Kafka broker
DistributedConfig spyConfig = spy(config);
doReturn("test-cluster-id").when(spyConfig).kafkaClusterId();
return spyConfig;
}
}
/**
* ClassLoader that cannot load a specific class, simulating plugin classes only in plugin.path.
*/
private static | TestConnectCli |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hql/Animal.java | {
"start": 313,
"end": 1823
} | class ____ {
private Long id;
private float bodyWeight;
private Set offspring;
private Animal mother;
private Animal father;
private String description;
private Zoo zoo;
private String serialNumber;
public Animal() {
}
public Animal(String description, float bodyWeight) {
this.description = description;
this.bodyWeight = bodyWeight;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public float getBodyWeight() {
return bodyWeight;
}
public void setBodyWeight(float bodyWeight) {
this.bodyWeight = bodyWeight;
}
public Set getOffspring() {
return offspring;
}
public void addOffspring(Animal offspring) {
if ( this.offspring == null ) {
this.offspring = new HashSet();
}
this.offspring.add( offspring );
}
public void setOffspring(Set offspring) {
this.offspring = offspring;
}
public Animal getMother() {
return mother;
}
public void setMother(Animal mother) {
this.mother = mother;
}
public Animal getFather() {
return father;
}
public void setFather(Animal father) {
this.father = father;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Zoo getZoo() {
return zoo;
}
public void setZoo(Zoo zoo) {
this.zoo = zoo;
}
public String getSerialNumber() {
return serialNumber;
}
public void setSerialNumber(String serialNumber) {
this.serialNumber = serialNumber;
}
}
| Animal |
java | apache__flink | flink-core/src/test/java/org/apache/flink/util/ExceptionUtilsTest.java | {
"start": 1064,
"end": 6038
} | class ____ {
@Test
void testStringifyNullException() {
assertThat(ExceptionUtils.STRINGIFIED_NULL_EXCEPTION)
.isEqualTo(ExceptionUtils.stringifyException(null));
}
@Test
void testJvmFatalError() {
// not all errors are fatal
assertThat(ExceptionUtils.isJvmFatalError(new Error())).isFalse();
// linkage errors are not fatal
assertThat(ExceptionUtils.isJvmFatalError(new LinkageError())).isFalse();
// some errors are fatal
assertThat(ExceptionUtils.isJvmFatalError(new InternalError())).isTrue();
assertThat(ExceptionUtils.isJvmFatalError(new UnknownError())).isTrue();
}
@Test
void testRethrowFatalError() {
// fatal error is rethrown
assertThatThrownBy(() -> ExceptionUtils.rethrowIfFatalError(new InternalError()))
.isInstanceOf(InternalError.class);
// non-fatal error is not rethrown
ExceptionUtils.rethrowIfFatalError(new NoClassDefFoundError());
}
@Test
void testFindThrowableByType() {
assertThat(
ExceptionUtils.findThrowable(
new RuntimeException(new IllegalStateException()),
IllegalStateException.class))
.isPresent();
}
@Test
void testExceptionStripping() {
final FlinkException expectedException = new FlinkException("test exception");
final Throwable strippedException =
ExceptionUtils.stripException(
new RuntimeException(new RuntimeException(expectedException)),
RuntimeException.class);
assertThat(strippedException).isEqualTo(expectedException);
}
@Test
void testInvalidExceptionStripping() {
final FlinkException expectedException =
new FlinkException(new RuntimeException(new FlinkException("inner exception")));
final Throwable strippedException =
ExceptionUtils.stripException(expectedException, RuntimeException.class);
assertThat(strippedException).isEqualTo(expectedException);
}
@Test
void testTryEnrichTaskExecutorErrorCanHandleNullValueWithoutCausingException() {
ExceptionUtils.tryEnrichOutOfMemoryError(null, "", "", "");
}
@Test
void testUpdateDetailMessageOfBasicThrowable() {
Throwable rootThrowable = new OutOfMemoryError("old message");
ExceptionUtils.updateDetailMessage(rootThrowable, t -> "new message");
assertThat(rootThrowable.getMessage()).isEqualTo("new message");
}
@Test
void testUpdateDetailMessageOfRelevantThrowableAsCause() {
Throwable oomCause =
new IllegalArgumentException("another message deep down in the cause tree");
Throwable oom = new OutOfMemoryError("old message").initCause(oomCause);
oom.setStackTrace(
new StackTraceElement[] {new StackTraceElement("class", "method", "file", 1)});
oom.addSuppressed(new NullPointerException());
Throwable rootThrowable = new IllegalStateException("another message", oom);
ExceptionUtils.updateDetailMessage(
rootThrowable,
t -> t.getClass().equals(OutOfMemoryError.class) ? "new message" : null);
assertThat(rootThrowable.getCause()).isSameAs(oom);
assertThat(rootThrowable.getCause().getMessage()).isEqualTo("new message");
assertThat(rootThrowable.getCause().getStackTrace()).isEqualTo(oom.getStackTrace());
assertThat(rootThrowable.getCause().getSuppressed()).isEqualTo(oom.getSuppressed());
assertThat(rootThrowable.getCause().getCause()).isSameAs(oomCause);
}
@Test
void testUpdateDetailMessageWithoutRelevantThrowable() {
Throwable originalThrowable =
new IllegalStateException(
"root message", new IllegalArgumentException("cause message"));
ExceptionUtils.updateDetailMessage(originalThrowable, t -> null);
assertThat(originalThrowable.getMessage()).isEqualTo("root message");
assertThat(originalThrowable.getCause().getMessage()).isEqualTo("cause message");
}
@Test
void testUpdateDetailMessageOfNullWithoutException() {
ExceptionUtils.updateDetailMessage(null, t -> "new message");
}
@Test
void testUpdateDetailMessageWithMissingPredicate() {
Throwable root = new Exception("old message");
ExceptionUtils.updateDetailMessage(root, null);
assertThat(root.getMessage()).isEqualTo("old message");
}
@Test
void testIsMetaspaceOutOfMemoryErrorCanHandleNullValue() {
assertThat(ExceptionUtils.isMetaspaceOutOfMemoryError(null)).isFalse();
}
@Test
void testIsDirectOutOfMemoryErrorCanHandleNullValue() {
assertThat(ExceptionUtils.isDirectOutOfMemoryError(null)).isFalse();
}
}
| ExceptionUtilsTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java | {
"start": 28663,
"end": 36710
} | class ____ extends LocalFileSystem {
private URI uriName = null;
Path home;
TestLFS() {
this(TEST_DIR);
}
TestLFS(final Path home) {
super(new RawLocalFileSystem() {
@Override
protected Path getInitialWorkingDirectory() {
return makeQualified(home);
}
@Override
public Path getHomeDirectory() {
return makeQualified(home);
}
});
this.home = home;
}
@Override
public Path getHomeDirectory() {
return home;
}
@Override
public URI getUri() {
if(uriName == null){
return super.getUri();
} else {
return uriName;
}
}
@Override
public String getScheme() {
return "testlfs";
}
public void setUri(String uri){
uriName = URI.create(uri);
}
}
/**
* test same file deletion - multiple time
* this is more of a performance test - shouldn't be run as a unit test
* @throws IOException
*/
public static void performanceTestDeleteSameFile() throws IOException{
Path base = TEST_DIR;
Configuration conf = new Configuration();
conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
FileSystem fs = FileSystem.getLocal(conf);
conf.set("fs.defaultFS", fs.getUri().toString());
conf.setLong(FS_TRASH_INTERVAL_KEY, 10); //minutes..
FsShell shell = new FsShell();
shell.setConf(conf);
//Path trashRoot = null;
Path myPath = new Path(base, "test/mkdirs");
mkdir(fs, myPath);
// create a file in that directory.
Path myFile;
long start;
long first = 0;
int retVal = 0;
int factor = 10; // how much slower any of subsequent deletion can be
myFile = new Path(base, "test/mkdirs/myFile");
String [] args = new String[] {"-rm", myFile.toString()};
int iters = 1000;
for(int i=0;i<iters; i++) {
writeFile(fs, myFile, 10);
start = Time.now();
try {
retVal = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from Trash.run " +
e.getLocalizedMessage());
throw new IOException(e.getMessage());
}
assertTrue(retVal == 0);
long iterTime = Time.now() - start;
// take median of the first 10 runs
if(i<10) {
if(i==0) {
first = iterTime;
}
else {
first = (first + iterTime)/2;
}
}
// we don't want to print every iteration - let's do every 10th
int print_freq = iters/10;
if(i>10) {
if((i%print_freq) == 0)
System.out.println("iteration="+i+";res =" + retVal + "; start=" + start
+ "; iterTime = " + iterTime + " vs. firstTime=" + first);
long factoredTime = first*factor;
assertTrue(iterTime<factoredTime); //no more then twice of median first 10
}
}
}
public static void verifyMoveEmptyDirToTrash(FileSystem fs,
Configuration conf) throws IOException {
Path caseRoot = new Path(
GenericTestUtils.getTempPath("testUserTrash"));
Path testRoot = new Path(caseRoot, "trash-users");
Path emptyDir = new Path(testRoot, "empty-dir");
try (FileSystem fileSystem = fs){
fileSystem.mkdirs(emptyDir);
Trash trash = new Trash(fileSystem, conf);
// Make sure trash root is clean
Path trashRoot = trash.getCurrentTrashDir(emptyDir);
fileSystem.delete(trashRoot, true);
// Move to trash should be succeed
assertTrue(trash.moveToTrash(emptyDir), "Move an empty directory to trash failed");
// Verify the empty dir is removed
assertFalse(fileSystem.exists(emptyDir), "The empty directory still exists on file system");
emptyDir = fileSystem.makeQualified(emptyDir);
Path dirInTrash = Path.mergePaths(trashRoot, emptyDir);
assertTrue(fileSystem.exists(dirInTrash), "Directory wasn't moved to trash");
FileStatus[] flist = fileSystem.listStatus(dirInTrash);
assertTrue(flist!= null && flist.length == 0, "Directory is not empty");
}
}
/**
* Create a bunch of files and set with different permission, after
* moved to trash, verify the location in trash directory is expected
* and the permission is reserved.
*
* @throws IOException
*/
public static void verifyTrashPermission(FileSystem fs, Configuration conf)
throws IOException {
Path caseRoot = new Path(BASE_PATH.getPath(),
"testTrashPermission");
try (FileSystem fileSystem = fs){
Trash trash = new Trash(fileSystem, conf);
FileSystemTestWrapper wrapper =
new FileSystemTestWrapper(fileSystem);
short[] filePermssions = {
(short) 0600,
(short) 0644,
(short) 0660,
(short) 0700,
(short) 0750,
(short) 0755,
(short) 0775,
(short) 0777
};
for(int i=0; i<filePermssions.length; i++) {
// Set different permission to files
FsPermission fsPermission = new FsPermission(filePermssions[i]);
Path file = new Path(caseRoot, "file" + i);
byte[] randomBytes = new byte[new Random().nextInt(10)];
wrapper.writeFile(file, randomBytes);
wrapper.setPermission(file, fsPermission);
// Move file to trash
trash.moveToTrash(file);
// Verify the file is moved to trash, at expected location
Path trashDir = trash.getCurrentTrashDir(file);
if(!file.isAbsolute()) {
file = wrapper.makeQualified(file);
}
Path fileInTrash = Path.mergePaths(trashDir, file);
FileStatus fstat = wrapper.getFileStatus(fileInTrash);
assertTrue(wrapper.exists(fileInTrash), String.format("File %s is not moved to trash",
fileInTrash.toString()));
// Verify permission not change
assertTrue(fstat.getPermission().equals(fsPermission),
String.format("Expected file: %s is %s, but actual is %s",
fileInTrash.toString(),
fsPermission.toString(),
fstat.getPermission().toString()));
}
// Verify the trash directory can be removed
Path trashRoot = trash.getCurrentTrashDir();
assertTrue(wrapper.delete(trashRoot, true));
}
}
private void verifyDefaultPolicyIntervalValues(long trashInterval,
long checkpointInterval, long expectedInterval) throws IOException {
Configuration conf = new Configuration();
conf.setLong(FS_TRASH_INTERVAL_KEY, trashInterval);
conf.set("fs.trash.classname", TrashPolicyDefault.class.getName());
conf.setLong(FS_TRASH_CHECKPOINT_INTERVAL_KEY, checkpointInterval);
Trash trash = new Trash(conf);
Emptier emptier = (Emptier)trash.getEmptier();
assertEquals(expectedInterval, emptier.getEmptierInterval());
}
/**
* Launch the {@link Trash} emptier for given milliseconds,
* verify the number of checkpoints is expected.
*/
private void verifyAuditableTrashEmptier(Trash trash,
long timeAlive,
int expectedNumOfCheckpoints)
throws IOException {
Thread emptierThread = null;
try {
Runnable emptier = trash.getEmptier();
emptierThread = new SubjectInheritingThread(emptier);
emptierThread.start();
// Shutdown the emptier thread after a given time
Thread.sleep(timeAlive);
emptierThread.interrupt();
emptierThread.join();
AuditableTrashPolicy at = (AuditableTrashPolicy) trash.getTrashPolicy();
assertEquals(expectedNumOfCheckpoints, at.getNumberOfCheckpoints(),
String.format("Expected num of checkpoints is %s, but actual is %s",
expectedNumOfCheckpoints, at.getNumberOfCheckpoints()));
} catch (InterruptedException e) {
// Ignore
} finally {
// Avoid thread leak
if(emptierThread != null) {
emptierThread.interrupt();
}
}
}
// Test TrashPolicy. Don't care about implementation.
public static | TestLFS |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/android/WakelockReleasedDangerouslyTest.java | {
"start": 9642,
"end": 10182
} | class ____ {
void foo(WakeLock wakelock) {
wakelock.acquire(100);
try {
// BUG: Diagnostic contains: Wakelock
wakelock.release();
throw new MyOtherException();
} catch (MyOtherException e) {
}
}
}
""")
.doTest();
}
@Test
public void nestedCatch_shouldWarn() {
compilationHelper
.addSourceLines(
"MyOtherException.java", "public | TestApp |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/AbstractRequestMatcherRegistryAnyMatcherTests.java | {
"start": 1714,
"end": 3092
} | class ____ {
@Test
public void antMatchersCanNotWorkAfterAnyRequest() {
assertThatExceptionOfType(BeanCreationException.class)
.isThrownBy(() -> loadConfig(AntMatchersAfterAnyRequestConfig.class));
}
@Test
public void mvcMatchersCanNotWorkAfterAnyRequest() {
assertThatExceptionOfType(BeanCreationException.class)
.isThrownBy(() -> loadConfig(MvcMatchersAfterAnyRequestConfig.class));
}
@Test
public void regexMatchersCanNotWorkAfterAnyRequest() {
assertThatExceptionOfType(BeanCreationException.class)
.isThrownBy(() -> loadConfig(RegexMatchersAfterAnyRequestConfig.class));
}
@Test
public void anyRequestCanNotWorkAfterItself() {
assertThatExceptionOfType(BeanCreationException.class)
.isThrownBy(() -> loadConfig(AnyRequestAfterItselfConfig.class));
}
@Test
public void requestMatchersCanNotWorkAfterAnyRequest() {
assertThatExceptionOfType(BeanCreationException.class)
.isThrownBy(() -> loadConfig(RequestMatchersAfterAnyRequestConfig.class));
}
private void loadConfig(Class<?>... configs) {
AnnotationConfigWebApplicationContext context = new AnnotationConfigWebApplicationContext();
context.setAllowCircularReferences(false);
context.register(configs);
context.setServletContext(new MockServletContext());
context.refresh();
}
@Configuration
@EnableWebSecurity
static | AbstractRequestMatcherRegistryAnyMatcherTests |
java | quarkusio__quarkus | integration-tests/gradle/src/main/resources/test-fixtures-multi-module/library-1/src/main/java/org/example/MainLibrary1.java | {
"start": 29,
"end": 101
} | class ____ {
public void hereJustForDependency() {
}
} | MainLibrary1 |
java | dropwizard__dropwizard | dropwizard-validation/src/main/java/io/dropwizard/validation/ValidationMethod.java | {
"start": 743,
"end": 1285
} | interface ____ {
/**
* The validation message for this constraint.
*
* @return the message
*/
String message() default "is not valid";
/**
* The groups the constraint belongs to.
*
* @return an array of classes representing the groups
*/
Class<?>[] groups() default {};
/**
* The payloads of this constraint.
*
* @return the array of payload classes
*/
@SuppressWarnings("UnusedDeclaration") Class<? extends Payload>[] payload() default { };
}
| ValidationMethod |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.