language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerIronBankElasticsearchDistributionType.java | {
"start": 597,
"end": 904
} | class ____ implements ElasticsearchDistributionType {
DockerIronBankElasticsearchDistributionType() {}
@Override
public String getName() {
return "dockerIronBank";
}
@Override
public boolean isDocker() {
return true;
}
}
| DockerIronBankElasticsearchDistributionType |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnusedTypeParameterTest.java | {
"start": 1567,
"end": 1658
} | class ____<T> {}")
.addOutputLines(
"Test.java", //
"final | Test |
java | apache__flink | flink-formats/flink-avro/src/main/java/org/apache/flink/formats/avro/AvroFileFormatFactory.java | {
"start": 4662,
"end": 5841
} | class ____
implements BulkDecodingFormat<RowData>,
ProjectableDecodingFormat<BulkFormat<RowData, FileSourceSplit>> {
@Override
public BulkFormat<RowData, FileSourceSplit> createRuntimeDecoder(
DynamicTableSource.Context context,
DataType physicalDataType,
int[][] projections) {
// avro is a file format that keeps schemas in file headers,
// if the schema given to the reader is not equal to the schema in header,
// reader will automatically map the fields and give back records with our desired
// schema
//
// for detailed discussion see comments in https://github.com/apache/flink/pull/18657
DataType producedDataType = Projection.of(projections).project(physicalDataType);
return new AvroGenericRecordBulkFormat(
context, (RowType) producedDataType.getLogicalType().copy(false));
}
@Override
public ChangelogMode getChangelogMode() {
return ChangelogMode.insertOnly();
}
}
private static | AvroBulkDecodingFormat |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/DestroyMethodInferenceTests.java | {
"start": 6248,
"end": 6377
} | class ____ {
boolean closed = false;
public void explicitClose() {
closed = true;
}
}
static | WithExplicitDestroyMethod |
java | alibaba__nacos | client/src/test/java/com/alibaba/nacos/client/config/utils/ContentUtilsTest.java | {
"start": 984,
"end": 4133
} | class ____ {
@Test
void testVerifyIncrementPubContent() {
String content = "aabbb";
ContentUtils.verifyIncrementPubContent(content);
}
@Test
void testVerifyIncrementPubContentFail1() {
Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
String content = null;
ContentUtils.verifyIncrementPubContent(content);
});
assertTrue(exception.getMessage().contains("publish/delete content can not be null"));
}
@Test
void testVerifyIncrementPubContentFail2() {
Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
String content = "aa\rbbb";
ContentUtils.verifyIncrementPubContent(content);
});
assertTrue(exception.getMessage().contains("publish/delete content can not contain return and linefeed"));
}
@Test
void testVerifyIncrementPubContentFail3() {
Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
String content = "";
ContentUtils.verifyIncrementPubContent(content);
});
assertTrue(exception.getMessage().contains("publish/delete content can not be null"));
}
@Test
void testVerifyIncrementPubContentFail4() {
Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
String content = "aa" + WORD_SEPARATOR + "bbb";
ContentUtils.verifyIncrementPubContent(content);
});
assertTrue(exception.getMessage().contains("publish/delete content can not contain(char)2"));
}
@Test
void testGetContentIdentity() {
String content = "aa" + WORD_SEPARATOR + "bbb";
String content1 = ContentUtils.getContentIdentity(content);
assertEquals("aa", content1);
}
@Test
void testGetContentIdentityFail() {
assertThrows(IllegalArgumentException.class, () -> {
String content = "aabbb";
ContentUtils.getContentIdentity(content);
});
}
@Test
void testGetContent() {
String content = "aa" + WORD_SEPARATOR + "bbb";
String content1 = ContentUtils.getContent(content);
assertEquals("bbb", content1);
}
@Test
void testGetContentFail() {
assertThrows(IllegalArgumentException.class, () -> {
String content = "aabbb";
ContentUtils.getContent(content);
});
}
@Test
void testTruncateContent() {
String content = "aa";
String actual = ContentUtils.truncateContent(content);
assertEquals(content, actual);
}
@Test
void testTruncateLongContent() {
char[] arr = new char[101];
Arrays.fill(arr, 'a');
String content = new String(arr);
String actual = ContentUtils.truncateContent(content);
assertEquals(content.substring(0, 100) + "...", actual);
}
@Test
void testTruncateContentNull() {
String actual = ContentUtils.truncateContent(null);
assertEquals("", actual);
}
} | ContentUtilsTest |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/CloseablePathTests.java | {
"start": 10136,
"end": 17295
} | class ____ {
private final TempDirFactory factory = spy(TempDirFactory.Standard.INSTANCE);
@AfterEach
void cleanupTempDirectory() throws IOException {
if (closeablePath != null) {
deleteIfExists(closeablePath.get());
}
}
@DisplayName("is done for a cleanup mode of ALWAYS")
@ParameterizedTest
@ElementTypeSource
void always(Class<?> elementType, @TrackLogRecords LogRecordListener listener) throws IOException {
reset(factory);
closeablePath = TempDirectory.createTempDir(factory, ALWAYS, elementType, elementContext, extensionContext);
assertThat(closeablePath.get()).isDirectory();
closeablePath.close();
verify(factory).close();
assertThat(closeablePath.get()).doesNotExist();
assertThat(listener.stream(Level.INFO)).map(LogRecord::getMessage)//
.noneMatch(m -> m.startsWith("Skipping cleanup of temp dir"));
}
@DisplayName("is not done for a cleanup mode of NEVER")
@ParameterizedTest
@ElementTypeSource
void never(Class<?> elementType, @TrackLogRecords LogRecordListener listener) throws Exception {
reset(factory);
when(elementContext.getAnnotatedElement()).thenReturn(TestCase.class.getDeclaredField("tempDir"));
closeablePath = TempDirectory.createTempDir(factory, NEVER, elementType, elementContext, extensionContext);
assertThat(closeablePath.get()).isDirectory();
closeablePath.close();
verify(factory).close();
assertThat(closeablePath.get()).exists();
assertThat(listener.stream(Level.INFO)).map(LogRecord::getMessage)//
.anyMatch(m -> m.startsWith("Skipping cleanup of temp dir ")
&& m.endsWith(" for field TestCase.tempDir due to CleanupMode.NEVER."));
}
@DisplayName("is not done for a cleanup mode of ON_SUCCESS, if there is an exception (for annotated field)")
@ParameterizedTest
@ElementTypeSource
void onSuccessWithExceptionForAnnotatedField(Class<?> elementType, @TrackLogRecords LogRecordListener listener)
throws Exception {
Field field = TestCase.class.getDeclaredField("tempDir");
onSuccessWithException(elementType, listener, field,
" for field TestCase.tempDir due to CleanupMode.ON_SUCCESS.");
}
@DisplayName("is not done for a cleanup mode of ON_SUCCESS, if there is an exception (for annotated method parameter)")
@ParameterizedTest
@ElementTypeSource
void onSuccessWithExceptionForAnnotatedMethodParameter(Class<?> elementType,
@TrackLogRecords LogRecordListener listener) throws Exception {
Method method = TestCase.class.getDeclaredMethod("test", TestInfo.class, Path.class);
Parameter parameter = method.getParameters()[1];
onSuccessWithException(elementType, listener, parameter,
"for parameter 'tempDir' in method test(TestInfo, Path) due to CleanupMode.ON_SUCCESS.");
}
@DisplayName("is not done for a cleanup mode of ON_SUCCESS, if there is an exception (for annotated constructor parameter)")
@ParameterizedTest
@ElementTypeSource
void onSuccessWithExceptionForAnnotatedConstructorParameter(Class<?> elementType,
@TrackLogRecords LogRecordListener listener) throws Exception {
Constructor<?> constructor = TestCase.class.getDeclaredConstructor(TestInfo.class, Path.class);
Parameter parameter = constructor.getParameters()[1];
onSuccessWithException(elementType, listener, parameter,
"for parameter 'tempDir' in constructor TestCase(TestInfo, Path) due to CleanupMode.ON_SUCCESS.");
}
private void onSuccessWithException(Class<?> elementType, @TrackLogRecords LogRecordListener listener,
AnnotatedElement annotatedElement, String expectedMessage) throws Exception {
reset(factory);
when(extensionContext.getExecutionException()).thenReturn(Optional.of(new Exception()));
when(elementContext.getAnnotatedElement()).thenReturn(annotatedElement);
closeablePath = TempDirectory.createTempDir(factory, ON_SUCCESS, elementType, elementContext,
extensionContext);
assertThat(closeablePath.get()).isDirectory();
closeablePath.close();
verify(factory).close();
assertThat(closeablePath.get()).exists();
assertThat(listener.stream(Level.INFO)).map(LogRecord::getMessage)//
.anyMatch(m -> m.startsWith("Skipping cleanup of temp dir ") && m.endsWith(expectedMessage));
}
@DisplayName("is done for a cleanup mode of ON_SUCCESS, if there is no exception")
@ParameterizedTest
@ElementTypeSource
void onSuccessWithNoException(Class<?> elementType, @TrackLogRecords LogRecordListener listener)
throws IOException {
reset(factory);
when(extensionContext.getExecutionException()).thenReturn(Optional.empty());
closeablePath = TempDirectory.createTempDir(factory, ON_SUCCESS, elementType, elementContext,
extensionContext);
assertThat(closeablePath.get()).isDirectory();
closeablePath.close();
verify(factory).close();
assertThat(closeablePath.get()).doesNotExist();
assertThat(listener.stream(Level.INFO)).map(LogRecord::getMessage)//
.noneMatch(m -> m.startsWith("Skipping cleanup of temp dir"));
}
@DisplayName("deletes symbolic links targeting directory inside temp dir")
@ParameterizedTest
@ElementTypeSource
@DisabledOnOs(WINDOWS)
void deletesSymbolicLinksTargetingDirInsideTempDir(Class<?> elementType,
@TrackLogRecords LogRecordListener listener) throws IOException {
reset(factory);
closeablePath = TempDirectory.createTempDir(factory, ON_SUCCESS, elementType, elementContext,
extensionContext);
var rootDir = closeablePath.get();
assertThat(rootDir).isDirectory();
var subDir = createDirectory(rootDir.resolve("subDir"));
Files.createFile(subDir.resolve("file"));
Files.createSymbolicLink(rootDir.resolve("symbolicLink"), subDir);
closeablePath.close();
verify(factory).close();
assertThat(rootDir).doesNotExist();
assertThat(listener.stream(Level.WARNING)).map(LogRecord::getMessage).isEmpty();
}
@DisplayName("deletes symbolic links targeting directory outside temp dir")
@ParameterizedTest
@ElementTypeSource
@DisabledOnOs(WINDOWS)
void deletesSymbolicLinksTargetingDirOutsideTempDir(Class<?> elementType,
@TrackLogRecords LogRecordListener listener) throws IOException {
reset(factory);
closeablePath = TempDirectory.createTempDir(factory, ON_SUCCESS, elementType, elementContext,
extensionContext);
var rootDir = closeablePath.get();
assertThat(rootDir).isDirectory();
var directoryOutsideTempDir = createTempDirectory("junit-");
try {
var symbolicLink = createSymbolicLink(rootDir.resolve("symbolicLink"), directoryOutsideTempDir);
closeablePath.close();
verify(factory).close();
assertThat(rootDir).doesNotExist();
assertThat(directoryOutsideTempDir).isDirectory();
assertThat(listener.stream(Level.WARNING)) //
.map(LogRecord::getMessage) //
.contains(("Deleting symbolic link from location inside of temp dir (%s) "
+ "to location outside of temp dir (%s) but not the target file/directory").formatted(
symbolicLink, directoryOutsideTempDir.toRealPath()));
}
finally {
Files.deleteIfExists(directoryOutsideTempDir);
}
}
}
@NullUnmarked
static | Cleanup |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java | {
"start": 35603,
"end": 36478
} | class ____ implements SourceFunction<Integer> {
private final long initialTime;
private final int numWatermarks;
private volatile boolean running = true;
public MyTimestampSourceInfinite(long initialTime, int numWatermarks) {
this.initialTime = initialTime;
this.numWatermarks = numWatermarks;
}
@Override
public void run(SourceContext<Integer> ctx) throws Exception {
for (int i = 0; i < numWatermarks; i++) {
ctx.collectWithTimestamp(i, initialTime + i);
ctx.emitWatermark(new Watermark(initialTime + i));
}
while (running) {
Thread.sleep(20);
}
}
@Override
public void cancel() {
running = false;
}
}
private static | MyTimestampSourceInfinite |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/ClientOptionsBase.java | {
"start": 6334,
"end": 11174
} | interface ____ bind for network connections. When the local address is null,
* it will pick any local address, the default local address is null.
*
* @param localAddress the local address
* @return a reference to this, so the API can be used fluently
*/
public ClientOptionsBase setLocalAddress(String localAddress) {
this.localAddress = localAddress;
return this;
}
@Override
public ClientOptionsBase setLogActivity(boolean logEnabled) {
return (ClientOptionsBase) super.setLogActivity(logEnabled);
}
@Override
public ClientOptionsBase setActivityLogDataFormat(ByteBufFormat activityLogDataFormat) {
return (ClientOptionsBase) super.setActivityLogDataFormat(activityLogDataFormat);
}
@Override
public ClientOptionsBase setTcpNoDelay(boolean tcpNoDelay) {
return (ClientOptionsBase) super.setTcpNoDelay(tcpNoDelay);
}
@Override
public ClientOptionsBase setTcpKeepAlive(boolean tcpKeepAlive) {
return (ClientOptionsBase) super.setTcpKeepAlive(tcpKeepAlive);
}
@Override
public ClientOptionsBase setSoLinger(int soLinger) {
return (ClientOptionsBase) super.setSoLinger(soLinger);
}
@Override
public ClientOptionsBase setIdleTimeout(int idleTimeout) {
return (ClientOptionsBase) super.setIdleTimeout(idleTimeout);
}
@Override
public ClientOptionsBase setReadIdleTimeout(int idleTimeout) {
return (ClientOptionsBase) super.setReadIdleTimeout(idleTimeout);
}
@Override
public ClientOptionsBase setWriteIdleTimeout(int idleTimeout) {
return (ClientOptionsBase) super.setWriteIdleTimeout(idleTimeout);
}
@Override
public ClientOptionsBase setIdleTimeoutUnit(TimeUnit idleTimeoutUnit) {
return (ClientOptionsBase) super.setIdleTimeoutUnit(idleTimeoutUnit);
}
@Override
public ClientOptionsBase setSsl(boolean ssl) {
return (ClientOptionsBase) super.setSsl(ssl);
}
@Override
public ClientOptionsBase setKeyCertOptions(KeyCertOptions options) {
return (ClientOptionsBase) super.setKeyCertOptions(options);
}
@Override
public ClientOptionsBase setTrustOptions(TrustOptions options) {
return (ClientOptionsBase) super.setTrustOptions(options);
}
@Override
public ClientOptionsBase setUseAlpn(boolean useAlpn) {
return (ClientOptionsBase) super.setUseAlpn(useAlpn);
}
@Override
public ClientOptionsBase setSslEngineOptions(SSLEngineOptions sslEngineOptions) {
return (ClientOptionsBase) super.setSslEngineOptions(sslEngineOptions);
}
@Override
public ClientOptionsBase setSendBufferSize(int sendBufferSize) {
return (ClientOptionsBase) super.setSendBufferSize(sendBufferSize);
}
@Override
public ClientOptionsBase setReceiveBufferSize(int receiveBufferSize) {
return (ClientOptionsBase) super.setReceiveBufferSize(receiveBufferSize);
}
@Override
public ClientOptionsBase setReuseAddress(boolean reuseAddress) {
return (ClientOptionsBase) super.setReuseAddress(reuseAddress);
}
@Override
public ClientOptionsBase setReusePort(boolean reusePort) {
return (ClientOptionsBase) super.setReusePort(reusePort);
}
@Override
public ClientOptionsBase setTrafficClass(int trafficClass) {
return (ClientOptionsBase) super.setTrafficClass(trafficClass);
}
@Override
public ClientOptionsBase addEnabledCipherSuite(String suite) {
return (ClientOptionsBase) super.addEnabledCipherSuite(suite);
}
@Override
public ClientOptionsBase removeEnabledCipherSuite(String suite) {
return (ClientOptionsBase) super.removeEnabledCipherSuite(suite);
}
@Override
public ClientOptionsBase addCrlPath(String crlPath) throws NullPointerException {
return (ClientOptionsBase) super.addCrlPath(crlPath);
}
@Override
public ClientOptionsBase addCrlValue(Buffer crlValue) throws NullPointerException {
return (ClientOptionsBase) super.addCrlValue(crlValue);
}
@Override
public ClientOptionsBase addEnabledSecureTransportProtocol(String protocol) {
return (ClientOptionsBase) super.addEnabledSecureTransportProtocol(protocol);
}
@Override
public ClientOptionsBase removeEnabledSecureTransportProtocol(String protocol) {
return (ClientOptionsBase) super.removeEnabledSecureTransportProtocol(protocol);
}
@Override
public ClientOptionsBase setTcpFastOpen(boolean tcpFastOpen) {
return (ClientOptionsBase) super.setTcpFastOpen(tcpFastOpen);
}
@Override
public ClientOptionsBase setTcpCork(boolean tcpCork) {
return (ClientOptionsBase) super.setTcpCork(tcpCork);
}
@Override
public ClientOptionsBase setTcpQuickAck(boolean tcpQuickAck) {
return (ClientOptionsBase) super.setTcpQuickAck(tcpQuickAck);
}
@Override
public ClientOptionsBase setTcpUserTimeout(int tcpUserTimeout) {
return (ClientOptionsBase) super.setTcpUserTimeout(tcpUserTimeout);
}
}
| to |
java | google__dagger | javatests/dagger/functional/subcomponent/SubcomponentFactoryMethodTest.java | {
"start": 1115,
"end": 1332
} | class ____ {
final String s;
StringModule(String s) {
this.s = s;
}
@Provides
String provideString(int i) {
return s + i;
}
}
@Component(modules = IntModule.class)
| StringModule |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/core/CompletableConverter.java | {
"start": 943,
"end": 1252
} | interface ____<@NonNull R> {
/**
* Applies a function to the upstream Completable and returns a converted value of type {@code R}.
*
* @param upstream the upstream Completable instance
* @return the converted value
*/
R apply(@NonNull Completable upstream);
}
| CompletableConverter |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/Constants.java | {
"start": 5353,
"end": 6013
} | class ____.
*/
private Constants() {}
private static int getMajorVersion() {
return getMajorVersion(System.getProperty("java.version"));
}
static int getMajorVersion(final String version) {
// Split into `major.minor.rest`
final String[] parts = version.split("-|\\.", 3);
boolean isJEP223;
try {
final int token = Integer.parseInt(parts[0]);
isJEP223 = token != 1;
if (isJEP223) {
return token;
}
return Integer.parseInt(parts[1]);
} catch (final Exception ex) {
return 0;
}
}
}
| instantiation |
java | elastic__elasticsearch | x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/DataLoadParamsTests.java | {
"start": 398,
"end": 1248
} | class ____ extends ESTestCase {
public void testGetStart() {
assertEquals("", new DataLoadParams(TimeRange.builder().build(), Optional.empty()).getStart());
assertEquals("3", new DataLoadParams(TimeRange.builder().startTime("3").build(), Optional.empty()).getStart());
}
public void testGetEnd() {
assertEquals("", new DataLoadParams(TimeRange.builder().build(), Optional.empty()).getEnd());
assertEquals("1", new DataLoadParams(TimeRange.builder().endTime("1").build(), Optional.empty()).getEnd());
}
public void testIsResettingBuckets() {
assertFalse(new DataLoadParams(TimeRange.builder().build(), Optional.empty()).isResettingBuckets());
assertTrue(new DataLoadParams(TimeRange.builder().startTime("5").build(), Optional.empty()).isResettingBuckets());
}
}
| DataLoadParamsTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/typeoverride/TypeOverrideTest.java | {
"start": 1077,
"end": 3872
} | class ____ extends BaseSessionFactoryFunctionalTest {
@Override
protected String[] getOrmXmlFiles() {
return new String[] { "org/hibernate/orm/test/typeoverride/Entity.hbm.xml" };
}
@Override
protected void applyMetadataBuilder(MetadataBuilder metadataBuilder) {
metadataBuilder.applyBasicType( StoredPrefixedStringType.INSTANCE );
}
@Test
public void testStandardBasicSqlTypeDescriptor() {
final Dialect dialect = getMetadata().getDatabase().getDialect();
final JdbcTypeRegistry jdbcTypeRegistry = getMetadata().getTypeConfiguration()
.getJdbcTypeRegistry();
// no override
assertSame( IntegerJdbcType.INSTANCE, jdbcTypeRegistry.getDescriptor( Types.INTEGER ) );
// A few dialects explicitly override BlobTypeDescriptor.DEFAULT
if ( CockroachDialect.class.isInstance( dialect ) ) {
assertSame(
BlobJdbcType.MATERIALIZED,
jdbcTypeRegistry.getDescriptor( Types.BLOB )
);
}
else if ( PostgreSQLDialect.class.isInstance( dialect ) ) {
assertSame(
BlobJdbcType.BLOB_BINDING,
jdbcTypeRegistry.getDescriptor( Types.BLOB )
);
}
else if ( GaussDBDialect.class.isInstance( dialect ) ) {
assertSame(
BlobJdbcType.BLOB_BINDING,
jdbcTypeRegistry.getDescriptor( Types.BLOB )
);
}
else if ( SybaseDialect.class.isInstance( dialect ) ) {
assertSame(
BlobJdbcType.PRIMITIVE_ARRAY_BINDING,
jdbcTypeRegistry.getDescriptor( Types.BLOB )
);
}
else if ( HANADialect.class.isInstance( dialect ) ) {
Assertions.assertInstanceOf(
HANADialect.HANABlobType.class,
jdbcTypeRegistry.getDescriptor( Types.BLOB )
);
}
else {
assertSame(
BlobJdbcType.DEFAULT,
jdbcTypeRegistry.getDescriptor( Types.BLOB )
);
}
}
@AfterEach
public void tearDown() {
sessionFactory().getSchemaManager().truncate();
}
@Test
public void testInsert() {
Entity e = new Entity( "name" );
inTransaction(
session ->
session.persist( e )
);
inTransaction(
session -> {
Entity entity = session.get( Entity.class, e.getId() );
assertFalse( entity.getName().startsWith( StoredPrefixedStringType.PREFIX ) );
assertEquals( "name", entity.getName() );
session.remove( entity );
}
);
}
@Test
@SkipForDialect(dialectClass = SybaseDialect.class, matchSubTypes = true, reason = "HHH-6426")
public void testRegisteredFunction() {
Entity e = new Entity( "name " );
inTransaction(
session ->
session.persist( e )
);
inTransaction(
session -> {
Entity entity = session.get( Entity.class, e.getId() );
assertFalse( entity.getName().startsWith( StoredPrefixedStringType.PREFIX ) );
assertEquals( "name ", entity.getName() );
}
);
inTransaction(
session ->
session.remove( e )
);
}
}
| TypeOverrideTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/persister/entity/AbstractEntityPersister.java | {
"start": 164235,
"end": 177611
} | class ____ implements CacheEntryHelper {
public static final NoopCacheEntryHelper INSTANCE = new NoopCacheEntryHelper();
@Override
public CacheEntryStructure getCacheEntryStructure() {
return UnstructuredCacheEntry.INSTANCE;
}
@Override
public CacheEntry buildCacheEntry(Object entity, Object[] state, Object version, SharedSessionContractImplementor session) {
throw new HibernateException( "Illegal attempt to build cache entry for non-cached entity" );
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// org.hibernate.metamodel.mapping.EntityMappingType
@Override
public void forEachAttributeMapping(Consumer<? super AttributeMapping> action) {
this.attributeMappings.forEach( action );
}
@Override
public void forEachAttributeMapping(IndexedConsumer<? super AttributeMapping> consumer) {
attributeMappings.indexedForEach( consumer );
}
@Override
public void prepareMappingModel(MappingModelCreationProcess creationProcess) {
if ( identifierMapping == null ) {
prepareMappings( creationProcess );
handleSubtypeMappings( creationProcess );
prepareMultiTableMutationStrategy( creationProcess );
prepareMultiTableInsertStrategy( creationProcess );
}
}
private void handleSubtypeMappings(MappingModelCreationProcess creationProcess) {
// Register a callback for after all `#prepareMappingModel` calls have finished. Here we want to delay the
// generation of `staticFetchableList` because we need to wait until after all subclasses have had their
// `#prepareMappingModel` called (and their declared attribute mappings resolved)
creationProcess.registerInitializationCallback(
"Entity(" + getEntityName() + ") `staticFetchableList` generator",
() -> {
final var builder = new ImmutableAttributeMappingList.Builder( attributeMappings.size() );
visitSubTypeAttributeMappings( builder::add );
assert superMappingType != null || builder.assertFetchableIndexes();
staticFetchableList = builder.build();
return true;
}
);
}
private static ReflectionOptimizer.AccessOptimizer accessOptimizer(EntityRepresentationStrategy strategy) {
final var reflectionOptimizer = strategy.getReflectionOptimizer();
return reflectionOptimizer == null ? null : reflectionOptimizer.getAccessOptimizer();
}
private void prepareMappings(MappingModelCreationProcess creationProcess) {
final var persistentClass =
creationProcess.getCreationContext().getBootModel()
.getEntityBinding( getEntityName() );
initializeSpecialAttributeMappings( creationProcess, persistentClass );
versionGenerator = createVersionGenerator( super.getVersionGenerator(), versionMapping );
buildDeclaredAttributeMappings( creationProcess, persistentClass );
getAttributeMappings();
initializeNaturalIdMapping( creationProcess, persistentClass );
}
private void initializeSpecialAttributeMappings
(MappingModelCreationProcess creationProcess, PersistentClass bootEntityDescriptor) {
if ( superMappingType != null ) {
( (InFlightEntityMappingType) superMappingType ).prepareMappingModel( creationProcess );
if ( shouldProcessSuperMapping() ) {
inheritSupertypeSpecialAttributeMappings();
}
else {
prepareMappingModel( creationProcess, bootEntityDescriptor );
}
}
else {
prepareMappingModel( creationProcess, bootEntityDescriptor );
}
}
private void inheritSupertypeSpecialAttributeMappings() {
discriminatorMapping = superMappingType.getDiscriminatorMapping();
identifierMapping = superMappingType.getIdentifierMapping();
naturalIdMapping = superMappingType.getNaturalIdMapping();
versionMapping = superMappingType.getVersionMapping();
rowIdMapping = superMappingType.getRowIdMapping();
softDeleteMapping = superMappingType.getSoftDeleteMapping();
}
private void buildDeclaredAttributeMappings
(MappingModelCreationProcess creationProcess, PersistentClass bootEntityDescriptor) {
final var properties = getProperties();
final var mappingsBuilder = AttributeMappingsMap.builder();
int stateArrayPosition = getStateArrayInitialPosition( creationProcess );
int fetchableIndex = getFetchableIndexOffset();
for ( int i = 0; i < getPropertySpan(); i++ ) {
final var runtimeAttributeDefinition = properties[i];
final String attributeName = runtimeAttributeDefinition.getName();
final var bootProperty = bootEntityDescriptor.getProperty( attributeName );
if ( superMappingType == null
|| superMappingType.findAttributeMapping( bootProperty.getName() ) == null ) {
mappingsBuilder.put(
attributeName,
generateNonIdAttributeMapping(
runtimeAttributeDefinition,
bootProperty,
stateArrayPosition++,
fetchableIndex++,
creationProcess
)
);
}
declaredAttributeMappings = mappingsBuilder.build();
// otherwise, it's defined on the supertype, skip it here
}
}
private static @Nullable BeforeExecutionGenerator createVersionGenerator(
@Nullable BeforeExecutionGenerator configuredGenerator,
@Nullable EntityVersionMapping versionMapping) {
if ( versionMapping != null ) {
// need to do this here because EntityMetamodel doesn't have the EntityVersionMapping :-(
return configuredGenerator == null ? new VersionGeneration( versionMapping ) : configuredGenerator;
}
else {
return configuredGenerator;
}
}
private void prepareMultiTableMutationStrategy(MappingModelCreationProcess creationProcess) {
// No need for multi-table mutation strategy for subselect entity since update/delete don't make sense
if ( !isSubselect() && hasMultipleTables() ) {
creationProcess.registerInitializationCallback(
"Entity(" + getEntityName() + ") `sqmMultiTableMutationStrategy` interpretation",
() -> {
sqmMultiTableMutationStrategy =
interpretSqmMultiTableStrategy( this, creationProcess );
if ( sqmMultiTableMutationStrategy == null ) {
return false;
}
else {
sqmMultiTableMutationStrategy.prepare( creationProcess );
return true;
}
}
);
}
}
private void prepareMultiTableInsertStrategy(MappingModelCreationProcess creationProcess) {
// No need for multi-table insert strategy for subselect entity since insert doesn't make sense
if ( !isSubselect() && ( hasMultipleTables() || generatorNeedsMultiTableInsert() ) ) {
creationProcess.registerInitializationCallback(
"Entity(" + getEntityName() + ") `sqmMultiTableInsertStrategy` interpretation",
() -> {
sqmMultiTableInsertStrategy =
interpretSqmMultiTableInsertStrategy( this, creationProcess );
if ( sqmMultiTableInsertStrategy == null ) {
return false;
}
else {
sqmMultiTableInsertStrategy.prepare( creationProcess );
return true;
}
}
);
}
}
private boolean isSubselect() {
// For the lack of a
return getRootTableName().charAt( 0 ) == '(';
}
private boolean generatorNeedsMultiTableInsert() {
final var generator = getGenerator();
if ( generator instanceof BulkInsertionCapableIdentifierGenerator
&& generator instanceof OptimizableGenerator optimizableGenerator ) {
final var optimizer = optimizableGenerator.getOptimizer();
return optimizer != null && optimizer.getIncrementSize() > 1;
}
else {
return false;
}
}
private int getFetchableIndexOffset() {
if ( superMappingType != null ) {
final var rootEntityDescriptor = getRootEntityDescriptor();
int offset = rootEntityDescriptor.getNumberOfDeclaredAttributeMappings();
for ( var subMappingType : rootEntityDescriptor.getSubMappingTypes() ) {
if ( subMappingType == this ) {
break;
}
// Determining the number of attribute mappings unfortunately has to be done this way,
// because calling `subMappingType.getNumberOfDeclaredAttributeMappings()` at this point
// may produce wrong results because subMappingType might not have completed prepareMappingModel yet
final int propertySpan =
subMappingType.getEntityPersister().getPropertySpan();
final int superPropertySpan =
subMappingType.getSuperMappingType().getEntityPersister().getPropertySpan();
final int numberOfDeclaredAttributeMappings = propertySpan - superPropertySpan;
offset += numberOfDeclaredAttributeMappings;
}
return offset;
}
return 0;
}
private void prepareMappingModel(MappingModelCreationProcess creationProcess, PersistentClass bootEntityDescriptor) {
final var instantiator = getRepresentationStrategy().getInstantiator();
final Supplier<?> instantiate = instantiator.canBeInstantiated() ? instantiator::instantiate : null;
identifierMapping =
creationProcess.processSubPart( EntityIdentifierMapping.ID_ROLE_NAME,
(role, process) -> generateIdentifierMapping( instantiate, bootEntityDescriptor, process ) );
versionMapping = generateVersionMapping( instantiate, bootEntityDescriptor, creationProcess );
rowIdMapping = rowIdName == null ? null
: creationProcess.processSubPart( rowIdName,
(role, process) -> new EntityRowIdMappingImpl( rowIdName, getTableName(), this ) );
discriminatorMapping = generateDiscriminatorMapping( bootEntityDescriptor );
final var rootClass = bootEntityDescriptor.getRootClass();
softDeleteMapping =
resolveSoftDeleteMapping( this, rootClass, getIdentifierTableName(), creationProcess );
if ( softDeleteMapping != null && rootClass.getCustomSQLDelete() != null ) {
throw new UnsupportedMappingException( "Entity may not define both @SoftDelete and @SQLDelete" );
}
}
private void initializeNaturalIdMapping
(MappingModelCreationProcess creationProcess, PersistentClass bootEntityDescriptor) {
if ( superMappingType != null ) {
naturalIdMapping = superMappingType.getNaturalIdMapping();
}
else if ( bootEntityDescriptor.hasNaturalId() ) {
naturalIdMapping = generateNaturalIdMapping( creationProcess, bootEntityDescriptor );
}
else {
naturalIdMapping = null;
}
}
protected NaturalIdMapping generateNaturalIdMapping
(MappingModelCreationProcess creationProcess, PersistentClass bootEntityDescriptor) {
//noinspection AssertWithSideEffects
assert bootEntityDescriptor.hasNaturalId();
final int[] naturalIdAttributeIndexes = getNaturalIdentifierProperties();
assert naturalIdAttributeIndexes.length > 0;
if ( naturalIdAttributeIndexes.length == 1 ) {
final String propertyName = getPropertyNames()[ naturalIdAttributeIndexes[ 0 ] ];
final var attributeMapping = (SingularAttributeMapping) findAttributeMapping( propertyName );
return new SimpleNaturalIdMapping( attributeMapping, this, creationProcess );
}
// collect the names of the attributes making up the natural-id.
final Set<String> attributeNames = setOfSize( naturalIdAttributeIndexes.length );
for ( int naturalIdAttributeIndex : naturalIdAttributeIndexes ) {
attributeNames.add( getPropertyNames()[ naturalIdAttributeIndex ] );
}
// then iterate over the attribute mappings finding the ones having names
// in the collected names. iterate here because it is already alphabetical
final List<SingularAttributeMapping> collectedAttrMappings = new ArrayList<>();
for ( int i = 0; i < attributeMappings.size(); i++ ) {
final var attributeMapping = attributeMappings.get( i );
if ( attributeNames.contains( attributeMapping.getAttributeName() ) ) {
collectedAttrMappings.add( (SingularAttributeMapping) attributeMapping );
}
}
if ( collectedAttrMappings.size() <= 1 ) {
throw new MappingException( "Expected multiple natural-id attributes, but found only one: " + getEntityName() );
}
return new CompoundNaturalIdMapping(this, collectedAttrMappings, creationProcess );
}
protected static SqmMultiTableMutationStrategy interpretSqmMultiTableStrategy(
AbstractEntityPersister entityMappingDescriptor,
MappingModelCreationProcess creationProcess) {
assert entityMappingDescriptor.hasMultipleTables();
final var superMappingType = entityMappingDescriptor.getSuperMappingType();
if ( superMappingType != null ) {
final var sqmMultiTableMutationStrategy =
superMappingType.getSqmMultiTableMutationStrategy();
if ( sqmMultiTableMutationStrategy != null ) {
return sqmMultiTableMutationStrategy;
}
}
return creationProcess.getCreationContext().getServiceRegistry()
.requireService( SqmMultiTableMutationStrategyProvider.class )
.createMutationStrategy( entityMappingDescriptor, creationProcess );
}
protected static SqmMultiTableInsertStrategy interpretSqmMultiTableInsertStrategy(
AbstractEntityPersister entityMappingDescriptor,
MappingModelCreationProcess creationProcess) {
return creationProcess.getCreationContext().getServiceRegistry()
.requireService( SqmMultiTableMutationStrategyProvider.class )
.createInsertStrategy( entityMappingDescriptor, creationProcess );
}
@Override
public SqmMultiTableMutationStrategy getSqmMultiTableMutationStrategy() {
return sqmMultiTableMutationStrategy;
}
@Override
public SqmMultiTableInsertStrategy getSqmMultiTableInsertStrategy() {
return sqmMultiTableInsertStrategy;
}
protected int getStateArrayInitialPosition(MappingModelCreationProcess creationProcess) {
// todo (6.0) not sure this is correct in case of SingleTable Inheritance
// and for Table per | NoopCacheEntryHelper |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/mysql/ast/statement/MySqlRenameTableStatement.java | {
"start": 972,
"end": 1522
} | class ____ extends MySqlStatementImpl implements SQLAlterStatement {
private List<Item> items = new ArrayList<Item>(2);
public List<Item> getItems() {
return items;
}
public void addItem(Item item) {
if (item != null) {
item.setParent(this);
}
this.items.add(item);
}
public void accept0(MySqlASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, items);
}
visitor.endVisit(this);
}
public static | MySqlRenameTableStatement |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/graphs/LoadEntityGraphWithCompositeKeyCollectionsTest.java | {
"start": 6713,
"end": 7465
} | class ____ {
private Integer exerciseId;
@Column(name = "activity_id")
private String activityId;
public ActivityExerciseId() {
}
public ActivityExerciseId(Integer exerciseId, String activityId) {
this.exerciseId = exerciseId;
this.activityId = activityId;
}
@Override
public boolean equals(Object o) {
if ( o == null || getClass() != o.getClass() ) {
return false;
}
ActivityExerciseId that = (ActivityExerciseId) o;
return Objects.equals( exerciseId, that.exerciseId ) && Objects.equals( activityId, that.activityId );
}
@Override
public int hashCode() {
return Objects.hash( exerciseId, activityId );
}
}
@Entity(name = "Exercise")
@Table(name = "exercises")
public static | ActivityExerciseId |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/PulsarEndpointBuilderFactory.java | {
"start": 1590,
"end": 27744
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedPulsarEndpointConsumerBuilder advanced() {
return (AdvancedPulsarEndpointConsumerBuilder) this;
}
/**
* The Authentication FQCN to be used while creating the client from
* URI.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param authenticationClass the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder authenticationClass(String authenticationClass) {
doSetProperty("authenticationClass", authenticationClass);
return this;
}
/**
* The Authentication Parameters to be used while creating the client
* from URI.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param authenticationParams the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder authenticationParams(String authenticationParams) {
doSetProperty("authenticationParams", authenticationParams);
return this;
}
/**
* The Pulsar Service URL to point while creating the client from URI.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param serviceUrl the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder serviceUrl(String serviceUrl) {
doSetProperty("serviceUrl", serviceUrl);
return this;
}
/**
* Group the consumer acknowledgments for the specified time in
* milliseconds - defaults to 100.
*
* The option is a: <code>long</code> type.
*
* Default: 100
* Group: consumer
*
* @param ackGroupTimeMillis the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder ackGroupTimeMillis(long ackGroupTimeMillis) {
doSetProperty("ackGroupTimeMillis", ackGroupTimeMillis);
return this;
}
/**
* Group the consumer acknowledgments for the specified time in
* milliseconds - defaults to 100.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 100
* Group: consumer
*
* @param ackGroupTimeMillis the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder ackGroupTimeMillis(String ackGroupTimeMillis) {
doSetProperty("ackGroupTimeMillis", ackGroupTimeMillis);
return this;
}
/**
* Timeout for unacknowledged messages in milliseconds - defaults to
* 10000.
*
* The option is a: <code>long</code> type.
*
* Default: 10000
* Group: consumer
*
* @param ackTimeoutMillis the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder ackTimeoutMillis(long ackTimeoutMillis) {
doSetProperty("ackTimeoutMillis", ackTimeoutMillis);
return this;
}
/**
* Timeout for unacknowledged messages in milliseconds - defaults to
* 10000.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 10000
* Group: consumer
*
* @param ackTimeoutMillis the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder ackTimeoutMillis(String ackTimeoutMillis) {
doSetProperty("ackTimeoutMillis", ackTimeoutMillis);
return this;
}
/**
* RedeliveryBackoff to use for ack timeout redelivery backoff.
*
* The option is a:
* <code>org.apache.pulsar.client.api.RedeliveryBackoff</code> type.
*
* Group: consumer
*
* @param ackTimeoutRedeliveryBackoff the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder ackTimeoutRedeliveryBackoff(org.apache.pulsar.client.api.RedeliveryBackoff ackTimeoutRedeliveryBackoff) {
doSetProperty("ackTimeoutRedeliveryBackoff", ackTimeoutRedeliveryBackoff);
return this;
}
/**
* RedeliveryBackoff to use for ack timeout redelivery backoff.
*
* The option will be converted to a
* <code>org.apache.pulsar.client.api.RedeliveryBackoff</code> type.
*
* Group: consumer
*
* @param ackTimeoutRedeliveryBackoff the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder ackTimeoutRedeliveryBackoff(String ackTimeoutRedeliveryBackoff) {
doSetProperty("ackTimeoutRedeliveryBackoff", ackTimeoutRedeliveryBackoff);
return this;
}
/**
* Whether to allow manual message acknowledgements. If this option is
* enabled, then messages are not acknowledged automatically after
* successful route completion. Instead, an instance of
* PulsarMessageReceipt is stored as a header on the
* org.apache.camel.Exchange. Messages can then be acknowledged using
* PulsarMessageReceipt at any time before the ackTimeout occurs.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param allowManualAcknowledgement the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder allowManualAcknowledgement(boolean allowManualAcknowledgement) {
doSetProperty("allowManualAcknowledgement", allowManualAcknowledgement);
return this;
}
/**
* Whether to allow manual message acknowledgements. If this option is
* enabled, then messages are not acknowledged automatically after
* successful route completion. Instead, an instance of
* PulsarMessageReceipt is stored as a header on the
* org.apache.camel.Exchange. Messages can then be acknowledged using
* PulsarMessageReceipt at any time before the ackTimeout occurs.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param allowManualAcknowledgement the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder allowManualAcknowledgement(String allowManualAcknowledgement) {
doSetProperty("allowManualAcknowledgement", allowManualAcknowledgement);
return this;
}
/**
* Name of the consumer when subscription is EXCLUSIVE.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: sole-consumer
* Group: consumer
*
* @param consumerName the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder consumerName(String consumerName) {
doSetProperty("consumerName", consumerName);
return this;
}
/**
* Prefix to add to consumer names when a SHARED or FAILOVER
* subscription is used.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: cons
* Group: consumer
*
* @param consumerNamePrefix the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder consumerNamePrefix(String consumerNamePrefix) {
doSetProperty("consumerNamePrefix", consumerNamePrefix);
return this;
}
/**
* Size of the consumer queue - defaults to 10.
*
* The option is a: <code>int</code> type.
*
* Default: 10
* Group: consumer
*
* @param consumerQueueSize the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder consumerQueueSize(int consumerQueueSize) {
doSetProperty("consumerQueueSize", consumerQueueSize);
return this;
}
/**
* Size of the consumer queue - defaults to 10.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 10
* Group: consumer
*
* @param consumerQueueSize the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder consumerQueueSize(String consumerQueueSize) {
doSetProperty("consumerQueueSize", consumerQueueSize);
return this;
}
/**
* Name of the topic where the messages which fail maxRedeliverCount
* times will be sent. Note: if not set, default topic name will be
* topicName-subscriptionName-DLQ.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param deadLetterTopic the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder deadLetterTopic(String deadLetterTopic) {
doSetProperty("deadLetterTopic", deadLetterTopic);
return this;
}
/**
* To enable retry letter topic mode. The default retry letter topic
* uses this format: topicname-subscriptionname-RETRY.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param enableRetry the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder enableRetry(boolean enableRetry) {
doSetProperty("enableRetry", enableRetry);
return this;
}
/**
* To enable retry letter topic mode. The default retry letter topic
* uses this format: topicname-subscriptionname-RETRY.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param enableRetry the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder enableRetry(String enableRetry) {
doSetProperty("enableRetry", enableRetry);
return this;
}
/**
* Policy to use by consumer when using key-shared subscription type.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param keySharedPolicy the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder keySharedPolicy(String keySharedPolicy) {
doSetProperty("keySharedPolicy", keySharedPolicy);
return this;
}
/**
* Maximum number of times that a message will be redelivered before
* being sent to the dead letter queue. If this value is not set, no
* Dead Letter Policy will be created.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: consumer
*
* @param maxRedeliverCount the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder maxRedeliverCount(Integer maxRedeliverCount) {
doSetProperty("maxRedeliverCount", maxRedeliverCount);
return this;
}
/**
* Maximum number of times that a message will be redelivered before
* being sent to the dead letter queue. If this value is not set, no
* Dead Letter Policy will be created.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: consumer
*
* @param maxRedeliverCount the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder maxRedeliverCount(String maxRedeliverCount) {
doSetProperty("maxRedeliverCount", maxRedeliverCount);
return this;
}
/**
* Whether to use the messageListener interface, or to receive messages
* using a separate thread pool.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param messageListener the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder messageListener(boolean messageListener) {
doSetProperty("messageListener", messageListener);
return this;
}
/**
* Whether to use the messageListener interface, or to receive messages
* using a separate thread pool.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param messageListener the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder messageListener(String messageListener) {
doSetProperty("messageListener", messageListener);
return this;
}
/**
* RedeliveryBackoff to use for negative ack redelivery backoff.
*
* The option is a:
* <code>org.apache.pulsar.client.api.RedeliveryBackoff</code> type.
*
* Group: consumer
*
* @param negativeAckRedeliveryBackoff the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder negativeAckRedeliveryBackoff(org.apache.pulsar.client.api.RedeliveryBackoff negativeAckRedeliveryBackoff) {
doSetProperty("negativeAckRedeliveryBackoff", negativeAckRedeliveryBackoff);
return this;
}
/**
* RedeliveryBackoff to use for negative ack redelivery backoff.
*
* The option will be converted to a
* <code>org.apache.pulsar.client.api.RedeliveryBackoff</code> type.
*
* Group: consumer
*
* @param negativeAckRedeliveryBackoff the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder negativeAckRedeliveryBackoff(String negativeAckRedeliveryBackoff) {
doSetProperty("negativeAckRedeliveryBackoff", negativeAckRedeliveryBackoff);
return this;
}
/**
* Set the negative acknowledgement delay.
*
* The option is a: <code>long</code> type.
*
* Default: 60000000
* Group: consumer
*
* @param negativeAckRedeliveryDelayMicros the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder negativeAckRedeliveryDelayMicros(long negativeAckRedeliveryDelayMicros) {
doSetProperty("negativeAckRedeliveryDelayMicros", negativeAckRedeliveryDelayMicros);
return this;
}
/**
* Set the negative acknowledgement delay.
*
* The option will be converted to a <code>long</code> type.
*
* Default: 60000000
* Group: consumer
*
* @param negativeAckRedeliveryDelayMicros the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder negativeAckRedeliveryDelayMicros(String negativeAckRedeliveryDelayMicros) {
doSetProperty("negativeAckRedeliveryDelayMicros", negativeAckRedeliveryDelayMicros);
return this;
}
/**
* Number of consumers - defaults to 1.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: consumer
*
* @param numberOfConsumers the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder numberOfConsumers(int numberOfConsumers) {
doSetProperty("numberOfConsumers", numberOfConsumers);
return this;
}
/**
* Number of consumers - defaults to 1.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: consumer
*
* @param numberOfConsumers the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder numberOfConsumers(String numberOfConsumers) {
doSetProperty("numberOfConsumers", numberOfConsumers);
return this;
}
/**
* Number of threads to receive and handle messages when using a
* separate thread pool.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: consumer
*
* @param numberOfConsumerThreads the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder numberOfConsumerThreads(int numberOfConsumerThreads) {
doSetProperty("numberOfConsumerThreads", numberOfConsumerThreads);
return this;
}
/**
* Number of threads to receive and handle messages when using a
* separate thread pool.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: consumer
*
* @param numberOfConsumerThreads the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder numberOfConsumerThreads(String numberOfConsumerThreads) {
doSetProperty("numberOfConsumerThreads", numberOfConsumerThreads);
return this;
}
/**
* Enable compacted topic reading.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param readCompacted the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder readCompacted(boolean readCompacted) {
doSetProperty("readCompacted", readCompacted);
return this;
}
/**
* Enable compacted topic reading.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param readCompacted the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder readCompacted(String readCompacted) {
doSetProperty("readCompacted", readCompacted);
return this;
}
/**
* Name of the topic to use in retry mode. Note: if not set, default
* topic name will be topicName-subscriptionName-RETRY.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param retryLetterTopic the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder retryLetterTopic(String retryLetterTopic) {
doSetProperty("retryLetterTopic", retryLetterTopic);
return this;
}
/**
* Control the initial position in the topic of a newly created
* subscription. Default is latest message.
*
* The option is a:
* <code>org.apache.camel.component.pulsar.utils.consumers.SubscriptionInitialPosition</code> type.
*
* Default: LATEST
* Group: consumer
*
* @param subscriptionInitialPosition the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder subscriptionInitialPosition(org.apache.camel.component.pulsar.utils.consumers.SubscriptionInitialPosition subscriptionInitialPosition) {
doSetProperty("subscriptionInitialPosition", subscriptionInitialPosition);
return this;
}
/**
* Control the initial position in the topic of a newly created
* subscription. Default is latest message.
*
* The option will be converted to a
* <code>org.apache.camel.component.pulsar.utils.consumers.SubscriptionInitialPosition</code> type.
*
* Default: LATEST
* Group: consumer
*
* @param subscriptionInitialPosition the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder subscriptionInitialPosition(String subscriptionInitialPosition) {
doSetProperty("subscriptionInitialPosition", subscriptionInitialPosition);
return this;
}
/**
* Name of the subscription to use.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: subs
* Group: consumer
*
* @param subscriptionName the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder subscriptionName(String subscriptionName) {
doSetProperty("subscriptionName", subscriptionName);
return this;
}
/**
* Determines to which topics this consumer should be subscribed to -
* Persistent, Non-Persistent, or both. Only used with pattern
* subscriptions.
*
* The option is a:
* <code>org.apache.pulsar.client.api.RegexSubscriptionMode</code> type.
*
* Default: PersistentOnly
* Group: consumer
*
* @param subscriptionTopicsMode the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder subscriptionTopicsMode(org.apache.pulsar.client.api.RegexSubscriptionMode subscriptionTopicsMode) {
doSetProperty("subscriptionTopicsMode", subscriptionTopicsMode);
return this;
}
/**
* Determines to which topics this consumer should be subscribed to -
* Persistent, Non-Persistent, or both. Only used with pattern
* subscriptions.
*
* The option will be converted to a
* <code>org.apache.pulsar.client.api.RegexSubscriptionMode</code> type.
*
* Default: PersistentOnly
* Group: consumer
*
* @param subscriptionTopicsMode the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder subscriptionTopicsMode(String subscriptionTopicsMode) {
doSetProperty("subscriptionTopicsMode", subscriptionTopicsMode);
return this;
}
/**
* Type of the subscription EXCLUSIVESHAREDFAILOVERKEY_SHARED, defaults
* to EXCLUSIVE.
*
* The option is a:
* <code>org.apache.camel.component.pulsar.utils.consumers.SubscriptionType</code> type.
*
* Default: EXCLUSIVE
* Group: consumer
*
* @param subscriptionType the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder subscriptionType(org.apache.camel.component.pulsar.utils.consumers.SubscriptionType subscriptionType) {
doSetProperty("subscriptionType", subscriptionType);
return this;
}
/**
* Type of the subscription EXCLUSIVESHAREDFAILOVERKEY_SHARED, defaults
* to EXCLUSIVE.
*
* The option will be converted to a
* <code>org.apache.camel.component.pulsar.utils.consumers.SubscriptionType</code> type.
*
* Default: EXCLUSIVE
* Group: consumer
*
* @param subscriptionType the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder subscriptionType(String subscriptionType) {
doSetProperty("subscriptionType", subscriptionType);
return this;
}
/**
* Whether the topic is a pattern (regular expression) that allows the
* consumer to subscribe to all matching topics in the namespace.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param topicsPattern the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder topicsPattern(boolean topicsPattern) {
doSetProperty("topicsPattern", topicsPattern);
return this;
}
/**
* Whether the topic is a pattern (regular expression) that allows the
* consumer to subscribe to all matching topics in the namespace.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param topicsPattern the value to set
* @return the dsl builder
*/
default PulsarEndpointConsumerBuilder topicsPattern(String topicsPattern) {
doSetProperty("topicsPattern", topicsPattern);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the Pulsar component.
*/
public | PulsarEndpointConsumerBuilder |
java | google__gson | gson/src/main/java/com/google/gson/annotations/JsonAdapter.java | {
"start": 1495,
"end": 2207
} | class ____ extends TypeAdapter<User> {
* @Override public void write(JsonWriter out, User user) throws IOException {
* // implement write: combine firstName and lastName into name
* out.beginObject();
* out.name("name");
* out.value(user.firstName + " " + user.lastName);
* out.endObject();
* }
*
* @Override public User read(JsonReader in) throws IOException {
* // implement read: split name into firstName and lastName
* in.beginObject();
* in.nextName();
* String[] nameParts = in.nextString().split(" ");
* in.endObject();
* return new User(nameParts[0], nameParts[1]);
* }
* }
* </pre>
*
* Since {@code User} | UserJsonAdapter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/nature/elemental/ElementalListTest.java | {
"start": 810,
"end": 2109
} | class ____ {
private static final Logger log = Logger.getLogger( ElementalListTest.class );
@BeforeEach
public void prepareTestData(SessionFactoryScope scope) {
scope.inTransaction( (session) -> {
Person person = new Person( 1 );
person.phones.add( "027-123-4567" );
person.phones.add( "028-234-9876" );
session.persist( person );
} );
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testLifecycle(SessionFactoryScope scope) {
scope.inTransaction( (entityManager) -> {
Person person = entityManager.find( Person.class, 1 );
log.info( "Clear element collection and add element" );
//tag::ex-collection-elemental-lifecycle[]
person.getPhones().clear();
person.getPhones().add( "123-456-7890" );
person.getPhones().add( "456-000-1234" );
//end::ex-collection-elemental-lifecycle[]
} );
scope.inTransaction( (entityManager) -> {
Person person = entityManager.find( Person.class, 1 );
log.info( "Remove one element" );
//tag::ex-collection-elemental-remove[]
person.getPhones().remove( 0 );
//end::ex-collection-elemental-remove[]
} );
}
//tag::ex-collection-elemental-model[]
@Entity(name = "Person")
public static | ElementalListTest |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/cluster/commands/HashClusterCommandIntegrationTests.java | {
"start": 497,
"end": 774
} | class ____ extends HashCommandIntegrationTests {
@Inject
public HashClusterCommandIntegrationTests(StatefulRedisClusterConnection<String, String> connection) {
super(ClusterTestUtil.redisCommandsOverCluster(connection));
}
}
| HashClusterCommandIntegrationTests |
java | quarkusio__quarkus | extensions/oidc-client/runtime/src/main/java/io/quarkus/oidc/client/runtime/OidcClientRecorder.java | {
"start": 1327,
"end": 13616
} | class ____ {
private static final Logger LOG = Logger.getLogger(OidcClientRecorder.class);
private static final String CLIENT_ID_ATTRIBUTE = "client-id";
static final String DEFAULT_OIDC_CLIENT_ID = "Default";
static Map<String, OidcClient> createStaticOidcClients(OidcClientsConfig oidcClientsConfig, Vertx vertx,
OidcTlsSupport tlsSupport, OidcClientConfig defaultClientConfig) {
String defaultClientId = defaultClientConfig.id().get();
Map<String, OidcClient> staticOidcClients = new HashMap<>();
for (var config : oidcClientsConfig.namedClients().entrySet()) {
final String namedKey = config.getKey();
if (!OidcClientsConfig.DEFAULT_CLIENT_KEY.equals(namedKey)) {
var namedOidcClientConfig = config.getValue();
OidcCommonUtils.verifyConfigurationId(defaultClientId, namedKey, namedOidcClientConfig.id());
staticOidcClients.put(namedKey, createOidcClient(namedOidcClientConfig, namedKey, vertx, tlsSupport));
}
}
return Map.copyOf(staticOidcClients);
}
public Function<SyntheticCreationalContext<OidcClient>, OidcClient> createOidcClientBean(String clientName) {
return new Function<SyntheticCreationalContext<OidcClient>, OidcClient>() {
@Override
public OidcClient apply(SyntheticCreationalContext<OidcClient> ctx) {
return ctx.getInjectedReference(OidcClients.class).getClient(clientName);
}
};
}
protected static OidcClient createOidcClient(OidcClientConfig oidcConfig, String oidcClientId, Vertx vertx,
OidcTlsSupport tlsSupport) {
return createOidcClientUni(oidcConfig, oidcClientId, vertx, tlsSupport).await()
.atMost(oidcConfig.connectionTimeout());
}
protected static Uni<OidcClient> createOidcClientUni(OidcClientConfig oidcConfig, String oidcClientId,
Vertx vertx, OidcTlsSupport tlsSupport) {
if (!oidcConfig.clientEnabled()) {
String message = String.format("'%s' client configuration is disabled", oidcClientId);
LOG.debug(message);
return Uni.createFrom().item(new DisabledOidcClient(message));
}
if (oidcConfig.id().isEmpty()) {
// if user did not set the client id
// we do set 'id' to the named client key
// e.g. quarkus.oidc-client.<<name>>.id=<<name>>
return Uni.createFrom().failure(new IllegalStateException("OIDC Client ID must be set"));
}
try {
if (oidcConfig.authServerUrl().isEmpty() && !OidcCommonUtils.isAbsoluteUrl(oidcConfig.tokenPath())) {
throw new ConfigurationException(
"Either 'quarkus.oidc-client.auth-server-url' or absolute 'quarkus.oidc-client.token-path' URL must be set");
}
OidcCommonUtils.verifyEndpointUrl(getEndpointUrl(oidcConfig));
OidcCommonUtils.verifyCommonConfiguration(oidcConfig, false, false);
} catch (Throwable t) {
LOG.debug(t.getMessage());
String message = String.format("'%s' client configuration is not initialized", oidcClientId);
return Uni.createFrom().item(new DisabledOidcClient(message));
}
WebClientOptions options = new WebClientOptions();
options.setFollowRedirects(oidcConfig.followRedirects());
OidcCommonUtils.setHttpClientOptions(oidcConfig, options, tlsSupport.forConfig(oidcConfig.tls()));
var mutinyVertx = new io.vertx.mutiny.core.Vertx(vertx);
WebClient client = WebClient.create(mutinyVertx, options);
Map<OidcEndpoint.Type, List<OidcRequestFilter>> oidcRequestFilters = OidcCommonUtils.getOidcRequestFilters();
Map<OidcEndpoint.Type, List<OidcResponseFilter>> oidcResponseFilters = OidcCommonUtils.getOidcResponseFilters();
Uni<OidcConfigurationMetadata> tokenUrisUni = null;
if (OidcCommonUtils.isAbsoluteUrl(oidcConfig.tokenPath())) {
tokenUrisUni = Uni.createFrom().item(
new OidcConfigurationMetadata(oidcConfig.tokenPath().get(),
OidcCommonUtils.isAbsoluteUrl(oidcConfig.revokePath()) ? oidcConfig.revokePath().get() : null));
} else {
String authServerUriString = OidcCommonUtils.getAuthServerUrl(oidcConfig);
if (!oidcConfig.discoveryEnabled().orElse(true)) {
tokenUrisUni = Uni.createFrom()
.item(new OidcConfigurationMetadata(
OidcCommonUtils.getOidcEndpointUrl(authServerUriString, oidcConfig.tokenPath()),
OidcCommonUtils.getOidcEndpointUrl(authServerUriString, oidcConfig.revokePath())));
} else {
tokenUrisUni = discoverTokenUris(client, oidcRequestFilters, oidcResponseFilters,
authServerUriString.toString(), oidcConfig,
mutinyVertx);
}
}
return tokenUrisUni.onItemOrFailure()
.transformToUni(new BiFunction<OidcConfigurationMetadata, Throwable, Uni<? extends OidcClient>>() {
@Override
public Uni<OidcClient> apply(OidcConfigurationMetadata metadata, Throwable t) {
if (t != null) {
throw toOidcClientException(getEndpointUrl(oidcConfig), t);
}
if (metadata.tokenRequestUri == null) {
throw new ConfigurationException(
"OpenId Connect Provider token endpoint URL is not configured and can not be discovered");
}
String grantType = oidcConfig.grant().type().getGrantType();
MultiMap tokenGrantParams = null;
if (oidcConfig.grant().type() != Grant.Type.REFRESH) {
tokenGrantParams = new MultiMap(io.vertx.core.MultiMap.caseInsensitiveMultiMap());
setGrantClientParams(oidcConfig, tokenGrantParams, grantType);
if (oidcConfig.grantOptions() != null) {
Map<String, String> grantOptions = oidcConfig.grantOptions()
.get(oidcConfig.grant().type().name().toLowerCase());
if (grantOptions != null) {
if (oidcConfig.grant().type() == Grant.Type.PASSWORD) {
// Without this block `password` will be listed first, before `username`
// which is not a technical problem but might affect Wiremock tests or the endpoints
// which expect a specific order.
final String userName = grantOptions.get(OidcConstants.PASSWORD_GRANT_USERNAME);
final String userPassword = grantOptions.get(OidcConstants.PASSWORD_GRANT_PASSWORD);
if (userName == null || userPassword == null) {
throw new ConfigurationException(
"Username and password must be set when a password grant is used",
Set.of("quarkus.oidc-client.grant.type",
"quarkus.oidc-client.grant-options"));
}
tokenGrantParams.add(OidcConstants.PASSWORD_GRANT_USERNAME, userName);
tokenGrantParams.add(OidcConstants.PASSWORD_GRANT_PASSWORD, userPassword);
for (Map.Entry<String, String> entry : grantOptions.entrySet()) {
if (!OidcConstants.PASSWORD_GRANT_USERNAME.equals(entry.getKey())
&& !OidcConstants.PASSWORD_GRANT_PASSWORD.equals(entry.getKey())) {
tokenGrantParams.add(entry.getKey(), entry.getValue());
}
}
} else {
tokenGrantParams.addAll(grantOptions);
}
}
if (oidcConfig.grant().type() == Grant.Type.EXCHANGE
&& !tokenGrantParams.contains(OidcConstants.EXCHANGE_GRANT_SUBJECT_TOKEN_TYPE)) {
tokenGrantParams.add(OidcConstants.EXCHANGE_GRANT_SUBJECT_TOKEN_TYPE,
OidcConstants.EXCHANGE_GRANT_SUBJECT_ACCESS_TOKEN_TYPE);
}
}
}
MultiMap commonRefreshGrantParams = new MultiMap(io.vertx.core.MultiMap.caseInsensitiveMultiMap());
setGrantClientParams(oidcConfig, commonRefreshGrantParams, OidcConstants.REFRESH_TOKEN_GRANT);
return OidcClientImpl.of(client, metadata.tokenRequestUri, metadata.tokenRevokeUri, grantType,
tokenGrantParams, commonRefreshGrantParams, oidcConfig, oidcRequestFilters,
oidcResponseFilters, vertx);
}
});
}
private static String getEndpointUrl(OidcClientConfig oidcConfig) {
return oidcConfig.authServerUrl().isPresent() ? oidcConfig.authServerUrl().get() : oidcConfig.tokenPath().get();
}
private static void setGrantClientParams(OidcClientConfig oidcConfig, MultiMap grantParams, String grantType) {
grantParams.add(OidcConstants.GRANT_TYPE, grantType);
if (oidcConfig.scopes().isPresent()) {
grantParams.add(OidcConstants.TOKEN_SCOPE, String.join(" ", oidcConfig.scopes().get()));
}
if (oidcConfig.audience().isPresent()) {
grantParams.add(OidcConstants.TOKEN_AUDIENCE_GRANT_PROPERTY, String.join(" ", oidcConfig.audience().get()));
}
}
private static Uni<OidcConfigurationMetadata> discoverTokenUris(WebClient client,
Map<OidcEndpoint.Type, List<OidcRequestFilter>> oidcRequestFilters,
Map<OidcEndpoint.Type, List<OidcResponseFilter>> oidcResponseFilters,
String authServerUrl, OidcClientConfig oidcConfig, io.vertx.mutiny.core.Vertx vertx) {
final long connectionDelayInMillisecs = OidcCommonUtils.getConnectionDelayInMillis(oidcConfig);
OidcRequestContextProperties contextProps = new OidcRequestContextProperties(
Map.of(CLIENT_ID_ATTRIBUTE, oidcConfig.id().orElse(DEFAULT_OIDC_CLIENT_ID)));
return OidcCommonUtils.discoverMetadata(client, oidcRequestFilters, contextProps, oidcResponseFilters,
authServerUrl, connectionDelayInMillisecs, vertx, oidcConfig.useBlockingDnsLookup())
.onItem().transform(json -> new OidcConfigurationMetadata(json.getString("token_endpoint"),
json.getString("revocation_endpoint")));
}
protected static OidcClientException toOidcClientException(String authServerUrlString, Throwable cause) {
return new OidcClientException(OidcCommonUtils.formatConnectionErrorMessage(authServerUrlString), cause);
}
public void initOidcClients() {
try {
// makes sure that OIDC Clients are created at the latest when runtime synthetic beans are ready
Arc.container().instance(OidcClients.class).get();
} catch (CreationException wrapper) {
if (wrapper.getCause() instanceof RuntimeException runtimeException) {
// so that users see ConfigurationException etc. without noise
throw runtimeException;
}
throw wrapper;
}
}
private static | OidcClientRecorder |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_220_jdbc_fn.java | {
"start": 856,
"end": 1921
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SELECT TRIM(BOTH ' ' FROM {fn CONCAT({fn CONCAT(' ', `calcs`.`str2`)}, ' ')}) AS `TEMP(Test)(1903992131)(0)`\n" +
"\n" +
"FROM `calcs`\n" +
"\n" +
"GROUP BY 1";
System.out.println(sql);
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
assertEquals(1, statementList.size());
SQLStatement stmt = statementList.get(0);
assertEquals("SELECT TRIM(BOTH ' ' FROM CONCAT(CONCAT(' ', `calcs`.`str2`), ' ')) AS `TEMP(Test)(1903992131)(0)`\n" +
"FROM `calcs`\n" +
"GROUP BY 1", stmt.toString());
assertEquals("select TRIM(BOTH ' ' from CONCAT(CONCAT(' ', `calcs`.`str2`), ' ')) as `TEMP(Test)(1903992131)(0)`\n" +
"from `calcs`\n" +
"group by 1", stmt.clone().toLowerCaseString());
}
}
| MySqlSelectTest_220_jdbc_fn |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java | {
"start": 4082,
"end": 12861
} | interface ____ not required is when it always returns another function in its surrogate.
* </li>
* <li>
* To introduce your aggregation to the engine:
* <ul>
* <li>
* Implement serialization for your aggregation by implementing
* {@link org.elasticsearch.common.io.stream.NamedWriteable#getWriteableName},
* {@link org.elasticsearch.common.io.stream.NamedWriteable#writeTo},
* and a deserializing constructor. Then add an {@link org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry}
* constant and add that constant to the list in
* {@link org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateWritables#getNamedWriteables}.
* </li>
* <li>
* Add it to {@link org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry}.
* </li>
* </ul>
* </li>
* </ol>
*
* <h3>Creating aggregators for your function</h3>
* <p>
* Aggregators contain the core logic of how to combine values, what to store, how to process data, etc.
* Currently, we rely on code generation (per aggregation per type) in order to implement such functionality.
* This approach was picked for performance reasons (namely to avoid virtual method calls and boxing types).
* As a result we could not rely on interfaces implementation and generics.
* </p>
* <p>
* In order to implement aggregation logic create your class (typically named "${FunctionName}${Type}Aggregator").
* It must be placed in `org.elasticsearch.compute.aggregation` in order to be picked up by code generation.
* Annotate it with {@link org.elasticsearch.compute.ann.Aggregator} and {@link org.elasticsearch.compute.ann.GroupingAggregator}
* The first one is responsible for an entire data set aggregation, while the second one is responsible for grouping within buckets.
* </p>
* <h4>Before you start implementing it, please note that:</h4>
* <ul>
* <li>All methods must be public static</li>
* <li>
* {@code init/initSingle/initGrouping} could have optional {@link org.elasticsearch.common.util.BigArrays} or
* {@link org.elasticsearch.compute.operator.DriverContext} arguments that are going to be injected automatically.
* It is also possible to declare any number of arbitrary arguments that must be provided via generated Supplier.
* </li>
* <li>
* {@code combine, combineIntermediate, evaluateFinal} methods (see below) could be generated automatically
* when both input type I and mutable accumulator state AggregatorState and GroupingAggregatorState are primitive (DOUBLE, INT).
* </li>
* <li>
* Code generation expects at least one IntermediateState field that is going to be used to keep
* the serialized state of the aggregation (eg AggregatorState and GroupingAggregatorState).
* It must be defined even if you rely on autogenerated implementation for the primitive types.
* </li>
* </ul>
* <h4>Aggregation expects:</h4>
* <ul>
* <li>
* type AggregatorState (a mutable state used to accumulate result of the aggregation) to be public, not inner and implements
* {@link org.elasticsearch.compute.aggregation.AggregatorState}
* </li>
* <li>type I (input to your aggregation function), usually primitive types and {@link org.apache.lucene.util.BytesRef}</li>
* <li>{@code AggregatorState init()} or {@code AggregatorState initSingle()} returns empty initialized aggregation state</li>
* <li>
* {@code void combine(AggregatorState state, I input)} or {@code AggregatorState combine(AggregatorState state, I input)}
* adds input entry to the aggregation state
* </li>
* <li>
* {@code void combineIntermediate(AggregatorState state, intermediate states)} adds serialized aggregation state
* to the current aggregation state (used to combine results across different nodes)
* </li>
* <li>
* {@code Block evaluateFinal(AggregatorState state, DriverContext)} converts the inner state of the aggregation to the result
* column
* </li>
* <li>
* (optional) {@code void first(AggregatorState state, I input)} if present, this is called the first time a value
* is seen <strong>instead</strong> of calling {@code combine}. This is more efficient than manually checking for
* uninitialized state on every call to {@code combine}.
* </li>
* </ul>
* <h4>Grouping aggregation expects:</h4>
* <ul>
* <li>
* type GroupingAggregatorState (a mutable state used to accumulate result of the grouping aggregation) to be public,
* not inner and implements {@link org.elasticsearch.compute.aggregation.GroupingAggregatorState}
* </li>
* <li>type I (input to your aggregation function), usually primitive types and {@link org.apache.lucene.util.BytesRef}</li>
* <li>
* {@code GroupingAggregatorState init()} or {@code GroupingAggregatorState initGrouping()} returns empty initialized grouping
* aggregation state
* </li>
* <li>
* {@code void combine(GroupingAggregatorState state, int groupId, I input)} adds input entry to the corresponding group (bucket)
* of the grouping aggregation state
* </li>
* <li>
* {@code void combineIntermediate(GroupingAggregatorState current, int groupId, intermediate states)} adds serialized
* aggregation state to the current grouped aggregation state (used to combine results across different nodes)
* </li>
* <li>
* {@code Block evaluateFinal(GroupingAggregatorState state, IntVectorSelected, DriverContext)} converts the inner state
* of the grouping aggregation to the result column
* </li>
* </ul>
* <ol>
* <li>
* Copy an existing aggregator to use as a base. You'll usually make one per type. Check other classes to see the naming pattern.
* You can find them in {@link org.elasticsearch.compute.aggregation}.
* <p>
* Note that some aggregators are autogenerated, so they live in different directories.
* The base is {@code x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/}
* </p>
* </li>
* <li>
* Implement (or create an empty) methods according to the above list.
* Also check {@link org.elasticsearch.compute.ann.Aggregator} JavaDoc as it contains generated method usage.
* </li>
* <li>
* Make a test for your aggregator.
* You can copy an existing one from {@code x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/}.
* <p>
* Tests extending from {@code org.elasticsearch.compute.aggregation.AggregatorFunctionTestCase}
* will already include most required cases. You should only need to fill the required abstract methods.
* </p>
* </li>
* <li>
* Code generation is triggered when running the tests.
* Run the CsvTests to generate the code. Generated code should include:
* <p>
* One of them will be the {@code AggregatorFunctionSupplier} for your aggregator.
* Find it by its name ({@code <Aggregation-name><Type>AggregatorFunctionSupplier}),
* and return it in the {@code toSupplier} method in your function, under the correct type condition.
* </p>
* </li>
* <li>
* Now, complete the implementation of the aggregator, until the tests pass!
* </li>
* </ol>
*
* <h3>StringTemplates</h3>
* <p>
* Making an aggregator per type may be repetitive. To avoid code duplication, we use StringTemplates:
* </p>
* <ol>
* <li>
* Create a new StringTemplate file.
* Use another as a reference, like
* {@code x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopAggregator.java.st}.
* </li>
* <li>
* Add the template scripts to {@code x-pack/plugin/esql/compute/build.gradle}.
* <p>
* You can also see there which variables you can use, and which types are currently supported.
* </p>
* </li>
* <li>
* After completing your template, run the generation with {@code ./gradlew :x-pack:plugin:esql:compute:compileJava}.
* <p>
* You may need to tweak some import orders per type so they don’t raise warnings.
* </p>
* </li>
* </ol>
*/
package org.elasticsearch.xpack.esql.expression.function.aggregate;
| is |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/authentication/SavedRequestAwareAuthenticationSuccessHandler.java | {
"start": 3226,
"end": 4472
} | class ____ extends SimpleUrlAuthenticationSuccessHandler {
protected final Log logger = LogFactory.getLog(this.getClass());
private RequestCache requestCache = new HttpSessionRequestCache();
@Override
public void onAuthenticationSuccess(HttpServletRequest request, HttpServletResponse response,
Authentication authentication) throws ServletException, IOException {
SavedRequest savedRequest = this.requestCache.getRequest(request, response);
if (savedRequest == null) {
super.onAuthenticationSuccess(request, response, authentication);
return;
}
String targetUrlParameter = getTargetUrlParameter();
if (isAlwaysUseDefaultTargetUrl()
|| (targetUrlParameter != null && StringUtils.hasText(request.getParameter(targetUrlParameter)))) {
this.requestCache.removeRequest(request, response);
super.onAuthenticationSuccess(request, response, authentication);
return;
}
clearAuthenticationAttributes(request);
// Use the DefaultSavedRequest URL
String targetUrl = savedRequest.getRedirectUrl();
getRedirectStrategy().sendRedirect(request, response, targetUrl);
}
public void setRequestCache(RequestCache requestCache) {
this.requestCache = requestCache;
}
}
| SavedRequestAwareAuthenticationSuccessHandler |
java | quarkusio__quarkus | test-framework/common/src/main/java/io/quarkus/test/common/QuarkusTestResourceLifecycleManager.java | {
"start": 5284,
"end": 6020
} | class ____ implements Predicate<Field> {
private final Class<? extends Annotation> annotationClass;
private final Class<?> expectedFieldType;
public AnnotatedAndMatchesType(Class<? extends Annotation> annotationClass, Class<?> expectedFieldType) {
this.annotationClass = annotationClass;
this.expectedFieldType = expectedFieldType;
}
@Override
public boolean test(Field field) {
if (field.getAnnotation(annotationClass) == null) {
return false;
}
return field.getType().isAssignableFrom(expectedFieldType);
}
}
}
| AnnotatedAndMatchesType |
java | google__guice | extensions/throwingproviders/test/com/google/inject/throwingproviders/TestScope.java | {
"start": 1037,
"end": 1124
} | class ____ implements Scope {
@Retention(RUNTIME)
@ScopeAnnotation
public @ | TestScope |
java | google__guava | guava-gwt/src-super/com/google/common/collect/super/com/google/common/collect/ImmutableSet.java | {
"start": 5758,
"end": 6221
} | class ____<E> extends ImmutableSet<E> {
@LazyInit private transient ImmutableList<E> asList;
@Override
public ImmutableList<E> asList() {
ImmutableList<E> result = asList;
if (result == null) {
return asList = createAsList();
} else {
return result;
}
}
@Override
ImmutableList<E> createAsList() {
return new RegularImmutableAsList<E>(this, toArray());
}
}
abstract static | CachingAsList |
java | apache__camel | components/camel-quickfix/src/main/java/org/apache/camel/component/quickfixj/QuickfixjEngine.java | {
"start": 3074,
"end": 4267
} | class ____ extends ServiceSupport {
public static final String DEFAULT_START_TIME = "00:00:00";
public static final String DEFAULT_END_TIME = "00:00:00";
public static final long DEFAULT_HEARTBTINT = 30;
public static final String SETTING_THREAD_MODEL = "ThreadModel";
public static final String SETTING_USE_JMX = "UseJmx";
private static final Logger LOG = LoggerFactory.getLogger(QuickfixjEngine.class);
private Acceptor acceptor;
private Initiator initiator;
private JmxExporter jmxExporter;
private MessageStoreFactory messageStoreFactory;
private LogFactory sessionLogFactory;
private MessageFactory messageFactory;
private final MessageCorrelator messageCorrelator = new MessageCorrelator();
private final List<QuickfixjEventListener> eventListeners = new CopyOnWriteArrayList<>();
private final String uri;
private ObjectName acceptorObjectName;
private ObjectName initiatorObjectName;
private final SessionSettings settings;
private final AtomicBoolean initialized = new AtomicBoolean();
private final boolean lazy;
private final AtomicInteger refCount = new AtomicInteger();
public | QuickfixjEngine |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java | {
"start": 4543,
"end": 6218
} | class ____ the
* specified Blob service endpoint and account credentials.
*
* @param baseUri
* A <code>java.net.URI</code> object that represents the Blob
* service endpoint used to create the client.
* @param credentials
* A {@link StorageCredentials} object that represents the account
* credentials.
*/
public abstract void createBlobClient(URI baseUri,
StorageCredentials credentials);
/**
* Returns the credentials for the Blob service, as configured for the storage
* account.
*
* @return A {@link StorageCredentials} object that represents the credentials
* for this storage account.
*/
public abstract StorageCredentials getCredentials();
/**
* Returns a reference to a {@link CloudBlobContainerWrapper} object that
* represents the cloud blob container for the specified address.
*
* @param name
* A <code>String</code> that represents the name of the container.
* @return A {@link CloudBlobContainerWrapper} object that represents a
* reference to the cloud blob container.
*
* @throws URISyntaxException
* If the resource URI is invalid.
* @throws StorageException
* If a storage service error occurred.
*/
public abstract CloudBlobContainerWrapper getContainerReference(String name)
throws URISyntaxException, StorageException;
/**
* A thin wrapper over the
* {@link com.microsoft.azure.storage.blob.CloudBlobDirectory} class
* that simply redirects calls to the real object except in unit tests.
*/
@InterfaceAudience.Private
public abstract static | using |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/rest/StopTaskRequest.java | {
"start": 1013,
"end": 1279
} | class ____ extends Message {
private final String id;
@JsonCreator
public StopTaskRequest(@JsonProperty("id") String id) {
this.id = (id == null) ? "" : id;
}
@JsonProperty
public String id() {
return id;
}
}
| StopTaskRequest |
java | apache__camel | components/camel-disruptor/src/test/java/org/apache/camel/component/disruptor/vm/DisruptorVmConcurrentConsumersTest.java | {
"start": 995,
"end": 1654
} | class ____ extends AbstractVmTestSupport {
@Test
void testSendToSeda() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
template2.sendBody("disruptor-vm:foo?concurrentConsumers=5", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("disruptor-vm:foo?concurrentConsumers=5").to("mock:result");
}
};
}
}
| DisruptorVmConcurrentConsumersTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java | {
"start": 1536,
"end": 1662
} | class ____ extends InternalMultiBucketAggregation<InternalIpPrefix, InternalIpPrefix.Bucket> {
public static | InternalIpPrefix |
java | apache__kafka | clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java | {
"start": 1343,
"end": 3438
} | class ____ {
@Test
public void testNoBootstrapSet() {
Map<String, Object> map = Map.of(
AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "",
AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, ""
);
AdminClientConfig config = new AdminClientConfig(map);
assertEquals("You must set either bootstrap.servers or bootstrap.controllers",
assertThrows(ConfigException.class, () -> AdminBootstrapAddresses.fromConfig(config)).
getMessage());
}
@Test
public void testTwoBootstrapsSet() {
Map<String, Object> map = new HashMap<>();
map.put(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, "localhost:9092");
map.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
AdminClientConfig config = new AdminClientConfig(map);
assertEquals("You cannot set both bootstrap.servers and bootstrap.controllers",
assertThrows(ConfigException.class, () -> AdminBootstrapAddresses.fromConfig(config)).
getMessage());
}
@ParameterizedTest
@ValueSource(booleans = {false, true})
public void testFromConfig(boolean usingBootstrapControllers) {
Map<String, Object> map = new HashMap<>();
String connectString = "localhost:9092,localhost:9093,localhost:9094";
if (usingBootstrapControllers) {
map.put(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, connectString);
} else {
map.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, connectString);
}
AdminClientConfig config = new AdminClientConfig(map);
AdminBootstrapAddresses addresses = AdminBootstrapAddresses.fromConfig(config);
assertEquals(usingBootstrapControllers, addresses.usingBootstrapControllers());
assertEquals(Arrays.asList(
new InetSocketAddress("localhost", 9092),
new InetSocketAddress("localhost", 9093),
new InetSocketAddress("localhost", 9094)),
addresses.addresses());
}
}
| AdminBootstrapAddressesTest |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/jdbc/JdbcSpies.java | {
"start": 839,
"end": 950
} | interface ____ {
void onCall(Object spy, Method method, Object[] args, Object result);
}
public static | Callback |
java | quarkusio__quarkus | extensions/spring-boot-properties/deployment/src/main/java/io/quarkus/spring/boot/properties/deployment/ConfigurationPropertiesMetadataBuildItem.java | {
"start": 2721,
"end": 2797
} | class ____
* and produces an instance of that class
*/
public | name |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/filesystem/FsSegmentDataInputStream.java | {
"start": 1375,
"end": 4070
} | class ____ extends FSDataInputStreamWrapper
implements WrappingProxyCloseable<FSDataInputStream> {
private final long startingPosition;
private long endingPosition;
public FsSegmentDataInputStream(
FSDataInputStream inputStream, long startingPosition, long segmentSize)
throws IOException {
super(inputStream);
if (startingPosition < 0 || segmentSize < 0) {
throw new IndexOutOfBoundsException(
"Invalid startingPosition/segmentSize: "
+ startingPosition
+ "/"
+ segmentSize);
}
this.startingPosition = startingPosition;
this.endingPosition = startingPosition + segmentSize;
inputStream.seek(startingPosition);
}
@Override
public int read() throws IOException {
if (inputStream.getPos() >= endingPosition) {
return -1;
}
int result = inputStream.read();
if (result == -1) {
return -1;
} else {
return result;
}
}
@Override
public int read(byte[] b) throws IOException {
return this.read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
}
int n =
(int)
Math.min(
Integer.MAX_VALUE,
Math.min(len, (endingPosition - inputStream.getPos())));
if (n == 0) {
return -1;
}
int ret = inputStream.read(b, off, n);
if (ret < 0) {
endingPosition = inputStream.getPos();
return -1;
}
return ret;
}
@Override
public void seek(long desired) throws IOException {
desired += startingPosition;
inputStream.seek(desired);
}
@Override
public long getPos() throws IOException {
return inputStream.getPos() - startingPosition;
}
@Override
public long skip(long n) throws IOException {
long len = Math.min(n, endingPosition - inputStream.getPos());
return inputStream.skip(n);
}
@Override
public synchronized void mark(int readlimit) {
inputStream.mark(readlimit);
}
@Override
public synchronized void reset() throws IOException {
inputStream.reset();
}
@Override
public boolean markSupported() {
return inputStream.markSupported();
}
}
| FsSegmentDataInputStream |
java | grpc__grpc-java | examples/src/main/java/io/grpc/examples/nameresolve/NameResolveServer.java | {
"start": 981,
"end": 2758
} | class ____ {
static public final int serverCount = 3;
static public final int startPort = 50051;
private static final Logger logger = Logger.getLogger(NameResolveServer.class.getName());
private Server[] servers;
public static void main(String[] args) throws IOException, InterruptedException {
final NameResolveServer server = new NameResolveServer();
server.start();
server.blockUntilShutdown();
}
private void start() throws IOException {
servers = new Server[serverCount];
for (int i = 0; i < serverCount; i++) {
int port = startPort + i;
servers[i] = ServerBuilder.forPort(port)
.addService(new GreeterImpl(port))
.build()
.start();
logger.info("Server started, listening on " + port);
}
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
System.err.println("*** shutting down gRPC server since JVM is shutting down");
try {
NameResolveServer.this.stop();
} catch (InterruptedException e) {
e.printStackTrace(System.err);
}
System.err.println("*** server shut down");
}));
}
private void stop() throws InterruptedException {
for (int i = 0; i < serverCount; i++) {
if (servers[i] != null) {
servers[i].shutdown().awaitTermination(30, TimeUnit.SECONDS);
}
}
}
private void blockUntilShutdown() throws InterruptedException {
for (int i = 0; i < serverCount; i++) {
if (servers[i] != null) {
servers[i].awaitTermination();
}
}
}
static | NameResolveServer |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-16/src/test/java/org/redisson/spring/data/connection/RedissonMultiConnectionTest.java | {
"start": 181,
"end": 1576
} | class ____ extends BaseConnectionTest {
@Test
public void testEcho() {
RedissonConnection connection = new RedissonConnection(redisson);
connection.multi();
assertThat(connection.echo("test".getBytes())).isNull();
assertThat(connection.exec().iterator().next()).isEqualTo("test".getBytes());
}
@Test
public void testSetGet() {
RedissonConnection connection = new RedissonConnection(redisson);
connection.multi();
assertThat(connection.isQueueing()).isTrue();
connection.set("key".getBytes(), "value".getBytes());
assertThat(connection.get("key".getBytes())).isNull();
List<Object> result = connection.exec();
assertThat(connection.isQueueing()).isFalse();
assertThat(result.get(0)).isEqualTo("value".getBytes());
}
@Test
public void testHSetGet() {
RedissonConnection connection = new RedissonConnection(redisson);
connection.multi();
assertThat(connection.hSet("key".getBytes(), "field".getBytes(), "value".getBytes())).isNull();
assertThat(connection.hGet("key".getBytes(), "field".getBytes())).isNull();
List<Object> result = connection.exec();
assertThat((Boolean)result.get(0)).isTrue();
assertThat(result.get(1)).isEqualTo("value".getBytes());
}
}
| RedissonMultiConnectionTest |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/common/runtime/src/main/java/org/jboss/resteasy/reactive/spi/RestHandler.java | {
"start": 130,
"end": 253
} | interface ____<T extends AbstractResteasyReactiveContext> {
void handle(T requestContext) throws Exception;
}
| RestHandler |
java | apache__camel | core/camel-main/src/test/java/org/apache/camel/main/scan/MyConcreteRouteBuilder.java | {
"start": 847,
"end": 1015
} | class ____ extends MyAbstractRouteBuilder {
@Override
public void configure() {
from("direct:concrete").to("mock:concrete");
}
}
| MyConcreteRouteBuilder |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/producer/disposer/DisposerWithWildcardTest.java | {
"start": 1434,
"end": 1920
} | class ____ {
static final List<Object> KEYS = new CopyOnWriteArrayList<>();
@Singleton
@Produces
Map<String, Long> produceA() {
return Collections.singletonMap("A", 1l);
}
@Singleton
@Produces
Map<String, Integer> produceB() {
return Collections.singletonMap("B", 1);
}
void dispose(@Disposes Map<?, ?> myMap) {
KEYS.addAll(myMap.keySet());
}
}
}
| Producers |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/single/SingleCreate.java | {
"start": 1627,
"end": 4268
} | class ____<T>
extends AtomicReference<Disposable>
implements SingleEmitter<T>, Disposable {
private static final long serialVersionUID = -2467358622224974244L;
final SingleObserver<? super T> downstream;
Emitter(SingleObserver<? super T> downstream) {
this.downstream = downstream;
}
@Override
public void onSuccess(T value) {
if (get() != DisposableHelper.DISPOSED) {
Disposable d = getAndSet(DisposableHelper.DISPOSED);
if (d != DisposableHelper.DISPOSED) {
try {
if (value == null) {
downstream.onError(ExceptionHelper.createNullPointerException("onSuccess called with a null value."));
} else {
downstream.onSuccess(value);
}
} finally {
if (d != null) {
d.dispose();
}
}
}
}
}
@Override
public void onError(Throwable t) {
if (!tryOnError(t)) {
RxJavaPlugins.onError(t);
}
}
@Override
public boolean tryOnError(Throwable t) {
if (t == null) {
t = ExceptionHelper.createNullPointerException("onError called with a null Throwable.");
}
if (get() != DisposableHelper.DISPOSED) {
Disposable d = getAndSet(DisposableHelper.DISPOSED);
if (d != DisposableHelper.DISPOSED) {
try {
downstream.onError(t);
} finally {
if (d != null) {
d.dispose();
}
}
return true;
}
}
return false;
}
@Override
public void setDisposable(Disposable d) {
DisposableHelper.set(this, d);
}
@Override
public void setCancellable(Cancellable c) {
setDisposable(new CancellableDisposable(c));
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
@Override
public String toString() {
return String.format("%s{%s}", getClass().getSimpleName(), super.toString());
}
}
}
| Emitter |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregatorSupplier.java | {
"start": 613,
"end": 927
} | interface ____ {
Aggregator build(
String name,
ValuesSource valuesSource,
boolean showDistribution,
DocValueFormat format,
AggregationContext context,
Aggregator parent,
Map<String, Object> metadata
) throws IOException;
}
| StringStatsAggregatorSupplier |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java | {
"start": 1073,
"end": 1408
} | class ____ {
private final CachePoolInfo info;
private final CachePoolStats stats;
public CachePoolEntry(CachePoolInfo info, CachePoolStats stats) {
this.info = info;
this.stats = stats;
}
public CachePoolInfo getInfo() {
return info;
}
public CachePoolStats getStats() {
return stats;
}
}
| CachePoolEntry |
java | elastic__elasticsearch | libs/simdvec/src/test/java/org/elasticsearch/simdvec/ESVectorUtilTests.java | {
"start": 1099,
"end": 29508
} | class ____ extends BaseVectorizationTests {
static final ESVectorizationProvider defaultedProvider = BaseVectorizationTests.defaultProvider();
static final ESVectorizationProvider defOrPanamaProvider = BaseVectorizationTests.maybePanamaProvider();
public void testIpByteBit() {
byte[] d = new byte[random().nextInt(128)];
byte[] q = new byte[d.length * 8];
random().nextBytes(d);
random().nextBytes(q);
int sum = 0;
for (int i = 0; i < q.length; i++) {
if (((d[i / 8] << (i % 8)) & 0x80) == 0x80) {
sum += q[i];
}
}
assertEquals(sum, ESVectorUtil.ipByteBit(q, d));
assertEquals(sum, defaultedProvider.getVectorUtilSupport().ipByteBit(q, d));
assertEquals(sum, defOrPanamaProvider.getVectorUtilSupport().ipByteBit(q, d));
}
public void testIpFloatBit() {
byte[] d = new byte[random().nextInt(128)];
float[] q = new float[d.length * 8];
random().nextBytes(d);
float sum = 0;
for (int i = 0; i < q.length; i++) {
q[i] = random().nextFloat();
if (((d[i / 8] << (i % 8)) & 0x80) == 0x80) {
sum += q[i];
}
}
double delta = 1e-5 * q.length;
assertEquals(sum, ESVectorUtil.ipFloatBit(q, d), delta);
assertEquals(sum, defaultedProvider.getVectorUtilSupport().ipFloatBit(q, d), delta);
assertEquals(sum, defOrPanamaProvider.getVectorUtilSupport().ipFloatBit(q, d), delta);
}
public void testIpFloatByte() {
int vectorSize = randomIntBetween(1, 1024);
// scale the delta according to the vector size
double delta = 1e-5 * vectorSize;
float[] q = new float[vectorSize];
byte[] d = new byte[vectorSize];
for (int i = 0; i < q.length; i++) {
q[i] = random().nextFloat();
}
random().nextBytes(d);
float expected = 0;
for (int i = 0; i < q.length; i++) {
expected += q[i] * d[i];
}
assertThat((double) ESVectorUtil.ipFloatByte(q, d), closeTo(expected, delta));
assertThat((double) defaultedProvider.getVectorUtilSupport().ipFloatByte(q, d), closeTo(expected, delta));
assertThat((double) defOrPanamaProvider.getVectorUtilSupport().ipFloatByte(q, d), closeTo(expected, delta));
}
public void testBitAndCount() {
testBasicBitAndImpl(ESVectorUtil::andBitCountLong);
}
public void testIpByteBinInvariants() {
int iterations = atLeast(10);
for (int i = 0; i < iterations; i++) {
int size = randomIntBetween(1, 10);
var d = new byte[size];
var q = new byte[size * B_QUERY - 1];
expectThrows(IllegalArgumentException.class, () -> ESVectorUtil.ipByteBinByte(q, d));
}
}
public void testBasicIpByteBin() {
testBasicIpByteBinImpl(ESVectorUtil::ipByteBinByte);
testBasicIpByteBinImpl(defaultedProvider.getVectorUtilSupport()::ipByteBinByte);
testBasicIpByteBinImpl(defOrPanamaProvider.getVectorUtilSupport()::ipByteBinByte);
}
void testBasicBitAndImpl(ToLongBiFunction<byte[], byte[]> bitAnd) {
assertEquals(0, bitAnd.applyAsLong(new byte[] { 0 }, new byte[] { 0 }));
assertEquals(0, bitAnd.applyAsLong(new byte[] { 1 }, new byte[] { 0 }));
assertEquals(0, bitAnd.applyAsLong(new byte[] { 0 }, new byte[] { 1 }));
assertEquals(1, bitAnd.applyAsLong(new byte[] { 1 }, new byte[] { 1 }));
byte[] a = new byte[31];
byte[] b = new byte[31];
random().nextBytes(a);
random().nextBytes(b);
int expected = scalarBitAnd(a, b);
assertEquals(expected, bitAnd.applyAsLong(a, b));
}
void testBasicIpByteBinImpl(ToLongBiFunction<byte[], byte[]> ipByteBinFunc) {
assertEquals(15L, ipByteBinFunc.applyAsLong(new byte[] { 1, 1, 1, 1 }, new byte[] { 1 }));
assertEquals(30L, ipByteBinFunc.applyAsLong(new byte[] { 1, 2, 1, 2, 1, 2, 1, 2 }, new byte[] { 1, 2 }));
var d = new byte[] { 1, 2, 3 };
var q = new byte[] { 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3 };
assert scalarIpByteBin(q, d) == 60L; // 4 + 8 + 16 + 32
assertEquals(60L, ipByteBinFunc.applyAsLong(q, d));
d = new byte[] { 1, 2, 3, 4 };
q = new byte[] { 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4 };
assert scalarIpByteBin(q, d) == 75L; // 5 + 10 + 20 + 40
assertEquals(75L, ipByteBinFunc.applyAsLong(q, d));
d = new byte[] { 1, 2, 3, 4, 5 };
q = new byte[] { 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5 };
assert scalarIpByteBin(q, d) == 105L; // 7 + 14 + 28 + 56
assertEquals(105L, ipByteBinFunc.applyAsLong(q, d));
d = new byte[] { 1, 2, 3, 4, 5, 6 };
q = new byte[] { 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 };
assert scalarIpByteBin(q, d) == 135L; // 9 + 18 + 36 + 72
assertEquals(135L, ipByteBinFunc.applyAsLong(q, d));
d = new byte[] { 1, 2, 3, 4, 5, 6, 7 };
q = new byte[] { 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7 };
assert scalarIpByteBin(q, d) == 180L; // 12 + 24 + 48 + 96
assertEquals(180L, ipByteBinFunc.applyAsLong(q, d));
d = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
q = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8 };
assert scalarIpByteBin(q, d) == 195L; // 13 + 26 + 52 + 104
assertEquals(195L, ipByteBinFunc.applyAsLong(q, d));
d = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
q = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
assert scalarIpByteBin(q, d) == 225L; // 15 + 30 + 60 + 120
assertEquals(225L, ipByteBinFunc.applyAsLong(q, d));
}
public void testIpByteBin() {
testIpByteBinImpl(ESVectorUtil::ipByteBinByte);
testIpByteBinImpl(defaultedProvider.getVectorUtilSupport()::ipByteBinByte);
testIpByteBinImpl(defOrPanamaProvider.getVectorUtilSupport()::ipByteBinByte);
}
public void testCenterAndCalculateOSQStatsDp() {
int size = random().nextInt(128, 512);
float delta = 1e-3f * size;
var vector = new float[size];
var centroid = new float[size];
for (int i = 0; i < size; ++i) {
vector[i] = random().nextFloat();
centroid[i] = random().nextFloat();
}
var centeredLucene = new float[size];
var statsLucene = new float[6];
defaultedProvider.getVectorUtilSupport().centerAndCalculateOSQStatsDp(vector, centroid, centeredLucene, statsLucene);
var centeredPanama = new float[size];
var statsPanama = new float[6];
defOrPanamaProvider.getVectorUtilSupport().centerAndCalculateOSQStatsDp(vector, centroid, centeredPanama, statsPanama);
assertArrayEquals(centeredLucene, centeredPanama, delta);
assertArrayEquals(statsLucene, statsPanama, delta);
}
public void testCenterAndCalculateOSQStatsEuclidean() {
int size = random().nextInt(128, 512);
float delta = 1e-3f * size;
var vector = new float[size];
var centroid = new float[size];
for (int i = 0; i < size; ++i) {
vector[i] = random().nextFloat();
centroid[i] = random().nextFloat();
}
var centeredLucene = new float[size];
var statsLucene = new float[5];
defaultedProvider.getVectorUtilSupport().centerAndCalculateOSQStatsEuclidean(vector, centroid, centeredLucene, statsLucene);
var centeredPanama = new float[size];
var statsPanama = new float[5];
defOrPanamaProvider.getVectorUtilSupport().centerAndCalculateOSQStatsEuclidean(vector, centroid, centeredPanama, statsPanama);
assertArrayEquals(centeredLucene, centeredPanama, delta);
assertArrayEquals(statsLucene, statsPanama, delta);
}
public void testOsqLoss() {
int size = random().nextInt(128, 512);
float deltaEps = 1e-5f * size;
var vector = new float[size];
var min = Float.MAX_VALUE;
var max = -Float.MAX_VALUE;
float vecMean = 0;
float vecVar = 0;
float norm2 = 0;
for (int i = 0; i < size; ++i) {
vector[i] = random().nextFloat();
min = Math.min(min, vector[i]);
max = Math.max(max, vector[i]);
float delta = vector[i] - vecMean;
vecMean += delta / (i + 1);
float delta2 = vector[i] - vecMean;
vecVar += delta * delta2;
norm2 += vector[i] * vector[i];
}
vecVar /= size;
float vecStd = (float) Math.sqrt(vecVar);
int[] destinationDefault = new int[size];
int[] destinationPanama = new int[size];
for (byte bits : new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }) {
int points = 1 << bits;
float[] initInterval = new float[2];
OptimizedScalarQuantizer.initInterval(bits, vecStd, vecMean, min, max, initInterval);
float step = ((initInterval[1] - initInterval[0]) / (points - 1f));
float stepInv = 1f / step;
float expected = defaultedProvider.getVectorUtilSupport()
.calculateOSQLoss(vector, initInterval[0], initInterval[1], step, stepInv, norm2, 0.1f, destinationDefault);
float result = defOrPanamaProvider.getVectorUtilSupport()
.calculateOSQLoss(vector, initInterval[0], initInterval[1], step, stepInv, norm2, 0.1f, destinationPanama);
assertEquals(expected, result, deltaEps);
assertArrayEquals(destinationDefault, destinationPanama);
}
}
public void testOsqGridPoints() {
int size = random().nextInt(128, 512);
float deltaEps = 1e-5f * size;
var vector = new float[size];
var min = Float.MAX_VALUE;
var max = -Float.MAX_VALUE;
var norm2 = 0f;
float vecMean = 0;
float vecVar = 0;
for (int i = 0; i < size; ++i) {
vector[i] = random().nextFloat();
min = Math.min(min, vector[i]);
max = Math.max(max, vector[i]);
float delta = vector[i] - vecMean;
vecMean += delta / (i + 1);
float delta2 = vector[i] - vecMean;
vecVar += delta * delta2;
norm2 += vector[i] * vector[i];
}
vecVar /= size;
float vecStd = (float) Math.sqrt(vecVar);
int[] destinationDefault = new int[size];
int[] destinationPanama = new int[size];
for (byte bits : new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }) {
int points = 1 << bits;
float[] initInterval = new float[2];
OptimizedScalarQuantizer.initInterval(bits, vecStd, vecMean, min, max, initInterval);
float step = ((initInterval[1] - initInterval[0]) / (points - 1f));
float stepInv = 1f / step;
float[] expected = new float[5];
defaultedProvider.getVectorUtilSupport()
.calculateOSQLoss(vector, initInterval[0], initInterval[1], step, stepInv, norm2, 0.1f, destinationDefault);
defaultedProvider.getVectorUtilSupport().calculateOSQGridPoints(vector, destinationDefault, points, expected);
float[] result = new float[5];
defOrPanamaProvider.getVectorUtilSupport()
.calculateOSQLoss(vector, initInterval[0], initInterval[1], step, stepInv, norm2, 0.1f, destinationPanama);
defOrPanamaProvider.getVectorUtilSupport().calculateOSQGridPoints(vector, destinationPanama, points, result);
assertArrayEquals(expected, result, deltaEps);
assertArrayEquals(destinationDefault, destinationPanama);
}
}
public void testSoarDistance() {
int size = random().nextInt(128, 512);
float deltaEps = 1e-3f * size;
var vector = new float[size];
var centroid = new float[size];
var preResidual = new float[size];
for (int i = 0; i < size; ++i) {
vector[i] = random().nextFloat();
centroid[i] = random().nextFloat();
preResidual[i] = random().nextFloat();
}
float soarLambda = random().nextFloat();
float rnorm = random().nextFloat();
var expected = defaultedProvider.getVectorUtilSupport().soarDistance(vector, centroid, preResidual, soarLambda, rnorm);
var result = defOrPanamaProvider.getVectorUtilSupport().soarDistance(vector, centroid, preResidual, soarLambda, rnorm);
assertEquals(expected, result, deltaEps);
}
public void testQuantizeVectorWithIntervals() {
int vectorSize = randomIntBetween(1, 2048);
float[] vector = new float[vectorSize];
byte bits = (byte) randomIntBetween(1, 8);
for (int i = 0; i < vectorSize; ++i) {
vector[i] = random().nextFloat();
}
float low = random().nextFloat();
float high = random().nextFloat();
if (low > high) {
float tmp = low;
low = high;
high = tmp;
}
int[] quantizeExpected = new int[vectorSize];
int[] quantizeResult = new int[vectorSize];
var expected = defaultedProvider.getVectorUtilSupport().quantizeVectorWithIntervals(vector, quantizeExpected, low, high, bits);
var result = defOrPanamaProvider.getVectorUtilSupport().quantizeVectorWithIntervals(vector, quantizeResult, low, high, bits);
assertArrayEquals(quantizeExpected, quantizeResult);
assertEquals(expected, result, 0f);
}
public void testSquareDistanceBulk() {
int vectorSize = randomIntBetween(1, 2048);
float[] query = generateRandomVector(vectorSize);
float[] v0 = generateRandomVector(vectorSize);
float[] v1 = generateRandomVector(vectorSize);
float[] v2 = generateRandomVector(vectorSize);
float[] v3 = generateRandomVector(vectorSize);
float[] expectedDistances = new float[4];
float[] panamaDistances = new float[4];
defaultedProvider.getVectorUtilSupport().squareDistanceBulk(query, v0, v1, v2, v3, expectedDistances);
defOrPanamaProvider.getVectorUtilSupport().squareDistanceBulk(query, v0, v1, v2, v3, panamaDistances);
assertArrayEquals(expectedDistances, panamaDistances, 1e-3f);
}
public void testSoarDistanceBulk() {
int vectorSize = randomIntBetween(1, 2048);
float deltaEps = 1e-3f * vectorSize;
float[] query = generateRandomVector(vectorSize);
float[] v0 = generateRandomVector(vectorSize);
float[] v1 = generateRandomVector(vectorSize);
float[] v2 = generateRandomVector(vectorSize);
float[] v3 = generateRandomVector(vectorSize);
float[] diff = generateRandomVector(vectorSize);
float soarLambda = random().nextFloat();
float rnorm = random().nextFloat(10);
float[] expectedDistances = new float[4];
float[] panamaDistances = new float[4];
defaultedProvider.getVectorUtilSupport().soarDistanceBulk(query, v0, v1, v2, v3, diff, soarLambda, rnorm, expectedDistances);
defOrPanamaProvider.getVectorUtilSupport().soarDistanceBulk(query, v0, v1, v2, v3, diff, soarLambda, rnorm, panamaDistances);
assertArrayEquals(expectedDistances, panamaDistances, deltaEps);
}
public void testPackAsBinary() {
int dims = randomIntBetween(16, 2048);
int[] toPack = new int[dims];
for (int i = 0; i < dims; i++) {
toPack[i] = randomInt(1);
}
int length = BQVectorUtils.discretize(dims, 64) / 8;
byte[] packed = new byte[length];
byte[] packedLegacy = new byte[length];
defaultedProvider.getVectorUtilSupport().packAsBinary(toPack, packedLegacy);
defOrPanamaProvider.getVectorUtilSupport().packAsBinary(toPack, packed);
assertArrayEquals(packedLegacy, packed);
}
public void testPackAsBinaryCorrectness() {
// 5 bits
int[] toPack = new int[] { 1, 1, 0, 0, 1 };
byte[] packed = new byte[1];
ESVectorUtil.packAsBinary(toPack, packed);
assertArrayEquals(new byte[] { (byte) 0b11001000 }, packed);
// 8 bits
toPack = new int[] { 1, 1, 0, 0, 1, 0, 1, 0 };
packed = new byte[1];
ESVectorUtil.packAsBinary(toPack, packed);
assertArrayEquals(new byte[] { (byte) 0b11001010 }, packed);
// 10 bits
toPack = new int[] { 1, 1, 0, 0, 1, 0, 1, 0, 1, 1 };
packed = new byte[2];
ESVectorUtil.packAsBinary(toPack, packed);
assertArrayEquals(new byte[] { (byte) 0b11001010, (byte) 0b11000000 }, packed);
// 16 bits
toPack = new int[] { 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0 };
packed = new byte[2];
ESVectorUtil.packAsBinary(toPack, packed);
assertArrayEquals(new byte[] { (byte) 0b11001010, (byte) 0b11100110 }, packed);
}
public void testPackAsBinaryDuel() {
int dims = random().nextInt(16, 2049);
int[] toPack = new int[dims];
for (int i = 0; i < dims; i++) {
toPack[i] = random().nextInt(2);
}
int length = BQVectorUtils.discretize(dims, 64) / 8;
byte[] packed = new byte[length];
byte[] packedLegacy = new byte[length];
packAsBinaryLegacy(toPack, packedLegacy);
ESVectorUtil.packAsBinary(toPack, packed);
assertArrayEquals(packedLegacy, packed);
}
public void testIntegerTransposeHalfByte() {
int dims = randomIntBetween(16, 2048);
int[] toPack = new int[dims];
for (int i = 0; i < dims; i++) {
toPack[i] = randomInt(15);
}
int length = 4 * BQVectorUtils.discretize(dims, 64) / 8;
byte[] packed = new byte[length];
byte[] packedLegacy = new byte[length];
transposeHalfByteLegacy(toPack, packedLegacy);
ESVectorUtil.transposeHalfByte(toPack, packed);
assertArrayEquals(packedLegacy, packed);
}
public void testTransposeHalfByte() {
int dims = randomIntBetween(16, 2048);
int[] toPack = new int[dims];
for (int i = 0; i < dims; i++) {
toPack[i] = randomInt(15);
}
int length = 4 * BQVectorUtils.discretize(dims, 64) / 8;
byte[] packed = new byte[length];
byte[] packedLegacy = new byte[length];
defaultedProvider.getVectorUtilSupport().transposeHalfByte(toPack, packedLegacy);
defOrPanamaProvider.getVectorUtilSupport().transposeHalfByte(toPack, packed);
assertArrayEquals(packedLegacy, packed);
}
public void testPackAsDibit() {
int dims = randomIntBetween(16, 2048);
int[] toPack = new int[dims];
for (int i = 0; i < dims; i++) {
toPack[i] = randomInt(3);
}
int length = ESNextDiskBBQVectorsFormat.QuantEncoding.TWO_BIT_4BIT_QUERY.getDocPackedLength(dims);
;
byte[] packed = new byte[length];
byte[] packedLegacy = new byte[length];
defaultedProvider.getVectorUtilSupport().packDibit(toPack, packedLegacy);
defOrPanamaProvider.getVectorUtilSupport().packDibit(toPack, packed);
assertArrayEquals(packedLegacy, packed);
}
public void testPackDibitCorrectness() {
// 5 bits
// binary lower bits 1 1 0 0 1
// binary upper bits 0 1 1 0 0
// resulting dibit 1 3 2 0 1
int[] toPack = new int[] { 1, 3, 2, 0, 1 };
byte[] packed = new byte[2];
ESVectorUtil.packDibit(toPack, packed);
assertArrayEquals(new byte[] { (byte) 0b11001000, (byte) 0b01100000 }, packed);
// 8 bits
// binary lower bits 1 1 0 0 1 0 1 0
// binary upper bits 0 1 1 0 0 1 0 1
// resulting dibit 1 3 2 0 1 2 1 2
toPack = new int[] { 1, 3, 2, 0, 1, 2, 1, 2 };
packed = new byte[2];
ESVectorUtil.packDibit(toPack, packed);
assertArrayEquals(new byte[] { (byte) 0b11001010, (byte) 0b01100101 }, packed);
}
private float[] generateRandomVector(int size) {
float[] vector = new float[size];
for (int i = 0; i < size; ++i) {
vector[i] = random().nextFloat();
}
return vector;
}
void testIpByteBinImpl(ToLongBiFunction<byte[], byte[]> ipByteBinFunc) {
int iterations = atLeast(50);
for (int i = 0; i < iterations; i++) {
int size = random().nextInt(5000);
var d = new byte[size];
var q = new byte[size * B_QUERY];
random().nextBytes(d);
random().nextBytes(q);
assertEquals(scalarIpByteBin(q, d), ipByteBinFunc.applyAsLong(q, d));
Arrays.fill(d, Byte.MAX_VALUE);
Arrays.fill(q, Byte.MAX_VALUE);
assertEquals(scalarIpByteBin(q, d), ipByteBinFunc.applyAsLong(q, d));
Arrays.fill(d, Byte.MIN_VALUE);
Arrays.fill(q, Byte.MIN_VALUE);
assertEquals(scalarIpByteBin(q, d), ipByteBinFunc.applyAsLong(q, d));
}
}
static int scalarIpByteBin(byte[] q, byte[] d) {
int res = 0;
for (int i = 0; i < B_QUERY; i++) {
res += (popcount(q, i * d.length, d, d.length) << i);
}
return res;
}
static int scalarBitAnd(byte[] a, byte[] b) {
int res = 0;
for (int i = 0; i < a.length; i++) {
res += Integer.bitCount((a[i] & b[i]) & 0xFF);
}
return res;
}
public static int popcount(byte[] a, int aOffset, byte[] b, int length) {
int res = 0;
for (int j = 0; j < length; j++) {
int value = (a[aOffset + j] & b[j]) & 0xFF;
for (int k = 0; k < Byte.SIZE; k++) {
if ((value & (1 << k)) != 0) {
++res;
}
}
}
return res;
}
// -- indexOf
static final Class<IndexOutOfBoundsException> IOOBE = IndexOutOfBoundsException.class;
public void testIndexOfBounds() {
int iterations = atLeast(50);
for (int i = 0; i < iterations; i++) {
int size = random().nextInt(2, 5000);
var bytes = new byte[size];
expectThrows(IOOBE, () -> ESVectorUtil.indexOf(bytes, 0, bytes.length + 1, (byte) 0x0A));
expectThrows(IOOBE, () -> ESVectorUtil.indexOf(bytes, 1, bytes.length, (byte) 0x0A));
expectThrows(IOOBE, () -> ESVectorUtil.indexOf(bytes, bytes.length, 1, (byte) 0x0A));
expectThrows(IOOBE, () -> ESVectorUtil.indexOf(bytes, bytes.length - 1, 2, (byte) 0x0A));
expectThrows(IOOBE, () -> ESVectorUtil.indexOf(bytes, randomIntBetween(2, size), bytes.length, (byte) 0x0A));
}
}
public void testIndexOfSimple() {
int iterations = atLeast(50);
for (int i = 0; i < iterations; i++) {
int size = random().nextInt(2, 5000);
var bytes = new byte[size];
byte marker = (byte) 0x0A;
int markerIdx = randomIntBetween(0, bytes.length - 1);
bytes[markerIdx] = marker;
assertEquals(markerIdx, ESVectorUtil.indexOf(bytes, 0, bytes.length, marker));
assertEquals(markerIdx, defaultedProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length, marker));
assertEquals(markerIdx, defOrPanamaProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length, marker));
bytes = new byte[size];
bytes[bytes.length - 1] = marker;
assertEquals(bytes.length - 1, ESVectorUtil.indexOf(bytes, 0, bytes.length, marker));
assertEquals(bytes.length - 1, defaultedProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length, marker));
assertEquals(bytes.length - 1, defOrPanamaProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length, marker));
assertEquals(bytes.length - 2, ESVectorUtil.indexOf(bytes, 1, bytes.length - 1, marker));
assertEquals(bytes.length - 2, defaultedProvider.getVectorUtilSupport().indexOf(bytes, 1, bytes.length - 1, marker));
assertEquals(bytes.length - 2, defOrPanamaProvider.getVectorUtilSupport().indexOf(bytes, 1, bytes.length - 1, marker));
// not found
assertEquals(-1, ESVectorUtil.indexOf(bytes, 0, bytes.length - 1, marker));
assertEquals(-1, defaultedProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length - 1, marker));
assertEquals(-1, defOrPanamaProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length - 1, marker));
bytes = new byte[size];
bytes[0] = marker;
assertEquals(0, ESVectorUtil.indexOf(bytes, 0, bytes.length, marker));
assertEquals(0, defaultedProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length, marker));
assertEquals(0, defOrPanamaProvider.getVectorUtilSupport().indexOf(bytes, 0, bytes.length, marker));
// not found
assertEquals(-1, ESVectorUtil.indexOf(bytes, 1, bytes.length - 1, marker));
assertEquals(-1, defaultedProvider.getVectorUtilSupport().indexOf(bytes, 1, bytes.length - 1, marker));
assertEquals(-1, defOrPanamaProvider.getVectorUtilSupport().indexOf(bytes, 1, bytes.length - 1, marker));
}
}
public void testIndexOfRandom() {
int iterations = atLeast(50);
for (int i = 0; i < iterations; i++) {
int size = random().nextInt(2, 5000);
var bytes = new byte[size];
random().nextBytes(bytes);
byte marker = randomByte();
int markerIdx = randomIntBetween(0, bytes.length - 1);
bytes[markerIdx] = marker;
final int offset = randomIntBetween(0, bytes.length - 2);
final int length = randomIntBetween(0, bytes.length - offset);
final int expectedIdx = scalarIndexOf(bytes, offset, length, marker);
assertEquals(expectedIdx, ESVectorUtil.indexOf(bytes, offset, length, marker));
assertEquals(expectedIdx, defaultedProvider.getVectorUtilSupport().indexOf(bytes, offset, length, marker));
assertEquals(expectedIdx, defOrPanamaProvider.getVectorUtilSupport().indexOf(bytes, offset, length, marker));
}
}
static int scalarIndexOf(byte[] bytes, final int offset, final int length, final byte marker) {
final int end = offset + length;
for (int i = offset; i < end; i++) {
if (bytes[i] == marker) {
return i - offset;
}
}
return -1;
}
private static void packAsBinaryLegacy(int[] vector, byte[] packed) {
for (int i = 0; i < vector.length;) {
byte result = 0;
for (int j = 7; j >= 0 && i < vector.length; j--) {
assert vector[i] == 0 || vector[i] == 1;
result |= (byte) ((vector[i] & 1) << j);
++i;
}
int index = ((i + 7) / 8) - 1;
assert index < packed.length;
packed[index] = result;
}
}
private static void transposeHalfByteLegacy(int[] q, byte[] quantQueryByte) {
for (int i = 0; i < q.length;) {
assert q[i] >= 0 && q[i] <= 15;
int lowerByte = 0;
int lowerMiddleByte = 0;
int upperMiddleByte = 0;
int upperByte = 0;
for (int j = 7; j >= 0 && i < q.length; j--) {
lowerByte |= (q[i] & 1) << j;
lowerMiddleByte |= ((q[i] >> 1) & 1) << j;
upperMiddleByte |= ((q[i] >> 2) & 1) << j;
upperByte |= ((q[i] >> 3) & 1) << j;
i++;
}
int index = ((i + 7) / 8) - 1;
quantQueryByte[index] = (byte) lowerByte;
quantQueryByte[index + quantQueryByte.length / 4] = (byte) lowerMiddleByte;
quantQueryByte[index + quantQueryByte.length / 2] = (byte) upperMiddleByte;
quantQueryByte[index + 3 * quantQueryByte.length / 4] = (byte) upperByte;
}
}
}
| ESVectorUtilTests |
java | apache__camel | components/camel-huawei/camel-huaweicloud-obs/src/test/java/org/apache/camel/component/huaweicloud/obs/DownloadMoveExistsFunctionalTest.java | {
"start": 1553,
"end": 4350
} | class ____ extends CamelTestSupport {
private static final String ACCESS_KEY = "replace_this_with_access_key";
private static final String SECRET_KEY = "replace_this_with_secret_key";
private static final String REGION = "replace_this_with_region";
private static final String BUCKET_NAME = "replace_this_with_bucket_name";
private static final String EXISTING_DESTINATION_BUCKET = "replace_this_with_destination_bucket";
private static final boolean INCLUDE_FOLDERS = true;
private static final int NUMBER_TO_CONSUME = 10;
@BindToRegistry("serviceKeys")
ServiceKeys serviceKeys = new ServiceKeys(ACCESS_KEY, SECRET_KEY);
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("hwcloud-obs:?" +
"serviceKeys=#serviceKeys" +
"®ion=" + REGION +
"&bucketName=" + BUCKET_NAME +
"&destinationBucket=" + EXISTING_DESTINATION_BUCKET +
"&ignoreSslVerification=true" +
"&maxMessagesPerPoll=10" +
"&includeFolders=" + INCLUDE_FOLDERS +
"&deleteAfterRead=false" +
"&moveAfterRead=true")
.log("Download objects successful")
.to("log:LOG?showAll=true")
.to("mock:download_objects_result");
}
};
}
/**
* The following test cases should be manually enabled to perform test against the actual HuaweiCloud OBS server
* with real user credentials. To perform this test, manually comment out the @Ignore annotation and enter relevant
* service parameters in the placeholders above (static variables of this test class)
*
* @throws Exception
*/
@Disabled("Manually enable this once you configure the parameters in the placeholders above")
@Test
public void testListBuckets() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:download_objects_result");
mock.expectedMinimumMessageCount(NUMBER_TO_CONSUME);
mock.assertIsSatisfied();
List<Exchange> exchanges = mock.getExchanges();
assertTrue(exchanges.size() >= NUMBER_TO_CONSUME);
for (Exchange exchange : exchanges) {
assertTrue(exchange.getIn().getHeader(OBSHeaders.OBJECT_KEY, String.class).length() > 0);
if (exchange.getIn().getHeader(Exchange.CONTENT_LENGTH, Integer.class) > 0) {
assertNotNull(exchange.getIn().getBody(String.class));
assertTrue(exchange.getIn().getBody(String.class).length() > 0);
}
}
}
}
| DownloadMoveExistsFunctionalTest |
java | quarkusio__quarkus | extensions/amazon-lambda/common-deployment/src/test/java/io/quarkus/it/UtilTest.java | {
"start": 164,
"end": 752
} | class ____ {
@Test
public void testStringUtil() throws Exception {
Assertions.assertEquals(LambdaUtil.artifactToLambda("foo.bar-1.0-SNAPSHOT"), "FooBar");
Assertions.assertEquals(LambdaUtil.artifactToLambda("foo..bar--1..0-SNAPSHOT"), "FooBar");
Assertions.assertEquals(LambdaUtil.artifactToLambda("lambdaxray-1.0-SNAPSHOT"), "Lambdaxray");
Assertions.assertEquals(LambdaUtil.artifactToLambda("lambdaXray-1.0-SNAPSHOT"), "Lambdaxray");
Assertions.assertEquals(LambdaUtil.artifactToLambda("quarkus-1.0-rulez"), "QuarkusRulez");
}
}
| UtilTest |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng8469InterpolationPrecendenceTest.java | {
"start": 1040,
"end": 1933
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify project is buildable.
*/
@Test
void testIt() throws Exception {
Path basedir = extractResources("/mng-8469").getAbsoluteFile().toPath();
Verifier verifier = newVerifier(basedir.toString());
verifier.addCliArgument("help:effective-pom");
verifier.execute();
verifier.verifyErrorFreeLog();
// 4.0.0-rc-2 fails as
// [ERROR] Some problems were encountered while processing the POMs
// [ERROR] The build could not read 1 project -> [Help 1]
// [ERROR]
// [ERROR] The project org.apache.maven.its.mng8469:test:1.0 (...pom.xml) has 1 error
// [ERROR] recursive variable reference: scm.connection
verifier.verifyTextInLog("<connection>foobar</connection>");
}
}
| MavenITmng8469InterpolationPrecendenceTest |
java | netty__netty | microbench/src/main/java/io/netty/handler/codec/http/HttpPostDecoderBenchmark.java | {
"start": 1634,
"end": 3130
} | class ____ extends AbstractMicrobenchmark {
@Param({ "false", "true" })
public boolean direct;
private ByteBuf buf;
private HttpRequest request;
@Setup()
public void setUp() {
request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/post");
request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/x-www-form-urlencoded");
buf = direct ? Unpooled.directBuffer() : Unpooled.buffer();
for (int i = 0; i < 100; i++) {
if (i != 0) {
buf.writeByte('&');
}
ByteBufUtil.writeAscii(buf, "form-field-" + i);
buf.writeByte('=');
ByteBufUtil.writeAscii(buf, randomString());
}
}
private static CharSequence randomString() {
Random rng = ThreadLocalRandom.current();
int len = 4 + rng.nextInt(110);
StringBuilder sb = new StringBuilder(len);
for (int i = 0; i < len; i++) {
String chars = "_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJLKMNOPQRSTUVWXYZ";
sb.append(chars.charAt(rng.nextInt(chars.length())));
}
return sb;
}
@Benchmark
public List<InterfaceHttpData> decode() {
HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(request);
DefaultLastHttpContent content = new DefaultLastHttpContent(buf.duplicate());
decoder.offer(content);
return decoder.getBodyHttpDatas();
}
}
| HttpPostDecoderBenchmark |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/RingHashLoadBalancerTest.java | {
"start": 3954,
"end": 56398
} | class ____ {
private static final String AUTHORITY = "foo.googleapis.com";
private static final String CUSTOM_REQUEST_HASH_HEADER = "custom-request-hash-header";
private static final Metadata.Key<String> CUSTOM_METADATA_KEY =
Metadata.Key.of(CUSTOM_REQUEST_HASH_HEADER, Metadata.ASCII_STRING_MARSHALLER);
private static final Attributes.Key<String> CUSTOM_KEY = Attributes.Key.create("custom-key");
private static final ConnectivityStateInfo CSI_CONNECTING =
ConnectivityStateInfo.forNonError(CONNECTING);
public static final ConnectivityStateInfo CSI_READY = ConnectivityStateInfo.forNonError(READY);
@Rule
public final MockitoRule mocks = MockitoJUnit.rule();
private final SynchronizationContext syncContext = new SynchronizationContext(
new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
throw new AssertionError(e);
}
});
private final Map<List<EquivalentAddressGroup>, Subchannel> subchannels = new HashMap<>();
private final Deque<Subchannel> connectionRequestedQueue = new ArrayDeque<>();
private final XxHash64 hashFunc = XxHash64.INSTANCE;
private final TestHelper testHelperInst = new TestHelper();
private final Helper helper = mock(Helper.class, delegatesTo(testHelperInst));
@Captor
private ArgumentCaptor<SubchannelPicker> pickerCaptor;
private RingHashLoadBalancer loadBalancer;
private boolean defaultNewPickFirst = PickFirstLoadBalancerProvider.isEnabledNewPickFirst();
@Before
public void setUp() {
loadBalancer = new RingHashLoadBalancer(helper);
// Consume calls not relevant for tests that would otherwise fail verifyNoMoreInteractions
verify(helper).getAuthority();
verify(helper).getSynchronizationContext();
}
@After
public void tearDown() {
PickFirstLoadBalancerProviderAccessor.setEnableNewPickFirst(defaultNewPickFirst);
loadBalancer.shutdown();
for (Subchannel subchannel : subchannels.values()) {
verify(subchannel).shutdown();
}
connectionRequestedQueue.clear();
}
@Test
public void subchannelLazyConnectUntilPicked() {
RingHashConfig config = new RingHashConfig(10, 100, "");
List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1); // one server
Status addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
verify(helper).updateBalancingState(eq(IDLE), pickerCaptor.capture());
assertThat(subchannels.size()).isEqualTo(0);
// Picking subchannel triggers connection.
PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
PickResult result = pickerCaptor.getValue().pickSubchannel(args);
assertThat(result.getStatus().isOk()).isTrue();
assertThat(result.getSubchannel()).isNull();
Subchannel subchannel = Iterables.getOnlyElement(subchannels.values());
int expectedTimes = PickFirstLoadBalancerProvider.isEnabledNewPickFirst()
&& !PickFirstLoadBalancerProvider.isEnabledHappyEyeballs() ? 1 : 2;
verify(subchannel, times(expectedTimes)).requestConnection();
verify(helper).updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class));
verify(helper).createSubchannel(any(CreateSubchannelArgs.class));
deliverSubchannelState(subchannel, CSI_CONNECTING);
int expectedCount = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 1 : 2;
verify(helper, times(expectedCount)).updateBalancingState(eq(CONNECTING), any());
// Subchannel becomes ready, triggers pick again.
deliverSubchannelState(subchannel, CSI_READY);
verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
result = pickerCaptor.getValue().pickSubchannel(args);
assertThat(result.getSubchannel()).isSameInstanceAs(subchannel);
AbstractTestHelper.verifyNoMoreMeaningfulInteractions(helper);
}
// Verifies that a subchannel dropping back to IDLE does not reconnect on its own;
// reconnection only happens when a subsequent pick requests it.
@Test
public void subchannelNotAutoReconnectAfterReenteringIdle() {
  RingHashConfig config = new RingHashConfig(10, 100, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1); // one server
  Status addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isTrue();
  verify(helper).updateBalancingState(eq(IDLE), pickerCaptor.capture());
  assertThat(subchannels).isEmpty();
  // Picking subchannel triggers connection.
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  pickerCaptor.getValue().pickSubchannel(args);
  Subchannel subchannel = subchannels.get(Collections.singletonList(servers.get(0)));
  InOrder inOrder = Mockito.inOrder(helper, subchannel);
  // Connection attempt count varies with the active pick-first implementation.
  int expectedTimes = PickFirstLoadBalancerProvider.isEnabledHappyEyeballs()
      || !PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 2 : 1;
  inOrder.verify(subchannel, times(expectedTimes)).requestConnection();
  deliverSubchannelState(subchannel, CSI_READY);
  inOrder.verify(helper).updateBalancingState(eq(READY), any(SubchannelPicker.class));
  // Re-entering IDLE must not trigger an automatic reconnect.
  deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(IDLE));
  inOrder.verify(helper).updateBalancingState(eq(IDLE), pickerCaptor.capture());
  inOrder.verify(subchannel, never()).requestConnection();
  // Picking again triggers reconnection.
  pickerCaptor.getValue().pickSubchannel(args);
  inOrder.verify(subchannel).requestConnection();
}
// Verifies how the ring-hash LB aggregates two children's states into the overall
// balancing state across CONNECTING/READY/IDLE/TRANSIENT_FAILURE transitions.
@Test
public void aggregateSubchannelStates_connectingReadyIdleFailure() {
  RingHashConfig config = new RingHashConfig(10, 100, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1);
  InOrder inOrder = Mockito.inOrder(helper);
  initializeLbSubchannels(config, servers);
  // one in CONNECTING, one in IDLE
  deliverSubchannelState(getSubchannel(servers, 0), CSI_CONNECTING);
  inOrder.verify(helper).updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class));
  verifyConnection(0);
  // two in CONNECTING
  deliverSubchannelState(getSubchannel(servers, 1), CSI_CONNECTING);
  inOrder.verify(helper).updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class));
  verifyConnection(0);
  // one in CONNECTING, one in READY
  deliverSubchannelState(getSubchannel(servers, 1), CSI_READY);
  inOrder.verify(helper).updateBalancingState(eq(READY), any(SubchannelPicker.class));
  verifyConnection(0);
  // one in TRANSIENT_FAILURE, one in READY
  deliverSubchannelState(
      getSubchannel(servers, 0),
      ConnectivityStateInfo.forTransientFailure(
          Status.UNKNOWN.withDescription("unknown failure")));
  // Old and new pick-first differ in whether refreshNameResolution() is invoked here.
  if (PickFirstLoadBalancerProvider.isEnabledNewPickFirst()) {
    inOrder.verify(helper).updateBalancingState(eq(READY), any());
  } else {
    inOrder.verify(helper).refreshNameResolution();
    inOrder.verify(helper).updateBalancingState(eq(READY), any());
  }
  verifyConnection(0);
  // one in TRANSIENT_FAILURE, one in IDLE
  deliverSubchannelState(
      getSubchannel(servers, 1),
      ConnectivityStateInfo.forNonError(IDLE));
  if (PickFirstLoadBalancerProvider.isEnabledNewPickFirst()) {
    inOrder.verify(helper).updateBalancingState(eq(CONNECTING), any());
  } else {
    inOrder.verify(helper).refreshNameResolution();
    inOrder.verify(helper).updateBalancingState(eq(CONNECTING), any());
  }
  verifyConnection(0);
}
// Asserts that exactly {@code times} new connection requests were queued, clearing the
// Mockito invocation history of each drained subchannel; fails on too few or too many.
private void verifyConnection(int times) {
  int drained = 0;
  while (drained < times) {
    Subchannel requested = connectionRequestedQueue.poll();
    assertWithMessage("Expected %s new connections, but found %s", times, drained)
        .that(requested).isNotNull();
    clearInvocations(requested);
    drained++;
  }
  // No extra connection attempts may remain in the queue.
  assertThat(connectionRequestedQueue.poll()).isNull();
}
// Verifies aggregation when children fail one by one: with >=2 in TRANSIENT_FAILURE the LB
// reports TF, and pick-first shields the parent from a child's internal CONNECTING retry.
@Test
public void aggregateSubchannelStates_allSubchannelsInTransientFailure() {
  RingHashConfig config = new RingHashConfig(10, 100, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1, 1);
  List<Subchannel> subChannelList = initializeLbSubchannels(config, servers, STAY_IN_CONNECTING);
  // reset inOrder to include all the childLBs now that they have been created
  clearInvocations(helper);
  InOrder inOrder = Mockito.inOrder(helper,
      subChannelList.get(0), subChannelList.get(1), subChannelList.get(2), subChannelList.get(3));
  // one in TRANSIENT_FAILURE, three in CONNECTING
  deliverNotFound(subChannelList, 0);
  refreshInvokedButNotUpdateBS(inOrder, TRANSIENT_FAILURE);
  // two in TRANSIENT_FAILURE, two in CONNECTING
  deliverNotFound(subChannelList, 1);
  refreshInvokedAndUpdateBS(inOrder, TRANSIENT_FAILURE);
  // All 4 in TF switch to TF
  deliverNotFound(subChannelList, 2);
  refreshInvokedAndUpdateBS(inOrder, TRANSIENT_FAILURE);
  deliverNotFound(subChannelList, 3);
  refreshInvokedAndUpdateBS(inOrder, TRANSIENT_FAILURE);
  // reset subchannel to CONNECTING - shouldn't change anything since PF hides the state change
  deliverSubchannelState(subChannelList.get(2), CSI_CONNECTING);
  inOrder.verify(helper, never())
      .updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class));
  inOrder.verify(subChannelList.get(2), never()).requestConnection();
  // three in TRANSIENT_FAILURE, one in READY
  deliverSubchannelState(subChannelList.get(2), CSI_READY);
  inOrder.verify(helper).updateBalancingState(eq(READY), any(SubchannelPicker.class));
  inOrder.verify(subChannelList.get(2), never()).requestConnection();
}
// Old PF and new PF reverse calling order of updateBalancingState and refreshNameResolution.
// Verifies that refreshNameResolution() was invoked while updateBalancingState(state, ...)
// was never called either before or after it.
private void refreshInvokedButNotUpdateBS(InOrder inOrder, ConnectivityState state) {
  inOrder.verify(helper, never()).updateBalancingState(eq(state), any(SubchannelPicker.class));
  inOrder.verify(helper).refreshNameResolution();
  inOrder.verify(helper, never()).updateBalancingState(eq(state), any(SubchannelPicker.class));
}
// Old PF and new PF reverse calling order of updateBalancingState and refreshNameResolution.
// Verifies both calls happened, ordered according to the active pick-first implementation:
// new PF updates balancing state before refreshing name resolution, old PF after.
private void refreshInvokedAndUpdateBS(InOrder inOrder, ConnectivityState state) {
  if (PickFirstLoadBalancerProvider.isEnabledNewPickFirst()) {
    inOrder.verify(helper).updateBalancingState(eq(state), any());
  }
  inOrder.verify(helper).refreshNameResolution();
  if (!PickFirstLoadBalancerProvider.isEnabledNewPickFirst()) {
    inOrder.verify(helper).updateBalancingState(eq(state), any());
  }
}
// Verifies that state notifications delivered for already-shut-down subchannels are ignored
// and cause no further helper interactions after the LB itself has shut down.
@Test
public void ignoreShutdownSubchannelStateChange() {
  RingHashConfig config = new RingHashConfig(10, 100, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  loadBalancer.shutdown();
  for (Subchannel sc : subchannels.values()) {
    verify(sc).shutdown();
    // When the subchannel is being shut down, a SHUTDOWN connectivity state is delivered
    // back to the subchannel state listener.
    deliverSubchannelState(sc, ConnectivityStateInfo.forNonError(SHUTDOWN));
  }
  verifyNoMoreInteractions(helper);
}
// Verifies pick determinism when the address list shrinks: a hash that previously mapped to a
// surviving server keeps mapping to the same subchannel, and surviving subchannels get their
// addresses updated in place rather than being recreated.
@Test
public void deterministicPickWithHostsPartiallyRemoved() {
  RingHashConfig config = new RingHashConfig(10, 100, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1, 1, 1);
  initializeLbSubchannels(config, servers);
  InOrder inOrder = Mockito.inOrder(helper);
  // Bring all subchannels to READY so that next pick always succeeds.
  for (Subchannel subchannel : subchannels.values()) {
    deliverSubchannelState(subchannel, CSI_READY);
    inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  }
  // Simulate rpc hash hits one ring entry exactly for server1.
  long rpcHash = hashFunc.hashAsciiString("FakeSocketAddress-server1_0");
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(rpcHash);
  pickerCaptor.getValue().pickSubchannel(args);
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  Subchannel subchannel = result.getSubchannel();
  assertThat(subchannel.getAddresses()).isEqualTo(servers.get(1));
  List<EquivalentAddressGroup> updatedServers = new ArrayList<>();
  for (EquivalentAddressGroup addr : servers.subList(0, 2)) { // only server0 and server1 left
    Attributes attr = addr.getAttributes().toBuilder().set(CUSTOM_KEY, "custom value").build();
    updatedServers.add(new EquivalentAddressGroup(addr.getAddresses(), attr));
  }
  Subchannel subchannel0_old = getSubchannel(servers, 0);
  Subchannel subchannel1_old = getSubchannel(servers, 1);
  Status addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(updatedServers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isTrue();
  // Existing subchannels are reused with refreshed addresses/attributes.
  verify(subchannel0_old).updateAddresses(Collections.singletonList(updatedServers.get(0)));
  verify(subchannel1_old).updateAddresses(Collections.singletonList(updatedServers.get(1)));
  inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  // Same hash still lands on the same subchannel after the update.
  assertThat(pickerCaptor.getValue().pickSubchannel(args).getSubchannel())
      .isSameInstanceAs(subchannel);
  verifyNoMoreInteractions(helper);
}
// Verifies pick determinism when the address list grows: an existing hash-to-server mapping
// is preserved (consistent hashing) even after new servers are added to the ring.
@Test
public void deterministicPickWithNewHostsAdded() {
  RingHashConfig config = new RingHashConfig(10, 100, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1); // server0 and server1
  initializeLbSubchannels(config, servers, DO_NOT_VERIFY, DO_NOT_RESET_HELPER);
  InOrder inOrder = Mockito.inOrder(helper);
  // Bring all subchannels to READY so that next pick always succeeds.
  for (Subchannel subchannel : subchannels.values()) {
    deliverSubchannelState(subchannel, CSI_READY);
    inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  }
  // Simulate rpc hash hits one ring entry exactly for server1.
  long rpcHash = hashFunc.hashAsciiString("FakeSocketAddress-server1_0");
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(rpcHash);
  pickerCaptor.getValue().pickSubchannel(args);
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  Subchannel subchannel = result.getSubchannel();
  assertThat(subchannel.getAddresses()).isEqualTo(servers.get(1));
  servers = createWeightedServerAddrs(1, 1, 1, 1, 1); // server2, server3, server4 added
  Status addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isTrue();
  assertThat(loadBalancer.getChildLbStates().size()).isEqualTo(5);
  inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  // The original hash still resolves to the original subchannel.
  assertThat(pickerCaptor.getValue().pickSubchannel(args).getSubchannel())
      .isSameInstanceAs(subchannel);
  inOrder.verifyNoMoreInteractions();
}
// Verifies that when a request hash header is configured, the hash is taken from that header's
// single value and deterministically selects the matching server on the ring.
@Test
public void deterministicPickWithRequestHashHeader_oneHeaderValue() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, CUSTOM_REQUEST_HASH_HEADER);
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  InOrder inOrder = Mockito.inOrder(helper);
  // Bring all subchannels to READY.
  for (Subchannel subchannel : subchannels.values()) {
    deliverSubchannelState(subchannel, CSI_READY);
    inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  }
  // Pick subchannel with custom request hash header where the rpc hash hits server1.
  Metadata headers = new Metadata();
  headers.put(CUSTOM_METADATA_KEY, "FakeSocketAddress-server1_0");
  PickSubchannelArgs args =
      new PickSubchannelArgsImpl(
          TestMethodDescriptors.voidMethod(),
          headers,
          CallOptions.DEFAULT,
          new PickDetailsConsumer() {});
  SubchannelPicker picker = pickerCaptor.getValue();
  PickResult result = picker.pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel().getAddresses()).isEqualTo(servers.get(1));
}
// Verifies hashing behavior when the request hash header carries multiple values for the same
// key; the observed result selects server1's ring entry (the last-added value's target).
@Test
public void deterministicPickWithRequestHashHeader_multipleHeaderValues() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, CUSTOM_REQUEST_HASH_HEADER);
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  InOrder inOrder = Mockito.inOrder(helper);
  // Bring all subchannels to READY.
  for (Subchannel subchannel : subchannels.values()) {
    deliverSubchannelState(subchannel, CSI_READY);
    inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  }
  // Pick subchannel with custom request hash header with multiple values for the same key where
  // the rpc hash hits server1.
  Metadata headers = new Metadata();
  headers.put(CUSTOM_METADATA_KEY, "FakeSocketAddress-server0_0");
  headers.put(CUSTOM_METADATA_KEY, "FakeSocketAddress-server1_0");
  PickSubchannelArgs args =
      new PickSubchannelArgsImpl(
          TestMethodDescriptors.voidMethod(),
          headers,
          CallOptions.DEFAULT,
          new PickDetailsConsumer() {});
  SubchannelPicker picker = pickerCaptor.getValue();
  PickResult result = picker.pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel().getAddresses()).isEqualTo(servers.get(1));
}
// Verifies that with a configured-but-absent hash header the LB falls back to a random hash
// (here a deterministic FakeRandom), yielding an even pick distribution across READY servers.
@Test
public void pickWithRandomHash_allSubchannelsReady() {
  loadBalancer = new RingHashLoadBalancer(helper, new FakeRandom());
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(2, 2, "dummy-random-hash");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1);
  initializeLbSubchannels(config, servers);
  InOrder inOrder = Mockito.inOrder(helper);
  // Bring all subchannels to READY.
  Map<EquivalentAddressGroup, Integer> pickCounts = new HashMap<>();
  for (Subchannel subchannel : subchannels.values()) {
    deliverSubchannelState(subchannel, CSI_READY);
    pickCounts.put(subchannel.getAddresses(), 0);
    inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  }
  // Pick subchannel 100 times with random hash.
  SubchannelPicker picker = pickerCaptor.getValue();
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  for (int i = 0; i < 100; ++i) {
    Subchannel pickedSubchannel = picker.pickSubchannel(args).getSubchannel();
    EquivalentAddressGroup addr = pickedSubchannel.getAddresses();
    pickCounts.put(addr, pickCounts.get(addr) + 1);
  }
  // Verify the distribution is uniform where server0 and server1 are exactly picked 50 times.
  assertThat(pickCounts.get(servers.get(0))).isEqualTo(50);
  assertThat(pickCounts.get(servers.get(1))).isEqualTo(50);
}
// Verifies that a random-hash pick buffers the RPC (null subchannel) and does not kick off new
// connections while at least one subchannel is already CONNECTING.
@Test
public void pickWithRandomHash_atLeastOneSubchannelConnecting() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "dummy-random-hash");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // Bring one subchannel to CONNECTING.
  deliverSubchannelState(getSubChannel(servers.get(0)), CSI_CONNECTING);
  verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  // Pick subchannel with random hash does not trigger connection.
  SubchannelPicker picker = pickerCaptor.getValue();
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = picker.pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel()).isNull(); // buffer request
  verifyConnection(0);
}
// Verifies that with one subchannel in TRANSIENT_FAILURE and the rest IDLE, a random-hash pick
// walks the ring and triggers exactly one connection attempt on an IDLE subchannel.
@Test
public void pickWithRandomHash_firstSubchannelInTransientFailure_remainingSubchannelsIdle() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "dummy-random-hash");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // Bring one subchannel to TRANSIENT_FAILURE.
  deliverSubchannelUnreachable(getSubChannel(servers.get(0)));
  verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  verifyConnection(0);
  // Pick subchannel with random hash does trigger connection by walking the ring
  // and choosing the first (at most one) IDLE subchannel along the way.
  SubchannelPicker picker = pickerCaptor.getValue();
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = picker.pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel()).isNull(); // buffer request
  verifyConnection(1);
}
// Looks up the mock subchannel that was created for a single-address group.
private Subchannel getSubChannel(EquivalentAddressGroup eag) {
  List<EquivalentAddressGroup> key = Collections.singletonList(eag);
  return subchannels.get(key);
}
// Verifies ring-walk fallback: when the hash's primary server is in TRANSIENT_FAILURE, the
// pick skips it and connects the next server on the ring, without creating excess subchannels.
@Test
public void skipFailingHosts_pickNextNonFailingHost() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  Status addressesAcceptanceStatus =
      loadBalancer.acceptResolvedAddresses(
          ResolvedAddresses.newBuilder()
              .setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isTrue();
  // Create subchannel for the first address
  loadBalancer.getChildLbStates().iterator().next().getCurrentPicker()
      .pickSubchannel(getDefaultPickSubchannelArgs(hashFunc.hashVoid()));
  verifyConnection(1);
  reset(helper);
  // ring:
  //   "FakeSocketAddress-server0_0"
  //   "FakeSocketAddress-server1_0"
  //   "FakeSocketAddress-server2_0"
  long rpcHash = hashFunc.hashAsciiString("FakeSocketAddress-server0_0");
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(rpcHash);
  // Bring down server0 to force trying server2.
  deliverSubchannelState(
      getSubChannel(servers.get(0)),
      ConnectivityStateInfo.forTransientFailure(
          Status.UNAVAILABLE.withDescription("unreachable")));
  verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel()).isNull(); // buffer request
  // verify kicked off connection to server2
  // Connection attempt count depends on the active pick-first implementation.
  int expectedTimes = PickFirstLoadBalancerProvider.isEnabledHappyEyeballs()
      || !PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 2 : 1;
  verify(getSubChannel(servers.get(1)), times(expectedTimes)).requestConnection();
  assertThat(subchannels.size()).isEqualTo(2); // no excessive connection
  deliverSubchannelState(getSubChannel(servers.get(1)), CSI_CONNECTING);
  verify(helper, atLeast(1))
      .updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel()).isNull(); // buffer request
  deliverSubchannelState(getSubChannel(servers.get(1)), CSI_READY);
  verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel().getAddresses()).isEqualTo(servers.get(1));
}
// Builds pick args for the void test method, carrying the given ring hash in call options.
private PickSubchannelArgs getDefaultPickSubchannelArgs(long rpcHash) {
  CallOptions callOptions = CallOptions.DEFAULT.withOption(XdsNameResolver.RPC_HASH_KEY, rpcHash);
  return new PickSubchannelArgsImpl(
      TestMethodDescriptors.voidMethod(),
      new Metadata(),
      callOptions,
      new PickDetailsConsumer() {});
}
// Builds pick args whose hash lands exactly on the first ring entry of the given server index.
private PickSubchannelArgs getDefaultPickSubchannelArgsForServer(int serverid) {
  String ringEntry = "FakeSocketAddress-server" + serverid + "_0";
  return getDefaultPickSubchannelArgs(hashFunc.hashAsciiString(ringEntry));
}
// Verifies behavior when the two nearest ring servers are failing: RPCs fail with the original
// server's error until the next server becomes READY, after which all hashes resolve to it.
@Test
public void skipFailingHosts_firstTwoHostsFailed_pickNextFirstReady() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // ring:
  //   "FakeSocketAddress-server0_0"
  //   "FakeSocketAddress-server1_0"
  //   "FakeSocketAddress-server2_0"
  long rpcHash = hashFunc.hashAsciiString("FakeSocketAddress-server1_0");
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(rpcHash);
  // Bring down server0 and server2 to force trying server1.
  deliverSubchannelState(
      getSubchannel(servers, 1),
      ConnectivityStateInfo.forTransientFailure(
          Status.UNAVAILABLE.withDescription("unreachable")));
  deliverSubchannelState(
      getSubchannel(servers, 2),
      ConnectivityStateInfo.forTransientFailure(
          Status.PERMISSION_DENIED.withDescription("permission denied")));
  verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture());
  verifyConnection(0);
  PickResult result = pickerCaptor.getValue().pickSubchannel(args); // activate last subchannel
  assertThat(result.getStatus().isOk()).isTrue();
  // New pick-first initiates the retry itself, so the pick triggers no extra connection.
  int expectedCount = PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 0 : 1;
  verifyConnection(expectedCount);
  deliverSubchannelState(
      getSubchannel(servers, 0),
      ConnectivityStateInfo.forTransientFailure(
          Status.PERMISSION_DENIED.withDescription("permission denied again")));
  verify(helper, times(2)).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture());
  result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isFalse(); // fail the RPC
  assertThat(result.getStatus().getCode())
      .isEqualTo(Code.UNAVAILABLE); // with error status for the original server hit by hash
  assertThat(result.getStatus().getDescription()).isEqualTo("unreachable");
  // Now connecting to server1.
  deliverSubchannelState(getSubchannel(servers, 1), CSI_CONNECTING);
  reset(helper);
  result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isFalse(); // fail the RPC
  assertThat(result.getStatus().getCode())
      .isEqualTo(Code.UNAVAILABLE); // with error status for the original server hit by hash
  assertThat(result.getStatus().getDescription()).isEqualTo("unreachable");
  // Simulate server1 becomes READY.
  deliverSubchannelState(getSubchannel(servers, 1), CSI_READY);
  verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
  SubchannelPicker picker = pickerCaptor.getValue();
  result = picker.pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue(); // succeed
  assertThat(result.getSubchannel().getAddresses()).isEqualTo(servers.get(1)); // with server1
  // Hashes for the still-failing servers also resolve to the only READY server.
  assertThat(picker.pickSubchannel(getDefaultPickSubchannelArgsForServer(0))).isEqualTo(result);
  assertThat(picker.pickSubchannel(getDefaultPickSubchannelArgsForServer(2))).isEqualTo(result);
}
// Verifies that a server removed from the resolved addresses has its subchannel shut down,
// and the balancing state is updated before the shutdown happens.
@Test
public void removingAddressShutdownSubchannel() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> svs1 = createWeightedServerAddrs(1, 1, 1);
  List<Subchannel> subchannels1 = initializeLbSubchannels(config, svs1, STAY_IN_CONNECTING);
  List<EquivalentAddressGroup> svs2 = createWeightedServerAddrs(1, 1);
  InOrder inOrder = Mockito.inOrder(helper, subchannels1.get(2));
  // send LB the missing address
  loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(svs2).setLoadBalancingPolicyConfig(config).build());
  inOrder.verify(helper).updateBalancingState(eq(CONNECTING), any());
  inOrder.verify(subchannels1.get(2)).shutdown();
}
// Verifies that when every subchannel is in TRANSIENT_FAILURE, a pick fails with the error of
// the server the hash maps to, and triggers no additional connection attempts.
@Test
public void allSubchannelsInTransientFailure() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // Bring all subchannels to TRANSIENT_FAILURE.
  for (Subchannel subchannel : subchannels.values()) {
    deliverSubchannelState(subchannel, ConnectivityStateInfo.forTransientFailure(
        Status.UNAVAILABLE.withDescription(
            subchannel.getAddresses().getAddresses() + " unreachable")));
  }
  verify(helper, atLeastOnce())
      .updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture());
  verifyConnection(0);
  // Picking subchannel triggers connection. RPC hash hits server0.
  PickSubchannelArgs args = getDefaultPickSubchannelArgsForServer(0);
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isFalse();
  assertThat(result.getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
  assertThat(result.getStatus().getDescription())
      .isEqualTo("[FakeSocketAddress-server0] unreachable");
  verifyConnection(0); // TF has already started taking care of this, pick doesn't need to
}
// Verifies that when the ring's first server is failing but another is IDLE, a pick succeeds
// (buffered) and triggers exactly one connection attempt on the IDLE subchannel.
@Test
public void firstSubchannelIdle() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // Go to TF does nothing, though PF will try to reconnect after backoff
  deliverSubchannelState(getSubchannel(servers, 1),
      ConnectivityStateInfo.forTransientFailure(
          Status.UNAVAILABLE.withDescription("unreachable")));
  verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  verifyConnection(0);
  // Picking subchannel triggers connection. RPC hash hits server0.
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  verifyConnection(1);
}
// Verifies that a pick while subchannels are already CONNECTING does not request any
// additional connections.
@Test
public void firstSubchannelConnecting() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  deliverSubchannelState(getSubchannel(servers, 0), CSI_CONNECTING);
  deliverSubchannelState(getSubchannel(servers, 1), CSI_CONNECTING);
  verify(helper, times(2)).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  // Picking subchannel triggers connection.
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  // No new connection requests while connections are already in flight.
  verify(getSubchannel(servers, 0), never()).requestConnection();
  verify(getSubchannel(servers, 1), never()).requestConnection();
  verify(getSubchannel(servers, 2), never()).requestConnection();
}
// Looks up the mock subchannel created for the server at {@code serverIndex}.
private Subchannel getSubchannel(List<EquivalentAddressGroup> servers, int serverIndex) {
  EquivalentAddressGroup server = servers.get(serverIndex);
  return subchannels.get(Collections.singletonList(server));
}
// Verifies gRFC A61 behavior: picking must not re-request a connection on a failing
// subchannel, but does connect the next subchannel on the ring.
@Test
public void firstSubchannelFailure() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  List<Subchannel> subchannelList =
      initializeLbSubchannels(config, servers, RESET_SUBCHANNEL_MOCKS);
  // ring:
  //   "FakeSocketAddress-server1_0"
  //   "FakeSocketAddress-server0_0"
  //   "FakeSocketAddress-server2_0"
  deliverSubchannelState(subchannelList.get(0),
      ConnectivityStateInfo.forTransientFailure(
          Status.UNAVAILABLE.withDescription("unreachable")));
  verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  verifyConnection(0);
  // Per GRFC A61 Picking subchannel should no longer request connections that were failing
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  SubchannelPicker picker1 = pickerCaptor.getValue();
  PickResult result = picker1.pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  assertThat(result.getSubchannel()).isNull();
  verify(subchannelList.get(0), never()).requestConnection(); // In TF
  verify(subchannelList.get(1)).requestConnection();
  verify(subchannelList.get(2), never()).requestConnection(); // Not one of the first 2
}
// Verifies ring-walk with the first server failing and the second CONNECTING: picks buffer
// without new connections until the second subchannel re-enters IDLE, then a pick connects it.
@Test
public void secondSubchannelConnecting() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // ring:
  //   "FakeSocketAddress-server1_0"
  //   "FakeSocketAddress-server0_0"
  //   "FakeSocketAddress-server2_0"
  Subchannel firstSubchannel = getSubchannel(servers, 0);
  deliverSubchannelUnreachable(firstSubchannel);
  verifyConnection(0);
  deliverSubchannelState(getSubchannel(servers, 2), CSI_CONNECTING);
  verify(helper, times(2)).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  verifyConnection(0);
  // Picking subchannel when idle triggers connection.
  deliverSubchannelState(getSubchannel(servers, 2),
      ConnectivityStateInfo.forNonError(IDLE));
  verifyConnection(0);
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  verifyConnection(1);
}
// Verifies that with the two ring-adjacent servers failing (overall TRANSIENT_FAILURE),
// a pick still triggers a connection on the remaining untried subchannel.
@Test
public void secondSubchannelFailure() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // ring:
  //   "FakeSocketAddress-server1_0"
  //   "FakeSocketAddress-server0_0"
  //   "FakeSocketAddress-server2_0"
  Subchannel firstSubchannel = getSubchannel(servers, 0);
  deliverSubchannelUnreachable(firstSubchannel);
  deliverSubchannelUnreachable(getSubchannel(servers, 2));
  verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture());
  verifyConnection(0);
  // Picking subchannel triggers connection.
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  verify(getSubchannel(servers, 1)).requestConnection();
  verifyConnection(1);
}
// Verifies gRFC A61: with two servers failing and the third CONNECTING, a pick does not
// trigger any further connection attempts.
@Test
public void thirdSubchannelConnecting() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // ring:
  //   "FakeSocketAddress-server1_0"
  //   "FakeSocketAddress-server0_0"
  //   "FakeSocketAddress-server2_0"
  Subchannel firstSubchannel = getSubchannel(servers, 0);
  deliverSubchannelUnreachable(firstSubchannel);
  deliverSubchannelUnreachable(getSubchannel(servers, 2));
  deliverSubchannelState(getSubchannel(servers, 1), CSI_CONNECTING);
  verify(helper, atLeastOnce())
      .updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture());
  verifyConnection(0);
  // Picking subchannel should not trigger connection per gRFC A61.
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  verifyConnection(0);
}
// Verifies sticky TRANSIENT_FAILURE: pick-first hides a failed child's IDLE transition from
// the parent, and a subsequent pick connects the next server on the ring instead.
@Test
public void stickyTransientFailure() {
  // Map each server address to exactly one ring entry.
  RingHashConfig config = new RingHashConfig(3, 3, "");
  List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1, 1);
  initializeLbSubchannels(config, servers);
  // Bring one subchannel to TRANSIENT_FAILURE.
  Subchannel firstSubchannel = getSubchannel(servers, 0);
  deliverSubchannelUnreachable(firstSubchannel);
  verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture());
  verifyConnection(0);
  reset(helper);
  deliverSubchannelState(firstSubchannel, ConnectivityStateInfo.forNonError(IDLE));
  // Should not have called updateBalancingState on the helper again because PickFirst is
  // shielding the higher level from the state change.
  verify(helper, never()).updateBalancingState(any(), any());
  // Old pick-first auto-reconnects after re-entering IDLE; new pick-first does not.
  verifyConnection(PickFirstLoadBalancerProvider.isEnabledNewPickFirst() ? 0 : 1);
  // Picking subchannel triggers connection on second address. RPC hash hits server0.
  PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
  PickResult result = pickerCaptor.getValue().pickSubchannel(args);
  assertThat(result.getStatus().isOk()).isTrue();
  verify(getSubchannel(servers, 1)).requestConnection();
  verify(getSubchannel(servers, 2), never()).requestConnection();
}
// Verifies weight validation boundaries: values up to unsigned-int max are accepted, while
// negative weights, single weights above unsigned-int max, and overflowing sums are rejected.
@Test
public void largeWeights() {
  RingHashConfig config = new RingHashConfig(10000, 100000, ""); // large ring
  List<EquivalentAddressGroup> servers =
      createWeightedServerAddrs(Integer.MAX_VALUE, 10, 100); // MAX:10:100
  initializeLbSubchannels(config, servers);
  // Try value between max signed and max unsigned int
  servers = createWeightedServerAddrs(Integer.MAX_VALUE + 100L, 100); // (MAX+100):100
  Status addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isTrue();
  // Try a negative value
  servers = createWeightedServerAddrs(10, -20, 100); // 10:-20:100
  addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isFalse();
  // Try an individual value larger than max unsigned int
  long maxUnsigned = UnsignedInteger.MAX_VALUE.longValue();
  servers = createWeightedServerAddrs(maxUnsigned + 10, 10, 100); // uMAX+10:10:100
  addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isFalse();
  // Try a sum of values larger than max unsigned int
  servers = createWeightedServerAddrs(Integer.MAX_VALUE, Integer.MAX_VALUE, 100); // MAX:MAX:100
  addressesAcceptanceStatus = loadBalancer.acceptResolvedAddresses(
      ResolvedAddresses.newBuilder()
          .setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
  assertThat(addressesAcceptanceStatus.isOk()).isFalse();
}
@Test
public void hostSelectionProportionalToWeights() {
RingHashConfig config = new RingHashConfig(10000, 100000, ""); // large ring
List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 10, 100); // 1:10:100
initializeLbSubchannels(config, servers);
// Bring all subchannels to READY.
Map<EquivalentAddressGroup, Integer> pickCounts = new HashMap<>();
for (Subchannel subchannel : subchannels.values()) {
deliverSubchannelState(subchannel, CSI_READY);
pickCounts.put(subchannel.getAddresses(), 0);
}
verify(helper, times(3)).updateBalancingState(eq(READY), pickerCaptor.capture());
SubchannelPicker picker = pickerCaptor.getValue();
for (int i = 0; i < 10000; i++) {
long hash = hashFunc.hashInt(i);
PickSubchannelArgs args = getDefaultPickSubchannelArgs(hash);
Subchannel pickedSubchannel = picker.pickSubchannel(args).getSubchannel();
EquivalentAddressGroup addr = pickedSubchannel.getAddresses();
pickCounts.put(addr, pickCounts.get(addr) + 1);
}
// Actual distribution: server0 = 104, server1 = 808, server2 = 9088
double ratio01 = (double) pickCounts.get(servers.get(0)) / pickCounts.get(servers.get(1));
double ratio12 = (double) pickCounts.get(servers.get(1)) / pickCounts.get(servers.get(2));
assertThat(ratio01).isWithin(0.03).of((double) 1 / 10);
assertThat(ratio12).isWithin(0.03).of((double) 10 / 100);
}
@Test
public void nameResolutionErrorWithNoActiveSubchannels() {
Status error = Status.UNAVAILABLE.withDescription("not reachable");
loadBalancer.handleNameResolutionError(error);
verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture());
PickResult result = pickerCaptor.getValue().pickSubchannel(mock(PickSubchannelArgs.class));
assertThat(result.getStatus().getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(result.getStatus().getDescription()).isEqualTo("not reachable");
assertThat(result.getSubchannel()).isNull();
verifyNoMoreInteractions(helper);
}
@Test
public void nameResolutionErrorWithActiveSubchannels() {
RingHashConfig config = new RingHashConfig(10, 100, "");
List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1);
initializeLbSubchannels(config, servers, DO_NOT_VERIFY, DO_NOT_RESET_HELPER);
verify(helper).createSubchannel(any(CreateSubchannelArgs.class));
verify(helper, times(2)).updateBalancingState(eq(IDLE), pickerCaptor.capture());
// Picking subchannel triggers subchannel creation and connection.
PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
pickerCaptor.getValue().pickSubchannel(args);
verify(helper, never()).updateBalancingState(eq(READY), any(SubchannelPicker.class));
deliverSubchannelState(
Iterables.getOnlyElement(subchannels.values()), CSI_READY);
verify(helper).updateBalancingState(eq(READY), any(SubchannelPicker.class));
reset(helper);
loadBalancer.handleNameResolutionError(Status.NOT_FOUND.withDescription("target not found"));
verifyNoMoreInteractions(helper);
}
@Test
public void duplicateAddresses() {
RingHashConfig config = new RingHashConfig(10, 100, "");
List<EquivalentAddressGroup> servers = createRepeatedServerAddrs(1, 2, 3);
initializeLbSubchannels(config, servers, DO_NOT_VERIFY);
verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture());
PickSubchannelArgs args = getDefaultPickSubchannelArgs(hashFunc.hashVoid());
PickResult result = pickerCaptor.getValue().pickSubchannel(args);
assertThat(result.getStatus().isOk()).isFalse(); // fail the RPC
assertThat(result.getStatus().getCode())
.isEqualTo(Code.UNAVAILABLE); // with error status for the original server hit by hash
String description = result.getStatus().getDescription();
assertThat(description).startsWith(
"Ring hash lb error: EDS resolution was successful, but there were duplicate addresses: ");
assertThat(description).contains("Address: FakeSocketAddress-server1, count: 2");
assertThat(description).contains("Address: FakeSocketAddress-server2, count: 3");
}
@Test
public void subchannelHealthObserved() throws Exception {
// Only the new PF policy observes the new separate listener for health
PickFirstLoadBalancerProviderAccessor.setEnableNewPickFirst(true);
// PickFirst does most of this work. If the test fails, check IS_PETIOLE_POLICY
Map<Subchannel, LoadBalancer.SubchannelStateListener> healthListeners = new HashMap<>();
loadBalancer = new RingHashLoadBalancer(new ForwardingLoadBalancerHelper() {
@Override
public Subchannel createSubchannel(CreateSubchannelArgs args) {
Subchannel subchannel = super.createSubchannel(args.toBuilder()
.setAttributes(args.getAttributes().toBuilder()
.set(LoadBalancer.HAS_HEALTH_PRODUCER_LISTENER_KEY, true)
.build())
.build());
healthListeners.put(
subchannel, args.getOption(LoadBalancer.HEALTH_CONSUMER_LISTENER_ARG_KEY));
return subchannel;
}
@Override
protected Helper delegate() {
return helper;
}
});
InOrder inOrder = Mockito.inOrder(helper);
List<EquivalentAddressGroup> servers = createWeightedServerAddrs(1, 1);
initializeLbSubchannels(new RingHashConfig(10, 100, ""), servers);
Subchannel subchannel0 = subchannels.get(Collections.singletonList(servers.get(0)));
Subchannel subchannel1 = subchannels.get(Collections.singletonList(servers.get(1)));
// Subchannels go READY, but the LB waits for health
for (Subchannel subchannel : subchannels.values()) {
deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY));
}
inOrder.verify(helper, times(0)).updateBalancingState(eq(READY), any(SubchannelPicker.class));
// Health results lets subchannels go READY
healthListeners.get(subchannel0).onSubchannelState(ConnectivityStateInfo.forNonError(READY));
healthListeners.get(subchannel1).onSubchannelState(ConnectivityStateInfo.forNonError(READY));
inOrder.verify(helper, times(2)).updateBalancingState(eq(READY), pickerCaptor.capture());
SubchannelPicker picker = pickerCaptor.getValue();
Random random = new Random(1);
Set<Subchannel> picks = new HashSet<>();
for (int i = 0; i < 10; i++) {
picks.add(
picker.pickSubchannel(getDefaultPickSubchannelArgs(random.nextLong())).getSubchannel());
}
assertThat(picks).containsExactly(subchannel0, subchannel1);
// Unhealthy subchannel skipped
healthListeners.get(subchannel0).onSubchannelState(
ConnectivityStateInfo.forTransientFailure(Status.UNAVAILABLE.withDescription("oh no")));
inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture());
picker = pickerCaptor.getValue();
random.setSeed(1);
picks.clear();
for (int i = 0; i < 10; i++) {
picks.add(
picker.pickSubchannel(getDefaultPickSubchannelArgs(random.nextLong())).getSubchannel());
}
assertThat(picks).containsExactly(subchannel1);
}
@Test
public void config_equalsTester() {
new EqualsTester()
.addEqualityGroup(
new RingHashConfig(1, 2, "headerA"),
new RingHashConfig(1, 2, "headerA"))
.addEqualityGroup(new RingHashConfig(1, 1, "headerA"))
.addEqualityGroup(new RingHashConfig(2, 2, "headerA"))
.addEqualityGroup(new RingHashConfig(1, 2, "headerB"))
.addEqualityGroup(new RingHashConfig(1, 2, ""))
.testEquals();
}
private List<Subchannel> initializeLbSubchannels(RingHashConfig config,
List<EquivalentAddressGroup> servers, InitializationFlags... initFlags) {
boolean doVerifies = true;
boolean resetSubchannels = false;
boolean returnToIdle = true;
boolean resetHelper = true;
for (InitializationFlags flag : initFlags) {
switch (flag) {
case DO_NOT_VERIFY:
doVerifies = false;
break;
case RESET_SUBCHANNEL_MOCKS:
resetSubchannels = true;
break;
case STAY_IN_CONNECTING:
returnToIdle = false;
break;
case DO_NOT_RESET_HELPER:
resetHelper = false;
break;
default:
throw new IllegalArgumentException("Unrecognized flag: " + flag);
}
}
Status addressesAcceptanceStatus =
loadBalancer.acceptResolvedAddresses(
ResolvedAddresses.newBuilder()
.setAddresses(servers).setLoadBalancingPolicyConfig(config).build());
if (doVerifies) {
assertThat(addressesAcceptanceStatus.isOk()).isTrue();
verify(helper).updateBalancingState(eq(IDLE), any(SubchannelPicker.class));
}
if (!addressesAcceptanceStatus.isOk()) {
return new ArrayList<>();
}
// Activate them all to create the child LB and subchannel
for (ChildLbState childLbState : loadBalancer.getChildLbStates()) {
childLbState.getCurrentPicker()
.pickSubchannel(getDefaultPickSubchannelArgs(hashFunc.hashVoid()));
}
if (doVerifies) {
verify(helper, times(servers.size())).createSubchannel(any(CreateSubchannelArgs.class));
verify(helper, times(servers.size()))
.updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class));
verifyConnection(servers.size());
}
if (returnToIdle) {
for (Subchannel subchannel : subchannels.values()) {
deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(IDLE));
}
if (doVerifies) {
verify(helper, times(2 * servers.size() - 1))
.updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class));
verify(helper, times(2)).updateBalancingState(eq(IDLE), any(SubchannelPicker.class));
}
}
// Get a list of subchannels in the same order as servers
List<Subchannel> subchannelList = new ArrayList<>();
for (EquivalentAddressGroup server : servers) {
List<EquivalentAddressGroup> singletonList = Collections.singletonList(server);
Subchannel subchannel = subchannels.get(singletonList);
subchannelList.add(subchannel);
if (resetSubchannels) {
reset(subchannel);
}
}
if (resetHelper) {
reset(helper);
}
return subchannelList;
}
private void deliverSubchannelState(Subchannel subchannel, ConnectivityStateInfo state) {
testHelperInst.deliverSubchannelState(subchannel, state);
}
private void deliverNotFound(List<Subchannel> subChannelList, int index) {
deliverSubchannelState(
subChannelList.get(index),
ConnectivityStateInfo.forTransientFailure(
Status.UNAVAILABLE.withDescription("also not found")));
}
protected void deliverSubchannelUnreachable(Subchannel subchannel) {
deliverSubchannelState(subchannel,
ConnectivityStateInfo.forTransientFailure(
Status.UNAVAILABLE.withDescription(
subchannel.getAddresses().getAddresses() + "unreachable")));
}
private static List<EquivalentAddressGroup> createWeightedServerAddrs(long... weights) {
List<EquivalentAddressGroup> addrs = new ArrayList<>();
for (int i = 0; i < weights.length; i++) {
SocketAddress addr = new FakeSocketAddress("server" + i);
Attributes attr = Attributes.newBuilder().set(
XdsAttributes.ATTR_SERVER_WEIGHT, weights[i]).build();
EquivalentAddressGroup eag = new EquivalentAddressGroup(addr, attr);
addrs.add(eag);
}
return addrs;
}
private static List<EquivalentAddressGroup> createRepeatedServerAddrs(long... weights) {
List<EquivalentAddressGroup> addrs = new ArrayList<>();
for (int i = 0; i < weights.length; i++) {
SocketAddress addr = new FakeSocketAddress("server" + i);
for (int j = 0; j < weights[i]; j++) {
EquivalentAddressGroup eag = new EquivalentAddressGroup(addr);
addrs.add(eag);
}
}
return addrs;
}
private static | RingHashLoadBalancerTest |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockSASTokenProvider.java | {
"start": 1376,
"end": 3648
} | class ____ implements SASTokenProvider {
private byte[] accountKey;
private ServiceSASGenerator generator;
private boolean skipAuthorizationForTestSetup = false;
private static final Logger LOG = LoggerFactory.getLogger(MockSASTokenProvider.class);
// For testing we use a container SAS for all operations.
private String generateSAS(byte[] accountKey, String accountName, String fileSystemName) {
String containerSAS = "";
try {
containerSAS = generator.getContainerSASWithFullControl(accountName, fileSystemName);
} catch (InvalidConfigurationValueException e) {
LOG.debug(e.getMessage());
containerSAS = "";
}
return containerSAS;
}
@Override
public void initialize(Configuration configuration, String accountName) throws IOException {
try {
AbfsConfiguration abfsConfig = new AbfsConfiguration(configuration, accountName);
accountKey = Base64.decode(abfsConfig.getStorageAccountKey());
} catch (Exception ex) {
throw new IOException(ex);
}
generator = new ServiceSASGenerator(accountKey);
}
/**
* Invokes the authorizer to obtain a SAS token.
*
* @param accountName the name of the storage account.
* @param fileSystem the name of the fileSystem.
* @param path the file or directory path.
* @param operation the operation to be performed on the path.
* @return a SAS token to perform the request operation.
* @throws IOException if there is a network error.
* @throws AccessControlException if access is denied.
*/
@Override
public String getSASToken(String accountName, String fileSystem, String path,
String operation) throws IOException, AccessControlException {
if (!isSkipAuthorizationForTestSetup() && path.contains("unauthorized")) {
throw new AccessControlException(
"The user is not authorized to perform this operation.");
}
return generateSAS(accountKey, accountName, fileSystem);
}
public boolean isSkipAuthorizationForTestSetup() {
return skipAuthorizationForTestSetup;
}
public void setSkipAuthorizationForTestSetup(
boolean skipAuthorizationForTestSetup) {
this.skipAuthorizationForTestSetup = skipAuthorizationForTestSetup;
}
}
| MockSASTokenProvider |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/BeanMethodQualificationTests.java | {
"start": 14473,
"end": 14647
} | interface ____ {
@AliasFor(annotation = Autowired.class)
boolean required();
}
@Component @Lazy
@Retention(RetentionPolicy.RUNTIME)
@ | InterestingNeedWithRequiredOverride |
java | spring-projects__spring-framework | spring-context-support/src/test/java/org/springframework/cache/jcache/config/JCacheCustomInterceptorTests.java | {
"start": 4917,
"end": 5447
} | class ____ extends JCacheInterceptor {
@Override
protected Object invokeOperation(CacheOperationInvoker invoker) {
try {
return super.invokeOperation(invoker);
}
catch (CacheOperationInvoker.ThrowableWrapper e) {
Throwable original = e.getOriginal();
if (original.getClass() == UnsupportedOperationException.class) {
return 55L;
}
else {
throw new CacheOperationInvoker.ThrowableWrapper(
new RuntimeException("wrapping original", original));
}
}
}
}
}
| TestCacheInterceptor |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignor.java | {
"start": 6887,
"end": 8638
} | class ____ {
private final HostInfo hostInfo;
private final ClientState state;
private final SortedSet<String> consumers;
private final Optional<String> rackId;
ClientMetadata(final ProcessId processId, final String endPoint, final Map<String, String> clientTags, final Optional<String> rackId) {
// get the host info, or null if no endpoint is configured (ie endPoint == null)
hostInfo = HostInfo.buildFromEndpoint(endPoint);
// initialize the consumer memberIds
consumers = new TreeSet<>();
// initialize the client state with client tags
state = new ClientState(processId, clientTags);
this.rackId = rackId;
}
void addConsumer(final String consumerMemberId, final List<TopicPartition> ownedPartitions) {
consumers.add(consumerMemberId);
state.incrementCapacity();
state.addOwnedPartitions(ownedPartitions, consumerMemberId);
}
void addPreviousTasksAndOffsetSums(final String consumerId, final Map<TaskId, Long> taskOffsetSums) {
state.addPreviousTasksAndOffsetSums(consumerId, taskOffsetSums);
}
public ClientState state() {
return state;
}
public HostInfo hostInfo() {
return hostInfo;
}
public Optional<String> rackId() {
return rackId;
}
@Override
public String toString() {
return "ClientMetadata{" +
"hostInfo=" + hostInfo +
", consumers=" + consumers +
", state=" + state +
'}';
}
}
@FunctionalInterface
public | ClientMetadata |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tuple/internal/CteTupleTableGroupProducer.java | {
"start": 890,
"end": 3408
} | class ____ extends AnonymousTupleTableGroupProducer {
private final AnonymousTupleBasicValuedModelPart searchModelPart;
private final AnonymousTupleBasicValuedModelPart cycleMarkModelPart;
private final AnonymousTupleBasicValuedModelPart cyclePathModelPart;
public CteTupleTableGroupProducer(
SqmCteTable<?> sqmCteTable,
String aliasStem,
SqlTypedMapping[] sqlTypedMappings,
FromClauseAccess fromClauseAccess) {
super( sqmCteTable, aliasStem, sqlTypedMappings, fromClauseAccess );
final SqmCteStatement<?> cteStatement = sqmCteTable.getCteStatement();
final BasicType<String> stringType = cteStatement.nodeBuilder()
.getTypeConfiguration()
.getBasicTypeForJavaType( String.class );
this.searchModelPart = createModelPart( this, cteStatement.getSearchAttributeName(), stringType );
this.cycleMarkModelPart = createModelPart(
this,
cteStatement.getCycleMarkAttributeName(),
cteStatement.getCycleLiteral() == null
? null
: (BasicType<?>) cteStatement.getCycleLiteral().getNodeType()
);
this.cyclePathModelPart = createModelPart( this, cteStatement.getCyclePathAttributeName(), stringType );
}
private static AnonymousTupleBasicValuedModelPart createModelPart(
MappingType declaringType,
String attributeName,
BasicType<?> basicType) {
if ( attributeName != null ) {
return new AnonymousTupleBasicValuedModelPart(
declaringType,
attributeName,
attributeName,
basicType,
basicType,
-1
);
}
return null;
}
public List<CteColumn> determineCteColumns() {
final List<CteColumn> columns = new ArrayList<>( getModelParts().size() + 3 );
forEachSelectable(
(selectionIndex, selectableMapping) -> {
columns.add(
new CteColumn(
selectableMapping.getSelectionExpression(),
selectableMapping.getJdbcMapping()
)
);
}
);
return columns;
}
@Override
public ModelPart findSubPart(String name, EntityMappingType treatTargetType) {
final ModelPart subPart = super.findSubPart( name, treatTargetType );
if ( subPart != null ) {
return subPart;
}
if ( searchModelPart != null && name.equals( searchModelPart.getPartName() ) ) {
return searchModelPart;
}
if ( cycleMarkModelPart != null && name.equals( cycleMarkModelPart.getPartName() ) ) {
return cycleMarkModelPart;
}
if ( cyclePathModelPart != null && name.equals( cyclePathModelPart.getPartName() ) ) {
return cyclePathModelPart;
}
return null;
}
}
| CteTupleTableGroupProducer |
java | netty__netty | testsuite/src/main/java/io/netty/testsuite/transport/DefaultEventLoopTest.java | {
"start": 1451,
"end": 2787
} | class ____ extends AbstractSingleThreadEventLoopTest {
@Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testChannelsIteratorNotSupported() throws Exception {
EventLoopGroup group = newEventLoopGroup();
final SingleThreadEventLoop loop = (SingleThreadEventLoop) group.next();
try {
final Channel ch = newChannel();
loop.register(ch).syncUninterruptibly();
assertThrows(UnsupportedOperationException.class, new Executable() {
@Override
public void execute() throws Throwable {
loop.registeredChannelsIterator();
}
});
} finally {
group.shutdownGracefully();
}
}
@Override
protected EventLoopGroup newEventLoopGroup() {
return new DefaultEventLoopGroup();
}
@Override
protected EventLoopGroup newAutoScalingEventLoopGroup() {
return new AutoScalingDefaultEventLoopGroup(SCALING_MAX_THREADS, AUTO_SCALING_CHOOSER_FACTORY);
}
@Override
protected Channel newChannel() {
return new LocalChannel();
}
@Override
protected Class<? extends ServerChannel> serverChannelClass() {
return LocalServerChannel.class;
}
private static final | DefaultEventLoopTest |
java | resilience4j__resilience4j | resilience4j-metrics/src/test/java/io/github/resilience4j/metrics/CircuitBreakerMetricsTest.java | {
"start": 844,
"end": 1826
} | class ____ extends AbstractCircuitBreakerMetricsTest {
@Override
protected CircuitBreaker givenMetricRegistry(String prefix, MetricRegistry metricRegistry) {
CircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();
CircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker("testName");
metricRegistry.registerAll(
CircuitBreakerMetrics.ofCircuitBreakerRegistry(prefix, circuitBreakerRegistry));
return circuitBreaker;
}
@Override
protected CircuitBreaker givenMetricRegistry(MetricRegistry metricRegistry) {
CircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults();
CircuitBreaker circuitBreaker = circuitBreakerRegistry.circuitBreaker("testName");
metricRegistry
.registerAll(CircuitBreakerMetrics.ofCircuitBreakerRegistry(circuitBreakerRegistry));
return circuitBreaker;
}
}
| CircuitBreakerMetricsTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/cache/LocalCacheView.java | {
"start": 4733,
"end": 6858
} | class ____ extends AbstractSet<Map.Entry<K, V>> {
@Override
public Iterator<Map.Entry<K, V>> iterator() {
return new Iterator<Map.Entry<K, V>>() {
private Iterator<CacheValue> iter = cache.values().iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public Map.Entry<K, V> next() {
CacheValue e = iter.next();
V val = toValue(e);
return new AbstractMap.SimpleEntry<K, V>((K) e.getKey(), val);
}
@Override
public void remove() {
if (useObjectAsCacheKey) {
cacheKeyMap.remove(((AbstractCacheMap.MapIterator) iter).cursorValue().getKey());
}
iter.remove();
}
};
}
@Override
public boolean contains(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
CacheKey cacheKey = toCacheKey(e.getKey());
CacheValue entry = cache.get(cacheKey);
return entry != null && entry.getValue().equals(e.getValue());
}
@Override
public boolean remove(Object o) {
if (o instanceof Map.Entry) {
Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
CacheKey cacheKey = toCacheKey(e.getKey());
if (useObjectAsCacheKey) {
cacheKeyMap.remove(e.getKey());
}
return cache.remove(cacheKey) != null;
}
return false;
}
@Override
public int size() {
return cache.size();
}
@Override
public void clear() {
cache.clear();
}
}
public Map<K, V> getCachedMap() {
return new LocalMap();
}
final | LocalEntrySet |
java | apache__flink | flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/iterator/AbstractRocksStateKeysIterator.java | {
"start": 1382,
"end": 3240
} | class ____<K> implements AutoCloseable {
@Nonnull protected final RocksIteratorWrapper iterator;
@Nonnull protected final String state;
@Nonnull protected final TypeSerializer<K> keySerializer;
protected final boolean ambiguousKeyPossible;
protected final int keyGroupPrefixBytes;
protected final DataInputDeserializer byteArrayDataInputView;
public AbstractRocksStateKeysIterator(
@Nonnull RocksIteratorWrapper iterator,
@Nonnull String state,
@Nonnull TypeSerializer<K> keySerializer,
int keyGroupPrefixBytes,
boolean ambiguousKeyPossible) {
this.iterator = iterator;
this.state = state;
this.keySerializer = keySerializer;
this.keyGroupPrefixBytes = keyGroupPrefixBytes;
this.ambiguousKeyPossible = ambiguousKeyPossible;
this.byteArrayDataInputView = new DataInputDeserializer();
}
protected K deserializeKey(byte[] keyBytes, DataInputDeserializer readView) throws IOException {
readView.setBuffer(keyBytes, keyGroupPrefixBytes, keyBytes.length - keyGroupPrefixBytes);
return CompositeKeySerializationUtils.readKey(
keySerializer, byteArrayDataInputView, ambiguousKeyPossible);
}
@Override
public void close() {
iterator.close();
}
public static boolean isMatchingNameSpace(
@Nonnull byte[] key, int namespaceBytesStartPos, @Nonnull byte[] namespaceBytes) {
if (key.length >= namespaceBytesStartPos + namespaceBytes.length) {
for (int i = 0; i < namespaceBytes.length; ++i) {
if (key[namespaceBytesStartPos + i] != namespaceBytes[i]) {
return false;
}
}
return true;
}
return false;
}
}
| AbstractRocksStateKeysIterator |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java | {
"start": 27258,
"end": 61785
} | class ____ extends AckedClusterStateUpdateTask {
protected final List<String> deletedRepositories = new ArrayList<>();
private final ProjectId projectId;
private final DeleteRepositoryRequest request;
UnregisterRepositoryTask(
final ProjectId projectId,
final DeleteRepositoryRequest request,
final ActionListener<AcknowledgedResponse> listener
) {
super(request, listener);
this.projectId = projectId;
this.request = request;
}
/**
* Constructor used by {@link org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction}
* @param name the repository name
*/
public UnregisterRepositoryTask(TimeValue dummyTimeout, ProjectId projectId, String name) {
this(projectId, new DeleteRepositoryRequest(dummyTimeout, dummyTimeout, name), null);
}
@Override
public ClusterState execute(ClusterState currentState) {
final var projectState = currentState.projectState(projectId);
RepositoriesMetadata repositories = RepositoriesMetadata.get(projectState.metadata());
if (repositories.repositories().size() > 0) {
List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>(repositories.repositories().size());
boolean changed = false;
for (RepositoryMetadata repositoryMetadata : repositories.repositories()) {
if (Regex.simpleMatch(request.name(), repositoryMetadata.name())) {
ensureRepositoryNotInUse(projectState, repositoryMetadata.name());
ensureNoSearchableSnapshotsIndicesInUse(currentState, repositoryMetadata);
deletedRepositories.add(repositoryMetadata.name());
changed = true;
} else {
repositoriesMetadata.add(repositoryMetadata);
}
}
if (changed) {
repositories = new RepositoriesMetadata(repositoriesMetadata);
return ClusterState.builder(currentState)
.putProjectMetadata(
ProjectMetadata.builder(projectState.metadata()).putCustom(RepositoriesMetadata.TYPE, repositories)
)
.build();
}
}
if (Regex.isMatchAllPattern(request.name())) { // we use a wildcard so we don't barf if it's not present.
return currentState;
}
throw new RepositoryMissingException(request.name());
}
}
public void verifyRepository(
final ProjectId projectId,
final String repositoryName,
final ActionListener<List<DiscoveryNode>> listener
) {
final Repository repository = repository(projectId, repositoryName);
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new ActionRunnable<>(listener) {
@Override
protected void doRun() {
final String verificationToken = repository.startVerification();
if (verificationToken != null) {
try {
var nodeRequest = new Request(repositoryName, verificationToken);
client.execute(
VerifyNodeRepositoryCoordinationAction.TYPE,
nodeRequest,
listener.delegateFailure(
(delegatedListener, response) -> threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
try {
repository.endVerification(verificationToken);
} catch (Exception e) {
logger.warn(
() -> projectRepoString(projectId, repositoryName)
+ " failed to finish repository verification",
e
);
delegatedListener.onFailure(e);
return;
}
delegatedListener.onResponse(response.nodes);
})
)
);
} catch (Exception e) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
try {
repository.endVerification(verificationToken);
} catch (Exception inner) {
inner.addSuppressed(e);
logger.warn(
() -> projectRepoString(projectId, repositoryName) + " failed to finish repository verification",
inner
);
}
listener.onFailure(e);
});
}
} else {
listener.onResponse(Collections.emptyList());
}
}
});
}
public static boolean isDedicatedVotingOnlyNode(Set<DiscoveryNodeRole> roles) {
return roles.contains(DiscoveryNodeRole.MASTER_ROLE)
&& roles.stream().noneMatch(DiscoveryNodeRole::canContainData)
&& roles.contains(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE);
}
/**
* Checks if new repositories appeared in or disappeared from cluster metadata and updates current list of
* repositories accordingly.
*
* @param event cluster changed event
*/
@Override
public void applyClusterState(ClusterChangedEvent event) {
try {
final ClusterState state = event.state();
final ClusterState previousState = event.previousState();
for (var projectId : event.projectDelta().removed()) { // removed projects
applyProjectStateForRemovedProject(state.version(), previousState.projectState(projectId));
}
for (var projectId : event.projectDelta().added()) { // added projects
applyProjectStateForAddedOrExistingProject(state.version(), state.projectState(projectId), null);
}
// existing projects
final var common = event.projectDelta().added().isEmpty()
? state.metadata().projects().keySet()
: Sets.difference(state.metadata().projects().keySet(), event.projectDelta().added());
for (var projectId : common) {
applyProjectStateForAddedOrExistingProject(
state.version(),
state.projectState(projectId),
previousState.projectState(projectId)
);
}
} catch (Exception ex) {
assert false : new AssertionError(ex);
logger.warn("failure updating cluster state ", ex);
}
}
/**
* Apply changes for one removed project.
*
* @param version The cluster state version of the change.
* @param previousState The previous project state for the removed project.
*/
private void applyProjectStateForRemovedProject(long version, ProjectState previousState) {
final var projectId = previousState.projectId();
assert ProjectId.DEFAULT.equals(projectId) == false : "default project cannot be removed";
final var survivors = closeRemovedRepositories(version, projectId, getProjectRepositories(projectId), RepositoriesMetadata.EMPTY);
assert survivors.isEmpty() : "expect no repositories for removed project [" + projectId + "], but got " + survivors.keySet();
repositories.remove(projectId);
}
    /**
     * Apply changes for one project. The project can be either newly added or an existing one.
     *
     * @param version The cluster state version of the change.
     * @param state The current project state
     * @param previousState The previous project state, or {@code null} if the project was newly added.
     */
    private void applyProjectStateForAddedOrExistingProject(long version, ProjectState state, @Nullable ProjectState previousState) {
        // Test-only invariant: readonly repositories must have no in-flight write activity.
        assert assertReadonlyRepositoriesNotInUseForWrites(state);
        final var projectId = state.projectId();
        assert ProjectId.DEFAULT.equals(projectId) == false || previousState != null : "default project cannot be added";
        assert previousState == null || projectId.equals(previousState.projectId())
            : "current and previous states must refer to the same project, but got " + projectId + " != " + previousState.projectId();
        final RepositoriesMetadata newMetadata = RepositoriesMetadata.get(state.metadata());
        // A newly added project has no previous metadata to diff against.
        final RepositoriesMetadata oldMetadata = previousState == null
            ? RepositoriesMetadata.EMPTY
            : RepositoriesMetadata.get(previousState.metadata());
        final Map<String, Repository> projectRepositories = getProjectRepositories(projectId);
        // Check if repositories got changed
        if (oldMetadata.equalsIgnoreGenerations(newMetadata)) {
            // No metadata change: just propagate the new cluster state to the existing repository instances.
            for (Repository repo : projectRepositories.values()) {
                repo.updateState(state.cluster());
            }
            return;
        }
        logger.trace("processing new index repositories for project [{}] and state version [{}]", projectId, version);
        // First, remove repositories that are no longer there
        final var survivors = closeRemovedRepositories(version, projectId, projectRepositories, newMetadata);
        Map<String, Repository> builder = new HashMap<>();
        // Now go through all repositories and update existing or create missing
        for (RepositoryMetadata repositoryMetadata : newMetadata.repositories()) {
            Repository repository = survivors.get(repositoryMetadata.name());
            if (repository != null) {
                // Found previous version of this repository
                if (canUpdateInPlace(repositoryMetadata, repository) == false) {
                    // Previous version is different from the version in settings
                    logger.debug("updating repository {}", projectRepoString(projectId, repositoryMetadata.name()));
                    closeRepository(repository);
                    archiveRepositoryStats(repository, version);
                    repository = null;
                    try {
                        repository = createRepository(
                            projectId,
                            repositoryMetadata,
                            typesRegistry,
                            RepositoriesService::createUnknownTypeRepository
                        );
                    } catch (RepositoryException ex) {
                        // TODO: this catch is bogus, it means the old repo is already closed,
                        // but we have nothing to replace it
                        logger.warn(() -> "failed to change repository " + projectRepoString(projectId, repositoryMetadata.name()), ex);
                        // Keep the name tracked with a placeholder even though creation failed.
                        repository = new InvalidRepository(projectId, repositoryMetadata, ex);
                    }
                }
            } else {
                // No surviving instance: this repository is new for the project.
                try {
                    repository = createRepository(
                        projectId,
                        repositoryMetadata,
                        typesRegistry,
                        RepositoriesService::createUnknownTypeRepository
                    );
                } catch (RepositoryException ex) {
                    logger.warn(() -> "failed to create repository " + projectRepoString(projectId, repositoryMetadata.name()), ex);
                    repository = new InvalidRepository(projectId, repositoryMetadata, ex);
                }
            }
            assert repository != null : "repository should not be null here";
            logger.debug("registering repository [{}]", projectRepoString(projectId, repositoryMetadata.name()));
            builder.put(repositoryMetadata.name(), repository);
        }
        // Push the latest cluster state into every (possibly freshly created) repository instance.
        for (Repository repo : builder.values()) {
            repo.updateState(state.cluster());
        }
        // Keep a per-project entry only while it holds at least one repository.
        if (builder.isEmpty() == false) {
            repositories.put(projectId, unmodifiableMap(builder));
        } else {
            repositories.remove(projectId);
        }
    }
    /**
     * Closes (and archives the stats of) registered repositories of the given project that are absent
     * from {@code newMetadata}.
     *
     * @param version cluster state version used when archiving stats of removed repositories
     * @return the repositories that are still present in {@code newMetadata}, keyed by name
     */
    private Map<String, Repository> closeRemovedRepositories(
        long version,
        ProjectId projectId,
        Map<String, Repository> projectRepositories,
        RepositoriesMetadata newMetadata
    ) {
        Map<String, Repository> survivors = new HashMap<>();
        for (Map.Entry<String, Repository> entry : projectRepositories.entrySet()) {
            if (newMetadata.repository(entry.getKey()) == null) {
                // Repository no longer in the metadata: close it and archive its stats under this state version.
                logger.debug("unregistering repository {}", projectRepoString(projectId, entry.getKey()));
                Repository repository = entry.getValue();
                closeRepository(repository);
                archiveRepositoryStats(repository, version);
            } else {
                survivors.put(entry.getKey(), entry.getValue());
            }
        }
        return survivors;
    }
private static boolean canUpdateInPlace(RepositoryMetadata updatedMetadata, Repository repository) {
assert updatedMetadata.name().equals(repository.getMetadata().name());
return repository.getMetadata().type().equals(updatedMetadata.type())
&& repository.canUpdateInPlace(updatedMetadata.settings(), Collections.emptySet());
}
    /**
     * Gets the {@link RepositoryData} for the given repository.
     *
     * @param projectId project to look for the repository
     * @param repositoryName repository name
     * @param listener listener to pass {@link RepositoryData} to
     */
    public void getRepositoryData(final ProjectId projectId, final String repositoryName, final ActionListener<RepositoryData> listener) {
        try {
            Repository repository = repository(projectId, repositoryName);
            assert repository != null; // should only be called once we've validated the repository exists
            repository.getRepositoryData(
                EsExecutors.DIRECT_EXECUTOR_SERVICE, // TODO contemplate threading here, do we need to fork, see #101445?
                listener
            );
        } catch (Exception e) {
            // e.g. RepositoryMissingException; route every failure through the listener instead of throwing.
            listener.onFailure(e);
        }
    }
    /**
     * Returns registered repository, either internal or external
     *
     * @param repositoryName repository name
     * @return registered repository
     * @throws RepositoryMissingException if repository with such name isn't registered
     */
    @FixForMultiProject
    @Deprecated(forRemoval = true)
    public Repository repository(String repositoryName) {
        // Legacy single-project entry point: always resolves against the default project.
        return repository(ProjectId.DEFAULT, repositoryName);
    }
/**
* Returns registered repository, either internal or external
*
* @param projectId the project to look for the repository
* @param repositoryName repository name
* @return registered repository
* @throws RepositoryMissingException if repository with such name isn't registered
*/
public Repository repository(ProjectId projectId, String repositoryName) {
Repository repository = repositoryOrNull(projectId, repositoryName);
if (repository != null) {
return repository;
}
throw new RepositoryMissingException(repositoryName);
}
/**
* Similar to {@link #repository(ProjectId, String)}, but returns {@code null} instead of throw if the repository is not found.
*/
public Repository repositoryOrNull(ProjectId projectId, String repositoryName) {
Repository repository = repositories.getOrDefault(projectId, Map.of()).get(repositoryName);
if (repository != null) {
return repository;
}
return internalRepositories.getOrDefault(projectId, Map.of()).get(repositoryName);
}
/**
* @return the current collection of registered repositories from all projects.
*/
public List<Repository> getRepositories() {
return repositories.values().stream().map(Map::values).flatMap(Collection::stream).toList();
}
    /**
     * @return the current collection of registered repositories for the given project, keyed by name.
     */
    public Map<String, Repository> getProjectRepositories(ProjectId projectId) {
        // Returns an unmodifiable (possibly empty) map; never null.
        return repositories.getOrDefault(projectId, Map.of());
    }
    // Package private for testing
    // True if the service currently tracks any repository (external or internal) for the given project.
    boolean hasRepositoryTrackingForProject(ProjectId projectId) {
        return repositories.containsKey(projectId) || internalRepositories.containsKey(projectId);
    }
public List<RepositoryStatsSnapshot> repositoriesStats() {
List<RepositoryStatsSnapshot> archivedRepoStats = repositoriesStatsArchive.getArchivedStats();
List<RepositoryStatsSnapshot> activeRepoStats = getRepositoryStatsForActiveRepositories();
List<RepositoryStatsSnapshot> repositoriesStats = new ArrayList<>(archivedRepoStats);
repositoriesStats.addAll(activeRepoStats);
return repositoriesStats;
}
    // Snapshot throttling stats for every registered repository across all projects, keyed by repository name.
    public RepositoriesStats getRepositoriesThrottlingStats() {
        return new RepositoriesStats(
            getRepositories().stream().collect(Collectors.toMap(r -> r.getMetadata().name(), Repository::getSnapshotStats))
        );
    }
private List<RepositoryStatsSnapshot> getRepositoryStatsForActiveRepositories() {
return Stream.concat(
repositories.values().stream().map(Map::values).flatMap(Collection::stream),
internalRepositories.values().stream().map(Map::values).flatMap(Collection::stream)
)
.filter(r -> r instanceof MeteredBlobStoreRepository)
.map(r -> (MeteredBlobStoreRepository) r)
.map(MeteredBlobStoreRepository::statsSnapshot)
.toList();
}
    // Removes and returns archived stats up to (and including) the given cluster state version.
    public List<RepositoryStatsSnapshot> clearRepositoriesStatsArchive(long maxVersionToClear) {
        return repositoriesStatsArchive.clear(maxVersionToClear);
    }
    /**
     * Registers (and starts) an internal repository for the given project if one with that name is not
     * already tracked. Internal repositories are created from the internal types registry and are not part
     * of the cluster-state repository metadata.
     */
    public void registerInternalRepository(ProjectId projectId, String name, String type) {
        RepositoryMetadata metadata = new RepositoryMetadata(name, type, Settings.EMPTY);
        // compute() makes the read-modify-write on the per-project map atomic.
        Repository repository = internalRepositories.compute(projectId, (ignored, existingRepos) -> {
            if (existingRepos == null) {
                existingRepos = Map.of();
            }
            if (existingRepos.containsKey(name)) {
                // Already registered: keep the existing instance untouched.
                return existingRepos;
            }
            logger.debug("put internal repository [{}][{}]", projectRepoString(projectId, name), type);
            final var repo = createRepository(
                projectId,
                metadata,
                internalTypesRegistry,
                RepositoriesService::throwRepositoryTypeDoesNotExists
            );
            // Copy-on-write so readers always observe an immutable map.
            final var newRepos = new HashMap<>(existingRepos);
            newRepos.put(name, repo);
            return unmodifiableMap(newRepos);
        }).get(name);
        if (type.equals(repository.getMetadata().type()) == false) {
            // A repository with the same name but a different type already existed; this call was a no-op.
            logger.warn(
                () -> format(
                    "internal repository [%s][%s] already registered. this prevented the registration of "
                        + "internal repository [%s][%s].",
                    name,
                    repository.getMetadata().type(),
                    name,
                    type
                )
            );
        } else if (getProjectRepositories(projectId).containsKey(name)) {
            // A non-internal repository with the same name shadows the internal one on lookup.
            logger.warn(
                () -> format(
                    "non-internal repository [%s] already registered. this repository will block the "
                        + "usage of internal repository [%s][%s].",
                    name,
                    metadata.type(),
                    name
                )
            );
        }
    }
    /**
     * Removes and closes the internal repository with the given name for the project, if present.
     */
    public void unregisterInternalRepository(ProjectId projectId, String name) {
        // Capture the removed instance out of the atomic map update so it can be closed afterwards,
        // keeping the critical section short.
        final var repositoryRef = new AtomicReference<Repository>();
        internalRepositories.computeIfPresent(projectId, (ignored, existingRepos) -> {
            if (existingRepos.containsKey(name) == false) {
                return existingRepos;
            }
            final var newRepos = new HashMap<>(existingRepos);
            repositoryRef.set(newRepos.remove(name));
            if (newRepos.isEmpty()) {
                // Drop the whole project entry once its last internal repository is gone.
                return null;
            } else {
                return unmodifiableMap(newRepos);
            }
        });
        Repository repository = repositoryRef.get();
        if (repository != null) {
            RepositoryMetadata metadata = repository.getMetadata();
            logger.debug(() -> format("delete internal repository [%s][%s].", metadata.type(), projectRepoString(projectId, name)));
            closeRepository(repository);
        }
    }
    /**
     * Closes the given repository.
     */
    private static void closeRepository(Repository repository) {
        // Debug-level trace of repository lifecycle events.
        logger.debug(
            "closing repository [{}]{}",
            repository.getMetadata().type(),
            projectRepoString(repository.getProjectId(), repository.getMetadata().name())
        );
        repository.close();
    }
private void archiveRepositoryStats(Repository repository, long clusterStateVersion) {
if (repository instanceof MeteredBlobStoreRepository) {
RepositoryStatsSnapshot stats = ((MeteredBlobStoreRepository) repository).statsSnapshotForArchival(clusterStateVersion);
if (repositoriesStatsArchive.archive(stats) == false) {
logger.warn("Unable to archive the repository stats [{}] as the archive is full.", stats);
}
}
}
    /**
     * Creates repository holder. This method starts the repository
     */
    private static Repository createRepository(
        @Nullable ProjectId projectId,
        RepositoryMetadata repositoryMetadata,
        Map<String, Repository.Factory> factories,
        BiFunction<ProjectId, RepositoryMetadata, Repository> defaultFactory
    ) {
        logger.debug("creating repository [{}][{}]", repositoryMetadata.type(), repositoryMetadata.name());
        Repository.Factory factory = factories.get(repositoryMetadata.type());
        if (factory == null) {
            // Unknown type: delegate to the caller-supplied fallback (throw or create a placeholder).
            return defaultFactory.apply(projectId, repositoryMetadata);
        }
        Repository repository = null;
        try {
            repository = factory.create(projectId, repositoryMetadata, factories::get);
            repository.start();
            return repository;
        } catch (Exception e) {
            // Best-effort close of a partially constructed repository before propagating the failure.
            IOUtils.closeWhileHandlingException(repository);
            logger.warn(() -> format("failed to create repository [%s][%s]", repositoryMetadata.type(), repositoryMetadata.name()), e);
            throw new RepositoryException(repositoryMetadata.name(), "failed to create repository", e);
        }
    }
    /**
     * Creates a repository holder.
     *
     * <p>WARNING: This method is intended for expert only usage mainly in plugins/modules. Please take note of the following:</p>
     *
     * <ul>
     *     <li>This method does not register the repository (e.g., in the cluster state).</li>
     *     <li>This method starts the repository. The repository should be closed after use.</li>
     *     <li>The repository metadata should be associated to an already registered non-internal repository type and factory pair.</li>
     * </ul>
     *
     * @param projectId the project that the repository is associated with
     * @param repositoryMetadata the repository metadata
     * @return the started repository
     * @throws RepositoryException if repository type is not registered
     */
    public Repository createRepository(ProjectId projectId, RepositoryMetadata repositoryMetadata) {
        // Project-scoped variant: the project must always be supplied (see createNonProjectRepository otherwise).
        return createRepository(
            Objects.requireNonNull(projectId),
            repositoryMetadata,
            typesRegistry,
            RepositoriesService::throwRepositoryTypeDoesNotExists
        );
    }
    /**
     * Similar to {@link #createRepository(ProjectId, RepositoryMetadata)}, but repository is not associated with a project, i.e. the
     * repository is at the cluster level.
     */
    public Repository createNonProjectRepository(RepositoryMetadata repositoryMetadata) {
        // Cluster-level (project-less) repositories are only permitted in stateless deployments.
        assert DiscoveryNode.isStateless(clusterService.getSettings())
            : "outside stateless only project level repositories are allowed: " + repositoryMetadata;
        return createRepository(null, repositoryMetadata, typesRegistry, RepositoriesService::throwRepositoryTypeDoesNotExists);
    }
private Collection<LongWithAttributes> getShardSnapshotsInProgress() {
return repositories.values()
.stream()
.flatMap(repositories -> repositories.values().stream())
.map(Repository::getShardSnapshotsInProgress)
.filter(Objects::nonNull)
.toList();
}
    // Fallback factory for lookups of unregistered repository types: always fails.
    private static Repository throwRepositoryTypeDoesNotExists(ProjectId projectId, RepositoryMetadata repositoryMetadata) {
        throw new RepositoryException(
            repositoryMetadata.name(),
            "repository type [" + repositoryMetadata.type() + "] does not exist for project [" + projectId + "]"
        );
    }
    // Fallback factory used while applying cluster state: keeps tracking a repository whose type/plugin is
    // missing on this node instead of failing the whole state application.
    private static Repository createUnknownTypeRepository(ProjectId projectId, RepositoryMetadata repositoryMetadata) {
        logger.warn(
            "[{}] repository type [{}] is unknown; ensure that all required plugins are installed on this node",
            repositoryMetadata.name(),
            repositoryMetadata.type()
        );
        return new UnknownTypeRepository(projectId, repositoryMetadata);
    }
    /**
     * Validates that a repository name is non-empty, contains no {@code '#'}, and is a valid file name.
     *
     * @throws RepositoryException if the name is invalid
     */
    public static void validateRepositoryName(final String repositoryName) {
        if (Strings.hasLength(repositoryName) == false) {
            throw new RepositoryException(repositoryName, "cannot be empty");
        }
        if (repositoryName.contains("#")) {
            throw new RepositoryException(repositoryName, "must not contain '#'");
        }
        if (Strings.validFileName(repositoryName) == false) {
            throw new RepositoryException(repositoryName, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
        }
    }
    /**
     * Ensures no write activity (snapshot creation, snapshot deletion, repository cleanup) is currently
     * running against the given repository of the project.
     *
     * @throws RepositoryConflictException if the repository is in use for writes
     */
    private static void ensureRepositoryNotInUseForWrites(ProjectState projectState, String repository) {
        final ProjectId projectId = projectState.projectId();
        if (SnapshotsInProgress.get(projectState.cluster()).forRepo(projectId, repository).isEmpty() == false) {
            throw newRepositoryConflictException(repository, "snapshot is in progress");
        }
        for (SnapshotDeletionsInProgress.Entry entry : SnapshotDeletionsInProgress.get(projectState.cluster()).getEntries()) {
            if (entry.projectId().equals(projectId) && entry.repository().equals(repository)) {
                throw newRepositoryConflictException(repository, "snapshot deletion is in progress");
            }
        }
        for (RepositoryCleanupInProgress.Entry entry : RepositoryCleanupInProgress.get(projectState.cluster()).entries()) {
            if (entry.projectId().equals(projectId) && entry.repository().equals(repository)) {
                throw newRepositoryConflictException(repository, "repository clean up is in progress");
            }
        }
    }
    /**
     * Ensures the repository is not in use at all: neither for writes nor by an in-progress snapshot restore.
     *
     * @throws RepositoryConflictException if the repository is in use
     */
    private static void ensureRepositoryNotInUse(ProjectState projectState, String repository) {
        ensureRepositoryNotInUseForWrites(projectState, repository);
        // NOTE(review): unlike the write checks, restore entries are matched by repository name only, not by
        // projectId — presumably RestoreInProgress is already project-scoped here; confirm.
        for (RestoreInProgress.Entry entry : RestoreInProgress.get(projectState.cluster())) {
            if (repository.equals(entry.snapshot().getRepository())) {
                throw newRepositoryConflictException(repository, "snapshot restore is in progress");
            }
        }
    }
public static boolean isReadOnly(Settings repositorySettings) {
return Boolean.TRUE.equals(repositorySettings.getAsBoolean(BlobStoreRepository.READONLY_SETTING_KEY, null));
}
    /**
     * Test-only check for the invariant that read-only repositories never have any write activities.
     */
    private static boolean assertReadonlyRepositoriesNotInUseForWrites(ProjectState projectState) {
        assert projectState != null;
        for (final var repositoryMetadata : RepositoriesMetadata.get(projectState.metadata()).repositories()) {
            if (isReadOnly(repositoryMetadata.settings())) {
                try {
                    ensureRepositoryNotInUseForWrites(projectState, repositoryMetadata.name());
                } catch (Exception e) {
                    // Turn the conflict into an assertion failure so the invariant violation is loud in tests.
                    throw new AssertionError("repository [" + repositoryMetadata + "] is readonly but still in use", e);
                }
            }
        }
        // Always true so this method can be used directly inside an `assert` statement.
        return true;
    }
    /**
     * Reject a change to the {@code readonly} setting if there is a pending generation change in progress, i.e. some node somewhere is
     * updating the root {@link RepositoryData} blob.
     */
    private static void rejectInvalidReadonlyFlagChange(RepositoryMetadata existingRepositoryMetadata, Settings newSettings) {
        // Only a writable -> readonly transition is rejected, and only when the repository has a real
        // generation (>= EMPTY_REPO_GEN) that differs from the pending one, i.e. a root blob update is in flight.
        if (isReadOnly(newSettings)
            && isReadOnly(existingRepositoryMetadata.settings()) == false
            && existingRepositoryMetadata.generation() >= RepositoryData.EMPTY_REPO_GEN
            && existingRepositoryMetadata.generation() != existingRepositoryMetadata.pendingGeneration()) {
            throw newRepositoryConflictException(
                existingRepositoryMetadata.name(),
                Strings.format(
                    "currently updating root blob generation from [%d] to [%d], cannot update readonly flag",
                    existingRepositoryMetadata.generation(),
                    existingRepositoryMetadata.pendingGeneration()
                )
            );
        }
    }
private static void ensureNoSearchableSnapshotsIndicesInUse(ClusterState clusterState, RepositoryMetadata repositoryMetadata) {
long count = 0L;
List<Index> indices = null;
for (ProjectMetadata project : clusterState.metadata().projects().values()) {
for (IndexMetadata indexMetadata : project) {
if (indexSettingsMatchRepositoryMetadata(indexMetadata, repositoryMetadata)) {
if (indices == null) {
indices = new ArrayList<>();
}
if (indices.size() < 5) {
indices.add(indexMetadata.getIndex());
}
count += 1L;
}
}
}
if (indices != null && indices.isEmpty() == false) {
throw newRepositoryConflictException(
repositoryMetadata.name(),
"found "
+ count
+ " searchable snapshots indices that use the repository: "
+ Strings.collectionToCommaDelimitedString(indices)
+ (count > indices.size() ? ",..." : "")
);
}
}
private static boolean indexSettingsMatchRepositoryMetadata(IndexMetadata indexMetadata, RepositoryMetadata repositoryMetadata) {
if (indexMetadata.isSearchableSnapshot()) {
final Settings indexSettings = indexMetadata.getSettings();
final String indexRepositoryUuid = indexSettings.get(SEARCHABLE_SNAPSHOTS_REPOSITORY_UUID_SETTING_KEY);
if (Strings.hasLength(indexRepositoryUuid)) {
return Objects.equals(repositoryMetadata.uuid(), indexRepositoryUuid);
} else {
return Objects.equals(repositoryMetadata.name(), indexSettings.get(SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY));
}
}
return false;
}
    // Builds the standard conflict exception for attempts to modify/unregister a repository that is busy.
    private static RepositoryConflictException newRepositoryConflictException(String repository, String reason) {
        return new RepositoryConflictException(
            repository,
            "trying to modify or unregister repository that is currently used (" + reason + ')'
        );
    }
    // (snapshot, index version) checks that must pass before a snapshot may be restored.
    public List<BiConsumer<Snapshot, IndexVersion>> getPreRestoreVersionChecks() {
        return preRestoreChecks;
    }
    /** Stats key under which the number of repositories of each type is reported. */
    public static final String COUNT_USAGE_STATS_NAME = "count";

    /**
     * Builds per-repository-type usage statistics: a repository count plus a count per usage-feature tag.
     */
    public RepositoryUsageStats getUsageStats() {
        if (repositories.isEmpty()) {
            return RepositoryUsageStats.EMPTY;
        }
        final var statsByType = new HashMap<String, Map<String, Long>>();
        for (final var repository : getRepositories()) {
            final var repositoryType = repository.getMetadata().type();
            final var typeStats = statsByType.computeIfAbsent(repositoryType, ignored -> new HashMap<>());
            typeStats.compute(COUNT_USAGE_STATS_NAME, (k, count) -> (count == null ? 0L : count) + 1);
            final var repositoryUsageTags = repository.getUsageFeatures();
            // Feature tags must not clash with the reserved "count" key.
            assert repositoryUsageTags.contains(COUNT_USAGE_STATS_NAME) == false : repositoryUsageTags;
            for (final var repositoryUsageTag : repositoryUsageTags) {
                typeStats.compute(repositoryUsageTag, (k, count) -> (count == null ? 0L : count) + 1);
            }
        }
        // Snapshot into immutable per-type maps before handing the stats out.
        return new RepositoryUsageStats(
            statsByType.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> Map.copyOf(e.getValue())))
        );
    }
    @Override
    protected void doStart() {}

    @Override
    protected void doStop() {}

    @Override
    protected void doClose() throws IOException {
        // Stop receiving cluster-state updates before tearing the repositories down.
        clusterService.removeApplier(this);
        final Collection<Repository> repos = new ArrayList<>();
        repos.addAll(internalRepositories.values().stream().map(Map::values).flatMap(Collection::stream).toList());
        repos.addAll(getRepositories());
        IOUtils.close(repos);
        // After closing, wait until each repository has quiesced its in-flight operations.
        for (Repository repo : repos) {
            repo.awaitIdle();
        }
    }
}
| UnregisterRepositoryTask |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/subselect/join/SubselectInJoinedTableTest.java | {
"start": 699,
"end": 2003
} | class ____ {
@AfterEach
void tearDown(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
@JiraKey("HHH-10998")
public void testSubselectInJoinedTable(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (s) -> {
OrderEntry orderEntry1 = new OrderEntry();
orderEntry1.setOrderEntryId( 1L );
OrderEntry orderEntry2 = new OrderEntry();
orderEntry2.setOrderEntryId( 2L );
Order order = new Order();
order.setOrderId( 3L );
order.getOrderEntries().add( orderEntry1 );
order.getOrderEntries().add( orderEntry2 );
order.setFirstOrderEntry( orderEntry1 );
s.persist( orderEntry1 );
s.persist( orderEntry2 );
s.persist( order );
} );
factoryScope.inTransaction( (s) -> {
var order = s.find( Order.class, 3L );
var orderEntry1 = s.find( OrderEntry.class, 1L );
var orderEntry2 = s.find( OrderEntry.class, 2L );
assertEquals( orderEntry1.getOrderEntryId(), order.getFirstOrderEntry().getOrderEntryId() );
assertEquals( 2, order.getOrderEntries().size() );
assertEquals( orderEntry1.getOrderEntryId(),
order.getOrderEntries().get( 0 ).getOrderEntryId() );
assertEquals( orderEntry2.getOrderEntryId(),
order.getOrderEntries().get( 1 ).getOrderEntryId() );
} );
}
public static | SubselectInJoinedTableTest |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java | {
"start": 30504,
"end": 32684
} | class ____ extends OptimizerExpressionRule<ConditionalFunction> {
SimplifyConditional() {
super(TransformDirection.DOWN);
}
@Override
protected Expression rule(ConditionalFunction cf) {
Expression e = cf;
List<Expression> children = e.children();
// optimize nullIf
if (e instanceof NullIf nullIf) {
if (Expressions.isNull(nullIf.left()) || Expressions.isNull(nullIf.right())) {
return nullIf.left();
}
}
if (e instanceof ArbitraryConditionalFunction c) {
// exclude any nulls found
List<Expression> newChildren = new ArrayList<>();
for (Expression child : children) {
if (Expressions.isNull(child) == false) {
newChildren.add(child);
// For Coalesce find the first non-null foldable child (if any) and break early
if (e instanceof Coalesce && child.foldable()) {
break;
}
}
}
// update expression
if (newChildren.size() < children.size()) {
e = c.replaceChildren(newChildren);
}
// there's no need for a conditional if all the children are the same (this includes the case of just one value)
if (e instanceof Coalesce && children.size() > 0) {
Expression firstChild = children.get(0);
boolean sameChild = true;
for (int i = 1; i < children.size(); i++) {
Expression child = children.get(i);
if (firstChild.semanticEquals(child) == false) {
sameChild = false;
break;
}
}
if (sameChild) {
return firstChild;
}
}
}
return e;
}
}
static | SimplifyConditional |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/browse/BrowseFilterTest.java | {
"start": 1187,
"end": 2691
} | class ____ extends ContextTestSupport {
protected final Object body1 = "one";
protected final Object body2 = "two";
protected final Object body3 = "three";
protected final Object body4 = "four";
protected final Object body5 = "five";
@Test
public void testFilter() throws Exception {
template.sendBody("browse:foo", body1);
template.sendBody("browse:foo", body2);
template.sendBody("browse:foo", body3);
template.sendBody("browse:foo", body4);
template.sendBody("browse:foo", body5);
Collection<Endpoint> list = context.getEndpoints();
assertEquals(2, list.size(), "number of endpoints");
BrowsableEndpoint be1 = context.getEndpoint("browse:foo", BrowsableEndpoint.class);
assertEquals(5, be1.getExchanges().size());
BrowsableEndpoint be2 = context.getEndpoint("browse:bar?filter=#evenFilter", BrowsableEndpoint.class);
assertEquals(2, be2.getExchanges().size());
assertEquals("two", be2.getExchanges().get(0).getMessage().getBody());
assertEquals("four", be2.getExchanges().get(1).getMessage().getBody());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
context.getRegistry().bind("evenFilter", new EvenPredicate());
from("browse:foo").to("browse:bar?filter=#evenFilter");
}
};
}
private static | BrowseFilterTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/BeanWithXPathInjectionPreCompileTest.java | {
"start": 3211,
"end": 3672
} | class ____ {
public String body;
public String foo;
@Override
public String toString() {
return "MyBean[foo: " + foo + " body: " + body + "]";
}
public void read(String body, @XPath(value = "/soap:Envelope/soap:Body/foo/text()", preCompile = false) String foo) {
this.foo = foo;
this.body = body;
LOG.info("read() method called on {}", this);
}
}
}
| MyBean |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/H2Dialect.java | {
"start": 33845,
"end": 35589
} | class ____ implements ParameterMarkerStrategy {
/**
* Singleton access
*/
public static final OrdinalParameterMarkerStrategy INSTANCE = new OrdinalParameterMarkerStrategy();
@Override
public String createMarker(int position, JdbcType jdbcType) {
return "?" + position;
}
}
@Override
public DmlTargetColumnQualifierSupport getDmlTargetColumnQualifierSupport() {
return DmlTargetColumnQualifierSupport.TABLE_ALIAS;
}
@Override
public String getCaseInsensitiveLike() {
return "ilike";
}
@Override
public boolean supportsCaseInsensitiveLike(){
return true;
}
@Override
public boolean supportsPartitionBy() {
return true;
}
@Override
public boolean supportsBindingNullSqlTypeForSetNull() {
return true;
}
@Override
public boolean supportsValuesList() {
return true;
}
@Override
public String getDual() {
return "dual";
}
@Override
public boolean supportsFilterClause() {
// Introduction of FILTER clause https://github.com/h2database/h2database/commit/9e6dbf3baa57000f670826ede431dc7fb4cd9d9c
return true;
}
@Override
public boolean supportsRowConstructor() {
return true;
}
@Override
public boolean supportsArrayConstructor() {
return true;
}
@Override
public boolean supportsJoinInMutationStatementSubquery() {
return false;
}
@Override
public boolean supportsRowValueConstructorSyntax() {
// Just a guess
return true;
}
@Override
public boolean supportsRowValueConstructorDistinctFromSyntax() {
return true;
}
@Override
public boolean supportsWithClauseInSubquery() {
return false;
}
@Override
public boolean supportsRowValueConstructorSyntaxInQuantifiedPredicates() {
// Just a guess
return true;
}
}
| OrdinalParameterMarkerStrategy |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java | {
"start": 1482,
"end": 8058
} | class ____ extends ESTestCase {
public void testBasics() throws Exception {
String fieldName = "long_field";
RangeType rangeType = RangeType.LONG;
try (Directory dir = newDirectory()) {
try (RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
// intersects (within)
Document document = new Document();
BytesRef encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, -10L, 9L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// intersects (crosses)
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, 10L, 20L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// intersects (contains, crosses)
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, -20L, 30L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// intersects (within)
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, -11L, 1L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// intersects (crosses)
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, 12L, 15L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// disjoint
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, -122L, -115L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// intersects (crosses)
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, Long.MIN_VALUE, -11L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// equal (within, contains, intersects)
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, -11L, 15L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// intersects, within
document = new Document();
encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, 5L, 10L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
// search
try (IndexReader reader = writer.getReader()) {
IndexSearcher searcher = newSearcher(reader);
Query query = rangeType.dvRangeQuery(fieldName, INTERSECTS, -11L, 15L, true, true);
assertEquals(8, searcher.count(query));
query = rangeType.dvRangeQuery(fieldName, WITHIN, -11L, 15L, true, true);
assertEquals(5, searcher.count(query));
query = rangeType.dvRangeQuery(fieldName, CONTAINS, -11L, 15L, true, true);
assertEquals(2, searcher.count(query));
query = rangeType.dvRangeQuery(fieldName, CROSSES, -11L, 15L, true, true);
assertEquals(3, searcher.count(query));
// test includeFrom = false and includeTo = false
query = rangeType.dvRangeQuery(fieldName, INTERSECTS, -11L, 15L, false, false);
assertEquals(7, searcher.count(query));
query = rangeType.dvRangeQuery(fieldName, WITHIN, -11L, 15L, false, false);
assertEquals(2, searcher.count(query));
query = rangeType.dvRangeQuery(fieldName, CONTAINS, -11L, 15L, false, false);
assertEquals(2, searcher.count(query));
query = rangeType.dvRangeQuery(fieldName, CROSSES, -11L, 15L, false, false);
assertEquals(5, searcher.count(query));
}
}
}
}
public void testNoField() throws IOException {
String fieldName = "long_field";
RangeType rangeType = RangeType.LONG;
// no field in index
try (Directory dir = newDirectory()) {
try (RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
writer.addDocument(new Document());
try (IndexReader reader = writer.getReader()) {
IndexSearcher searcher = newSearcher(reader);
Query query = rangeType.dvRangeQuery(fieldName, INTERSECTS, -1L, 1L, true, true);
assertEquals(0, searcher.count(query));
}
}
}
// no field in segment
try (Directory dir = newDirectory()) {
try (RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
// intersects (within)
Document document = new Document();
BytesRef encodedRange = rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, 0L, 0L, true, true)));
document.add(new BinaryDocValuesField(fieldName, encodedRange));
writer.addDocument(document);
writer.commit();
writer.addDocument(new Document());
try (IndexReader reader = writer.getReader()) {
IndexSearcher searcher = newSearcher(reader);
Query query = rangeType.dvRangeQuery(fieldName, INTERSECTS, -1L, 1L, true, true);
assertEquals(1, searcher.count(query));
}
}
}
}
}
| BinaryDocValuesRangeQueryTests |
java | google__dagger | dagger-android-processor/main/java/dagger/android/processor/AndroidInjectorDescriptor.java | {
"start": 1700,
"end": 2387
} | class ____ {
/** The type to be injected; the return type of the {@code ContributesAndroidInjector} method. */
abstract ClassName injectedType();
/** Scopes to apply to the generated {@link dagger.Subcomponent}. */
abstract ImmutableSet<AnnotationSpec> scopes();
/** See {@code ContributesAndroidInjector#modules()} */
abstract ImmutableSet<ClassName> modules();
/** The {@link dagger.Module} that contains the {@code ContributesAndroidInjector} method. */
abstract ClassName enclosingModule();
/** The method annotated with {@code ContributesAndroidInjector}. */
abstract XExecutableElement method();
@AutoValue.Builder
abstract static | AndroidInjectorDescriptor |
java | hibernate__hibernate-orm | hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/procedure/GaussDBStoredProcedureTest.java | {
"start": 19605,
"end": 20184
} | class ____{
@Id
@Column(name="ID")
private long id;
@Column(name="STREET")
private String street;
@Column(name="CITY")
private String city;
@Column(name="ZIP")
private String zip;
public Address() {
}
public Address(long id, String street, String city, String zip) {
this.id = id;
this.street = street;
this.city = city;
this.zip = zip;
}
public long getId() {
return id;
}
public String getStreet() {
return street;
}
public String getCity() {
return city;
}
public String getZip() {
return zip;
}
}
}
| Address |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/basic/bitset/BitSetJavaTypeRegistrationTests.java | {
"start": 892,
"end": 1619
} | class ____ {
@Test
public void testResolution(SessionFactoryScope scope) {
final EntityPersister productType = scope.getSessionFactory()
.getRuntimeMetamodels()
.getMappingMetamodel()
.findEntityDescriptor(Product.class);
final SingularAttributeMapping bitSetAttribute = (SingularAttributeMapping) productType.findAttributeMapping("bitSet");
// make sure BitSetTypeDescriptor was selected
assertThat( bitSetAttribute.getJavaType(), instanceOf( BitSetJavaType.class));
}
@Table(name = "Product")
//tag::basic-bitset-example-java-type-global[]
@Entity(name = "Product")
@JavaTypeRegistration(javaType = BitSet.class, descriptorClass = BitSetJavaType.class)
public static | BitSetJavaTypeRegistrationTests |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/ValueInstantiatorTest.java | {
"start": 8675,
"end": 20576
} | class ____ extends InstantiatorBase
{
@Override
public String getValueTypeDesc() {
return AnnotatedBeanDelegating.class.getName();
}
@Override
public boolean canCreateUsingDelegate() { return true; }
@Override
public JavaType getDelegateType(DeserializationConfig config) {
return config.constructType(Map.class);
}
@Override
public AnnotatedWithParams getDelegateCreator() {
return null;
}
@Override
public Object createUsingDelegate(DeserializationContext ctxt, Object delegate) {
return new AnnotatedBeanDelegating(delegate, false);
}
}
/*
/**********************************************************
/* Unit tests for default creators
/**********************************************************
*/
private final ObjectMapper MAPPER = sharedMapper();
@Test
public void testCustomBeanInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MyBean.class, new MyBeanInstantiator()))
.build();
MyBean bean = mapper.readValue("{}", MyBean.class);
assertNotNull(bean);
assertEquals("secret!", bean._secret);
}
@Test
public void testCustomListInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MyList.class, new MyListInstantiator()))
.build();
MyList result = mapper.readValue("[]", MyList.class);
assertNotNull(result);
assertEquals(MyList.class, result.getClass());
assertEquals(0, result.size());
}
@Test
public void testCustomMapInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MyMap.class, new MyMapInstantiator()))
.build();
MyMap result = mapper.readValue("{ \"a\":\"b\" }", MyMap.class);
assertNotNull(result);
assertEquals(MyMap.class, result.getClass());
assertEquals(1, result.size());
}
/*
/**********************************************************
/* Unit tests for delegate creators
/**********************************************************
*/
@Test
public void testDelegateBeanInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MyBean.class, new MyDelegateBeanInstantiator()))
.build();
MyBean bean = mapper.readValue("123", MyBean.class);
assertNotNull(bean);
assertEquals("123", bean._secret);
}
@Test
public void testDelegateListInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MyList.class, new MyDelegateListInstantiator()))
.build();
MyList result = mapper.readValue("123", MyList.class);
assertNotNull(result);
assertEquals(1, result.size());
assertEquals(Integer.valueOf(123), result.get(0));
}
@Test
public void testDelegateMapInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MyMap.class, new MyDelegateMapInstantiator()))
.build();
MyMap result = mapper.readValue("123", MyMap.class);
assertNotNull(result);
assertEquals(1, result.size());
assertEquals(Integer.valueOf(123), result.values().iterator().next());
}
@Test
public void testCustomDelegateInstantiator() throws Exception
{
AnnotatedBeanDelegating value = MAPPER.readValue("{\"a\":3}", AnnotatedBeanDelegating.class);
assertNotNull(value);
Object ob = value.value;
assertNotNull(ob);
assertTrue(ob instanceof Map);
}
/*
/**********************************************************
/* Unit tests for property-based creators
/**********************************************************
*/
@Test
public void testPropertyBasedBeanInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(CreatorBean.class,
new InstantiatorBase() {
@Override
public boolean canCreateFromObjectWith() { return true; }
@Override
public CreatorProperty[] getFromObjectArguments(DeserializationConfig config) {
return new CreatorProperty[] {
CreatorProperty.construct(new PropertyName("secret"), config.constructType(String.class), null,
null, null, null, 0, null,
PropertyMetadata.STD_REQUIRED)
};
}
@Override
public Object createFromObjectWith(DeserializationContext ctxt, Object[] args) {
return new CreatorBean((String) args[0]);
}
}))
.build();
CreatorBean bean = mapper.readValue("{\"secret\":123,\"value\":37}", CreatorBean.class);
assertNotNull(bean);
assertEquals("123", bean._secret);
}
@Test
public void testPropertyBasedMapInstantiator() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MyMap.class, new CreatorMapInstantiator()))
.build();
MyMap result = mapper.readValue("{\"name\":\"bob\", \"x\":\"y\"}", MyMap.class);
assertNotNull(result);
assertEquals(2, result.size());
assertEquals("bob", result.get("bob"));
assertEquals("y", result.get("x"));
}
/*
/**********************************************************
/* Unit tests for scalar-delegates
/**********************************************************
*/
@Test
public void testBeanFromString() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MysteryBean.class,
new InstantiatorBase() {
@Override
public boolean canCreateFromString() { return true; }
@Override
public Object createFromString(DeserializationContext ctxt, String value) {
return new MysteryBean(value);
}
}))
.build();
MysteryBean result = mapper.readValue(q("abc"), MysteryBean.class);
assertNotNull(result);
assertEquals("abc", result.value);
}
@Test
public void testBeanFromInt() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MysteryBean.class,
new InstantiatorBase() {
@Override
public boolean canCreateFromInt() { return true; }
@Override
public Object createFromInt(DeserializationContext ctxt, int value) {
return new MysteryBean(value+1);
}
}))
.build();
MysteryBean result = mapper.readValue("37", MysteryBean.class);
assertNotNull(result);
assertEquals(Integer.valueOf(38), result.value);
}
@Test
public void testBeanFromLong() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MysteryBean.class,
new InstantiatorBase() {
@Override
public boolean canCreateFromLong() { return true; }
@Override
public Object createFromLong(DeserializationContext ctxt, long value) {
return new MysteryBean(value+1L);
}
}))
.build();
MysteryBean result = mapper.readValue("9876543210", MysteryBean.class);
assertNotNull(result);
assertEquals(Long.valueOf(9876543211L), result.value);
}
@Test
public void testBeanFromDouble() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MysteryBean.class,
new InstantiatorBase() {
@Override
public boolean canCreateFromDouble() { return true; }
@Override
public Object createFromDouble(DeserializationContext ctxt, double value) {
return new MysteryBean(2.0 * value);
}
}))
.build();
MysteryBean result = mapper.readValue("0.25", MysteryBean.class);
assertNotNull(result);
assertEquals(Double.valueOf(0.5), result.value);
}
@Test
public void testBeanFromBoolean() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(MysteryBean.class,
new InstantiatorBase() {
@Override
public boolean canCreateFromBoolean() { return true; }
@Override
public Object createFromBoolean(DeserializationContext ctxt, boolean value) {
return new MysteryBean(Boolean.valueOf(value));
}
}))
.build();
MysteryBean result = mapper.readValue("true", MysteryBean.class);
assertNotNull(result);
assertEquals(Boolean.TRUE, result.value);
}
/*
/**********************************************************
/* Other tests
/**********************************************************
*/
/**
* Beyond basic features, it should be possible to even implement
* polymorphic handling...
*/
@Test
public void testPolymorphicCreatorBean() throws Exception
{
ObjectMapper mapper = jsonMapperBuilder()
.addModule(new MyModule(PolymorphicBeanBase.class, new PolymorphicBeanInstantiator()))
.build();
String JSON = "{\"type\":"+q(PolymorphicBean.class.getName())+",\"name\":\"Axel\"}";
PolymorphicBeanBase result = mapper.readValue(JSON, PolymorphicBeanBase.class);
assertNotNull(result);
assertSame(PolymorphicBean.class, result.getClass());
assertEquals("Axel", ((PolymorphicBean) result).name);
}
@Test
public void testEmptyBean() throws Exception
{
AnnotatedBean bean = MAPPER.readValue("{}", AnnotatedBean.class);
assertNotNull(bean);
assertEquals("foo", bean.a);
assertEquals(3, bean.b);
}
@Test
public void testErrorMessageForMissingCtor() throws Exception
{
// first fail, check message from JSON Object (no default ctor)
try {
MAPPER.readValue("{ }", MyBean.class);
fail("Should not succeed");
} catch (InvalidDefinitionException e) {
verifyException(e, "Cannot construct instance of");
verifyException(e, "no Creators");
}
}
@Test
public void testErrorMessageForMissingStringCtor() throws Exception
{
// then from JSON String
try {
MAPPER.readValue("\"foo\"", MyBean.class);
fail("Should not succeed");
} catch (InvalidDefinitionException e) {
verifyException(e, "Cannot construct instance of");
verifyException(e, "no String-argument constructor/factory");
}
}
}
| AnnotatedBeanDelegatingInstantiator |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/leaderelection/ZooKeeperLeaderElectionConnectionHandlingTest.java | {
"start": 5310,
"end": 7866
} | enum ____ {
LOST_CONNECTION,
SUSPENDED_CONNECTION
}
private void runTestWithZooKeeperConnectionProblem(
Configuration configuration,
BiConsumerWithException<TestingConnectionStateListener, TestingContender, Exception>
validationLogic,
Problem problem)
throws Exception {
CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration,
testingFatalErrorHandlerResource.getTestingFatalErrorHandler());
CuratorFramework client = curatorFrameworkWrapper.asCuratorFramework();
LeaderElectionDriverFactory leaderElectionDriverFactory =
new ZooKeeperLeaderElectionDriverFactory(client);
DefaultLeaderElectionService leaderElectionService =
new DefaultLeaderElectionService(
leaderElectionDriverFactory,
testingFatalErrorHandlerResource.getTestingFatalErrorHandler());
final TestingConnectionStateListener connectionStateListener =
new TestingConnectionStateListener();
client.getConnectionStateListenable().addListener(connectionStateListener);
final TestingContender contender = new TestingContender();
try (LeaderElection leaderElection =
leaderElectionService.createLeaderElection("random-component-id")) {
leaderElection.startLeaderElection(contender);
contender.awaitGrantLeadership();
switch (problem) {
case SUSPENDED_CONNECTION:
zooKeeperResource.getCustomExtension().restart();
break;
case LOST_CONNECTION:
zooKeeperResource.getCustomExtension().stop();
break;
default:
throw new IllegalArgumentException(
String.format("Unknown problem type %s.", problem));
}
validationLogic.accept(connectionStateListener, contender);
} finally {
leaderElectionService.close();
curatorFrameworkWrapper.close();
if (problem == Problem.LOST_CONNECTION) {
// in case of lost connections we accept that some unhandled error can occur
testingFatalErrorHandlerResource.getTestingFatalErrorHandler().clearError();
}
}
}
private final | Problem |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/http/converter/json/SpringHandlerInstantiatorTests.java | {
"start": 4640,
"end": 5066
} | class ____ extends JsonDeserializer<User> {
@Autowired
private Capitalizer capitalizer;
@Override
public User deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {
ObjectCodec oc = jsonParser.getCodec();
JsonNode node = oc.readTree(jsonParser);
return new User(this.capitalizer.capitalize(node.get("username").asText()));
}
}
public static | UserDeserializer |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java | {
"start": 21544,
"end": 22935
} | class ____<S>
extends WrappingRemoteIterator<S, S> {
/**
* Probe as to whether work should continue.
*/
private final CallableRaisingIOE<Boolean> continueWork;
/**
* Wrap an iterator with one which adds a continuation probe.
* The probe will be called in the {@link #hasNext()} method, before
* the source iterator is itself checked and in {@link #next()}
* before retrieval.
* That is: it may be called multiple times per iteration.
* @param source source iterator.
* @param continueWork predicate which will trigger a fast halt if it returns false.
*/
private HaltableRemoteIterator(
final RemoteIterator<S> source,
final CallableRaisingIOE<Boolean> continueWork) {
super(source);
this.continueWork = continueWork;
}
@Override
public boolean hasNext() throws IOException {
return sourceHasNext();
}
@Override
public S next() throws IOException {
return sourceNext();
}
@Override
protected boolean sourceHasNext() throws IOException {
return continueWork.apply() && super.sourceHasNext();
}
}
/**
* A remote iterator which simply counts up, stopping once the
* value is greater than the finish.
* This is primarily for tests or when submitting work into a TaskPool.
*/
private static final | HaltableRemoteIterator |
java | google__dagger | javatests/dagger/internal/codegen/MapBindingComponentProcessorTest.java | {
"start": 3762,
"end": 4019
} | class ____ implements Handler {",
" public LoginHandler() {}",
"}");
Source adminHandlerFile =
CompilerTests.javaSource(
"test.AdminHandler",
"package test;",
"",
" | LoginHandler |
java | quarkusio__quarkus | extensions/panache/mongodb-panache-common/deployment/src/main/java/io/quarkus/mongodb/panache/common/deployment/ProjectionForEnhancer.java | {
"start": 366,
"end": 1020
} | class ____ implements BiFunction<String, ClassVisitor, ClassVisitor> {
private static final String BSONPROPERTY_BINARY_NAME = "org/bson/codecs/pojo/annotations/BsonProperty";
private static final String BSONPROPERTY_SIGNATURE = "L" + BSONPROPERTY_BINARY_NAME + ";";
private Map<String, String> propertyMapping;
public ProjectionForEnhancer(Map<String, String> propertyMapping) {
this.propertyMapping = propertyMapping;
}
@Override
public ClassVisitor apply(String className, ClassVisitor classVisitor) {
return new BsonPropertyClassVisitor(classVisitor, propertyMapping);
}
static | ProjectionForEnhancer |
java | quarkusio__quarkus | core/devmode-spi/src/main/java/io/quarkus/dev/console/DevConsoleManager.java | {
"start": 4783,
"end": 6408
} | class ____ by both the deployment and the runtime.
*/
public static <T> void register(String name, BiFunction<Object, Map<String, String>, T> action) {
assistantActions.put(name, action);
}
/**
* Invokes a registered action
*
* @param name the name of the action
* @return the result of the invocation. An empty map is returned for action not returning any result.
*/
@SuppressWarnings("unchecked")
public static <T> T invoke(String name) {
return DevConsoleManager.invoke(name, Map.of());
}
/**
* Invokes a registered action
*
* @param name the name of the action
* @param params the named parameters
* @return the result of the invocation. An empty map is returned for action not returning any result.
*/
@SuppressWarnings("unchecked")
public static <T> T invoke(String name, Map<String, String> params) {
var function = actions.get(name);
if (function == null) {
// Try assistant actions
var bifunction = assistantActions.get(name);
if (bifunction != null) {
Object assistant = DevConsoleManager.getGlobal(DEV_MANAGER_GLOBALS_ASSISTANT);
if (assistant != null) {
return (T) bifunction.apply(assistant, params);
} else {
throw new RuntimeException("Assistant not available");
}
} else {
throw new NoSuchElementException(name);
}
} else {
return (T) function.apply(params);
}
}
}
| shared |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/testdata/PersonInOtherPackage.java | {
"start": 736,
"end": 943
} | class ____ {
private int age;
public PersonInOtherPackage(int age) {
this.age = age;
}
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
}
| PersonInOtherPackage |
java | hibernate__hibernate-orm | local-build-plugins/src/main/java/org/hibernate/build/maven/embedder/MavenEmbedderConfig.java | {
"start": 389,
"end": 969
} | class ____ {
private DirectoryProperty localRepositoryDirectory;
@Inject
public MavenEmbedderConfig(Project project) {
localRepositoryDirectory = project.getObjects().directoryProperty();
localRepositoryDirectory.convention( project.getLayout().getBuildDirectory().dir( "maven-embedder/maven-local" ) );
}
public DirectoryProperty getLocalRepositoryDirectory() {
return localRepositoryDirectory;
}
public void setLocalRepositoryDirectory(DirectoryProperty localRepositoryDirectory) {
this.localRepositoryDirectory = localRepositoryDirectory;
}
}
| MavenEmbedderConfig |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/UUIDs.java | {
"start": 717,
"end": 773
} | class ____ generating various types of UUIDs.
*/
public | for |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/RuncContainerExecutorConfig.java | {
"start": 7449,
"end": 7897
} | class ____ {
public String getPath() {
return path;
}
public boolean isReadonly() {
return readonly;
}
final private String path;
final private boolean readonly;
public OCIRootConfig(String path, boolean readonly) {
this.path = path;
this.readonly = readonly;
}
public OCIRootConfig() {
this(null, false);
}
}
/**
* This | OCIRootConfig |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/longs/Longs_assertNotEqual_Test.java | {
"start": 1402,
"end": 3113
} | class ____ extends LongsBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> longs.assertNotEqual(someInfo(), null, 8L))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_longs_are_not_equal() {
longs.assertNotEqual(someInfo(), 8L, 6L);
}
@Test
void should_fail_if_longs_are_equal() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> longs.assertNotEqual(info, 6L, 6L));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotBeEqual(6L, 6L));
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> longsWithAbsValueComparisonStrategy.assertNotEqual(someInfo(),
null, 8L))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_longs_are_not_equal_according_to_custom_comparison_strategy() {
longsWithAbsValueComparisonStrategy.assertNotEqual(someInfo(), 8L, 6L);
}
@Test
void should_fail_if_longs_are_equal_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> longsWithAbsValueComparisonStrategy.assertNotEqual(info, -6L, 6L));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotBeEqual(-6L, 6L, absValueComparisonStrategy));
}
}
| Longs_assertNotEqual_Test |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncherEventType.java | {
"start": 877,
"end": 1173
} | enum ____ {
LAUNCH_CONTAINER,
RELAUNCH_CONTAINER,
RECOVER_CONTAINER,
CLEANUP_CONTAINER, // The process(grp) itself.
CLEANUP_CONTAINER_FOR_REINIT, // The process(grp) itself.
SIGNAL_CONTAINER,
PAUSE_CONTAINER,
RESUME_CONTAINER,
RECOVER_PAUSED_CONTAINER
}
| ContainersLauncherEventType |
java | apache__camel | components/camel-pqc/src/main/java/org/apache/camel/component/pqc/crypto/PQCDefaultSLHDSAMaterial.java | {
"start": 1131,
"end": 2412
} | class ____ {
public static final KeyPair keyPair;
public static final Signature signer;
static {
if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) {
Security.addProvider(new BouncyCastleProvider());
}
if (Security.getProvider(BouncyCastlePQCProvider.PROVIDER_NAME) == null) {
Security.addProvider(new BouncyCastlePQCProvider());
}
KeyPairGenerator generator;
try {
generator = prepareKeyPair();
keyPair = generator.generateKeyPair();
signer = Signature.getInstance(PQCSignatureAlgorithms.SLHDSA.getAlgorithm(),
PQCSignatureAlgorithms.SLHDSA.getBcProvider());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
protected static KeyPairGenerator prepareKeyPair()
throws NoSuchAlgorithmException, NoSuchProviderException, InvalidAlgorithmParameterException {
KeyPairGenerator kpGen = KeyPairGenerator.getInstance(PQCSignatureAlgorithms.SLHDSA.getAlgorithm(),
PQCSignatureAlgorithms.SLHDSA.getBcProvider());
kpGen.initialize(SLHDSAParameterSpec.slh_dsa_sha2_128s, new SecureRandom());
return kpGen;
}
}
| PQCDefaultSLHDSAMaterial |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/generics/ErroneousSource4.java | {
"start": 206,
"end": 620
} | class ____ {
private WildCardSuperWrapper<TypeA> fooWildCardSuperTypeAFailure;
public WildCardSuperWrapper<TypeA> getFooWildCardSuperTypeAFailure() {
return fooWildCardSuperTypeAFailure;
}
public void setFooWildCardSuperTypeAFailure(WildCardSuperWrapper<TypeA> fooWildCardSuperTypeAFailure) {
this.fooWildCardSuperTypeAFailure = fooWildCardSuperTypeAFailure;
}
}
| ErroneousSource4 |
java | apache__dubbo | dubbo-common/src/test/java/org/apache/dubbo/common/extension/ext2/Ext2.java | {
"start": 1023,
"end": 1202
} | interface ____ {
// one of the properties of an argument is an instance of URL.
@Adaptive
String echo(UrlHolder holder, String s);
String bang(URL url, int i);
}
| Ext2 |
java | apache__spark | common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/protocol/CorruptionCause.java | {
"start": 1001,
"end": 1953
} | class ____ extends BlockTransferMessage {
public Cause cause;
public CorruptionCause(Cause cause) {
this.cause = cause;
}
@Override
protected Type type() {
return Type.CORRUPTION_CAUSE;
}
@Override
public String toString() {
return "CorruptionCause[cause=" + cause + "]";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CorruptionCause that = (CorruptionCause) o;
return cause == that.cause;
}
@Override
public int hashCode() {
return cause.hashCode();
}
@Override
public int encodedLength() {
return 1; /* encoded length of cause */
}
@Override
public void encode(ByteBuf buf) {
buf.writeByte(cause.ordinal());
}
public static CorruptionCause decode(ByteBuf buf) {
int ordinal = buf.readByte();
return new CorruptionCause(Cause.values()[ordinal]);
}
}
| CorruptionCause |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/Punctuator.java | {
"start": 1151,
"end": 2160
} | interface ____ {
/**
* Perform the scheduled periodic operation.
*
* <p> If this method accesses {@link org.apache.kafka.streams.processor.api.ProcessorContext} or
* {@link org.apache.kafka.streams.processor.api.ProcessorContext}, record metadata like topic,
* partition, and offset or {@link org.apache.kafka.streams.processor.api.RecordMetadata} won't
* be available.
*
* <p> Furthermore, for any record that is sent downstream via
* {@link org.apache.kafka.streams.processor.api.ProcessorContext#forward(Record)}
* or {@link org.apache.kafka.streams.processor.api.ProcessorContext#forward(Record)}, there
* won't be any record metadata. If
* {@link org.apache.kafka.streams.processor.api.ProcessorContext#forward(Record)} is used,
* it's also not possible to set records headers.
*
* @param timestamp when the operation is being called, depending on {@link PunctuationType}
*/
void punctuate(long timestamp);
}
| Punctuator |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/NeverMatcher.java | {
"start": 605,
"end": 1103
} | class ____<T> extends BaseMatcher<T> {
@SuppressWarnings("unchecked")
public static <T> Matcher<T> never() {
return (Matcher<T>) INSTANCE;
}
private static final Matcher<?> INSTANCE = new NeverMatcher<>();
private NeverMatcher() {/* singleton */}
@Override
public boolean matches(Object actual) {
return false;
}
@Override
public void describeTo(Description description) {
description.appendText("never matches");
}
}
| NeverMatcher |
java | apache__camel | components/camel-test/camel-test-main-junit5/src/main/java/org/apache/camel/test/main/junit5/AdviceRouteMapping.java | {
"start": 1934,
"end": 2120
} | class ____ {
* // The rest of the test class
* }
* </code>
* </pre>
*
* @see RouteBuilder
* @see CamelMainTest
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @ | SomeTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/AlterTableQuoteDefaultSchemaTest.java | {
"start": 5764,
"end": 5884
} | class ____ {
@Id
public Integer id;
}
@Entity(name = "MyEntity")
@Table(name = "my_entity")
public static | MyEntity |
java | junit-team__junit5 | junit-jupiter-params/src/main/java/org/junit/jupiter/params/provider/EnumSource.java | {
"start": 3359,
"end": 3534
} | enum ____ of the range to be included.
*
* <p>Defaults to an empty string, where the range starts from the first enum
* constant of the specified {@linkplain #value | constant |
java | quarkusio__quarkus | integration-tests/oidc-wiremock/src/test/java/io/quarkus/it/keycloak/CustomOidcWiremockTestResource.java | {
"start": 432,
"end": 2183
} | class ____ extends OidcWiremockTestResource {
@Override
public Map<String, String> start() {
try {
generateCertificates();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
return super.start();
}
private void generateCertificates() throws Exception {
File chainDir = new File("target/chain");
CertificateChainGenerator chainGenerator = new CertificateChainGenerator(chainDir)
.withCN("www.quarkustest.com");
chainGenerator.generate();
Path rootCertPath = Paths.get("target/chain/root.crt");
X509Certificate rootCert = KeyUtils.getCertificate(Files.readString(rootCertPath));
Path leafCertPath = Paths.get("target/chain/www.quarkustest.com.crt");
X509Certificate leafCert = KeyUtils.getCertificate(Files.readString(leafCertPath));
File trustStore = new File(chainDir, "truststore.p12");
KeyStore keyStore = KeyStore.getInstance("PKCS12");
keyStore.load(null, null);
keyStore.setCertificateEntry("root", rootCert);
keyStore.setCertificateEntry("leaf", leafCert);
var fos = new FileOutputStream(trustStore);
keyStore.store(fos, "storepassword".toCharArray());
fos.close();
File trustStoreRoot = new File(chainDir, "truststore-rootcert.p12");
KeyStore keyStoreRootCert = KeyStore.getInstance("PKCS12");
keyStoreRootCert.load(null, null);
keyStoreRootCert.setCertificateEntry("root", rootCert);
var fosRootCert = new FileOutputStream(trustStoreRoot);
keyStoreRootCert.store(fosRootCert, "storepassword".toCharArray());
fosRootCert.close();
}
}
| CustomOidcWiremockTestResource |
java | quarkusio__quarkus | integration-tests/oidc-wiremock/src/main/java/io/quarkus/it/keycloak/UsersResourceOidcRecovered.java | {
"start": 488,
"end": 821
} | class ____ {
@Inject
SecurityIdentity identity;
@GET
@Path("/preferredUserName")
@RolesAllowed("user")
@Produces(MediaType.APPLICATION_JSON)
public User preferredUserName() {
return new User(((JsonWebToken) identity.getPrincipal()).getClaim("preferred_username"));
}
}
| UsersResourceOidcRecovered |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/source/ProgressiveTimestampsAndWatermarks.java | {
"start": 14786,
"end": 15273
} | class ____ the idleness status of main and split-local output, and only marks the
* underlying output as idle if both main and per-split output are idle.
*
* <p>The reason of adding this manager is that the implementation of source reader might only
* use one of main or split-local output for emitting records and watermarks, and we could avoid
* watermark generator on the vacant output keep marking the underlying output as idle.
*/
private static | tracks |
java | square__okhttp | samples/guide/src/main/java/okhttp3/recipes/Progress.java | {
"start": 918,
"end": 2531
} | class ____ {
public void run() throws Exception {
Request request = new Request.Builder()
.url("https://publicobject.com/helloworld.txt")
.build();
final ProgressListener progressListener = new ProgressListener() {
boolean firstUpdate = true;
@Override public void update(long bytesRead, long contentLength, boolean done) {
if (done) {
System.out.println("completed");
} else {
if (firstUpdate) {
firstUpdate = false;
if (contentLength == -1) {
System.out.println("content-length: unknown");
} else {
System.out.format("content-length: %d\n", contentLength);
}
}
System.out.println(bytesRead);
if (contentLength != -1) {
System.out.format("%d%% done\n", (100 * bytesRead) / contentLength);
}
}
}
};
OkHttpClient client = new OkHttpClient.Builder()
.addNetworkInterceptor(chain -> {
Response originalResponse = chain.proceed(chain.request());
return originalResponse.newBuilder()
.body(new ProgressResponseBody(originalResponse.body(), progressListener))
.build();
})
.build();
try (Response response = client.newCall(request).execute()) {
if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
System.out.println(response.body().string());
}
}
public static void main(String... args) throws Exception {
new Progress().run();
}
private static | Progress |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieEncoder.java | {
"start": 1958,
"end": 8663
} | class ____ extends CookieEncoder {
    /**
     * Strict encoder that validates that name and value chars are in the valid scope
     * defined in RFC6265, and (for methods that accept multiple cookies) that only
     * one cookie is encoded with any given name. (If multiple cookies have the same
     * name, the last one is the one that is encoded.)
     */
    public static final ServerCookieEncoder STRICT = new ServerCookieEncoder(true);
    /**
     * Lax instance that doesn't validate name and value, and that allows multiple
     * cookies with the same name.
     */
    public static final ServerCookieEncoder LAX = new ServerCookieEncoder(false);
    /**
     * Private: use the shared {@link #STRICT} or {@link #LAX} instances instead.
     *
     * @param strict whether name/value validation and duplicate-name elimination are enabled
     */
    private ServerCookieEncoder(boolean strict) {
        super(strict);
    }
/**
* Encodes the specified cookie name-value pair into a Set-Cookie header value.
*
* @param name the cookie name
* @param value the cookie value
* @return a single Set-Cookie header value
*/
public String encode(String name, String value) {
return encode(new DefaultCookie(name, value));
}
    /**
     * Encodes the specified cookie into a Set-Cookie header value.
     *
     * @param cookie the cookie
     * @return a single Set-Cookie header value
     */
    public String encode(Cookie cookie) {
        final String name = checkNotNull(cookie, "cookie").name();
        // A null value is encoded as the empty string.
        final String value = cookie.value() != null ? cookie.value() : "";
        // Name/value character validation is a no-op for the LAX instance.
        validateCookie(name, value);
        StringBuilder buf = stringBuilder();
        if (cookie.wrap()) {
            addQuoted(buf, name, value);
        } else {
            add(buf, name, value);
        }
        // Long.MIN_VALUE marks "max-age not set"; when set, emit Max-Age plus the
        // equivalent absolute Expires date for clients that ignore Max-Age.
        if (cookie.maxAge() != Long.MIN_VALUE) {
            add(buf, CookieHeaderNames.MAX_AGE, cookie.maxAge());
            // NOTE(review): maxAge * 1000 can overflow for values near Long.MAX_VALUE / 1000
            // — presumably callers use bounded lifetimes; confirm.
            Date expires = new Date(cookie.maxAge() * 1000 + System.currentTimeMillis());
            buf.append(CookieHeaderNames.EXPIRES);
            buf.append('=');
            DateFormatter.append(expires, buf);
            buf.append(';');
            buf.append(HttpConstants.SP_CHAR);
        }
        if (cookie.path() != null) {
            add(buf, CookieHeaderNames.PATH, cookie.path());
        }
        if (cookie.domain() != null) {
            add(buf, CookieHeaderNames.DOMAIN, cookie.domain());
        }
        if (cookie.isSecure()) {
            add(buf, CookieHeaderNames.SECURE);
        }
        if (cookie.isHttpOnly()) {
            add(buf, CookieHeaderNames.HTTPONLY);
        }
        // SameSite and Partitioned are extension attributes exposed only on DefaultCookie.
        if (cookie instanceof DefaultCookie) {
            DefaultCookie c = (DefaultCookie) cookie;
            if (c.sameSite() != null) {
                add(buf, CookieHeaderNames.SAMESITE, c.sameSite().name());
            }
            if (c.isPartitioned()) {
                add(buf, CookieHeaderNames.PARTITIONED);
            }
        }
        // Attributes were appended as "...; " pairs; drop the trailing separator.
        return stripTrailingSeparator(buf);
    }
/** Deduplicate a list of encoded cookies by keeping only the last instance with a given name.
*
* @param encoded The list of encoded cookies.
* @param nameToLastIndex A map from cookie name to index of last cookie instance.
* @return The encoded list with all but the last instance of a named cookie.
*/
private static List<String> dedup(List<String> encoded, Map<String, Integer> nameToLastIndex) {
boolean[] isLastInstance = new boolean[encoded.size()];
for (int idx : nameToLastIndex.values()) {
isLastInstance[idx] = true;
}
List<String> dedupd = new ArrayList<String>(nameToLastIndex.size());
for (int i = 0, n = encoded.size(); i < n; i++) {
if (isLastInstance[i]) {
dedupd.add(encoded.get(i));
}
}
return dedupd;
}
/**
* Batch encodes cookies into Set-Cookie header values.
*
* @param cookies a bunch of cookies
* @return the corresponding bunch of Set-Cookie headers
*/
public List<String> encode(Cookie... cookies) {
if (checkNotNull(cookies, "cookies").length == 0) {
return Collections.emptyList();
}
List<String> encoded = new ArrayList<String>(cookies.length);
Map<String, Integer> nameToIndex = strict && cookies.length > 1 ? new HashMap<String, Integer>() : null;
boolean hasDupdName = false;
for (int i = 0; i < cookies.length; i++) {
Cookie c = cookies[i];
encoded.add(encode(c));
if (nameToIndex != null) {
hasDupdName |= nameToIndex.put(c.name(), i) != null;
}
}
return hasDupdName ? dedup(encoded, nameToIndex) : encoded;
}
/**
* Batch encodes cookies into Set-Cookie header values.
*
* @param cookies a bunch of cookies
* @return the corresponding bunch of Set-Cookie headers
*/
public List<String> encode(Collection<? extends Cookie> cookies) {
if (checkNotNull(cookies, "cookies").isEmpty()) {
return Collections.emptyList();
}
List<String> encoded = new ArrayList<String>(cookies.size());
Map<String, Integer> nameToIndex = strict && cookies.size() > 1 ? new HashMap<String, Integer>() : null;
int i = 0;
boolean hasDupdName = false;
for (Cookie c : cookies) {
encoded.add(encode(c));
if (nameToIndex != null) {
hasDupdName |= nameToIndex.put(c.name(), i++) != null;
}
}
return hasDupdName ? dedup(encoded, nameToIndex) : encoded;
}
    /**
     * Batch encodes cookies into Set-Cookie header values.
     *
     * @param cookies a bunch of cookies
     * @return the corresponding bunch of Set-Cookie headers
     */
    public List<String> encode(Iterable<? extends Cookie> cookies) {
        Iterator<? extends Cookie> cookiesIt = checkNotNull(cookies, "cookies").iterator();
        if (!cookiesIt.hasNext()) {
            return Collections.emptyList();
        }
        List<String> encoded = new ArrayList<String>();
        // The first cookie is consumed eagerly so hasNext() can tell us whether more
        // than one cookie exists — only then is duplicate tracking worth allocating.
        Cookie firstCookie = cookiesIt.next();
        Map<String, Integer> nameToIndex = strict && cookiesIt.hasNext() ? new HashMap<String, Integer>() : null;
        int i = 0;
        encoded.add(encode(firstCookie));
        boolean hasDupdName = nameToIndex != null && nameToIndex.put(firstCookie.name(), i++) != null;
        while (cookiesIt.hasNext()) {
            Cookie c = cookiesIt.next();
            encoded.add(encode(c));
            if (nameToIndex != null) {
                // put() returns the previous value, i.e. non-null signals a repeated name.
                hasDupdName |= nameToIndex.put(c.name(), i++) != null;
            }
        }
        // Only pay for deduplication when a repeated name was actually seen.
        return hasDupdName ? dedup(encoded, nameToIndex) : encoded;
    }
}
| ServerCookieEncoder |
java | spring-projects__spring-boot | module/spring-boot-data-neo4j/src/test/java/org/springframework/boot/data/neo4j/domain/country/ReactiveCountryRepository.java | {
"start": 769,
"end": 857
} | interface ____ extends ReactiveNeo4jRepository<Country, Long> {
}
| ReactiveCountryRepository |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/benchmark/decode/EishayDecodeBytes.java | {
"start": 512,
"end": 2891
} | class ____ extends BenchmarkCase {
public final static EishayDecodeBytes instance = new EishayDecodeBytes();
private final byte[] bytes;
private final char[] chars;
private final String text;
private final MediaContent content;
    /** @return the UTF-8 encoded form of the serialized fixture JSON. */
    public byte[] getBytes() {
        return bytes;
    }
    /** @return the serialized fixture JSON plus a trailing space, as a char array. */
    public char[] getChars() {
        return chars;
    }
    /** @return the serialized fixture JSON text. */
    public String getText() {
        return text;
    }
    /** @return the fixture object graph that was serialized. */
    public MediaContent getContent() {
        return content;
    }
public EishayDecodeBytes(){
super("EishayDecode-Byte[]");
content = new MediaContent();
Media media = new Media();
media.setUri("http://javaone.com/keynote.mpg");
media.setTitle("Javaone Keynote");
media.setWidth(640);
media.setHeight(480);
media.setFormat("video/mpg4");
media.setDuration(18000000);
media.setSize(58982400);
media.setBitrate(262144);
media.setPersons(Arrays.asList("Bill Gates", "Steve Jobs"));
media.setPlayer(Player.JAVA);
media.setCopyright(null);
content.setMedia(media);
List<Image> images = new ArrayList<Image>();
{
Image image = new Image();
image.setUri("http://javaone.com/keynote_large.jpg");
image.setTitle("Javaone Keynote");
image.setWidth(1024);
image.setHeight(768);
image.setSize(Size.LARGE);
images.add(image);
}
{
Image image = new Image();
image.setUri("http://javaone.com/keynote_small.jpg");
image.setTitle("Javaone Keynote");
image.setWidth(320);
image.setHeight(240);
image.setSize(Size.SMALL);
images.add(image);
}
content.setImages(images);
try {
text = JSON.toJSONString(content, SerializerFeature.WriteEnumUsingToString, SerializerFeature.SortField);
chars = (text + " ").toCharArray();
bytes = text.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException();
}
}
    /**
     * Benchmark body: decodes the pre-serialized UTF-8 bytes back into a
     * {@code MediaContent} using the codec under test.
     */
    @Override
    public void execute(Codec codec) throws Exception {
        codec.decodeObject(bytes, MediaContent.class);
    }
}
| EishayDecodeBytes |
java | redisson__redisson | redisson/src/main/java/org/redisson/liveobject/resolver/AbstractNamingScheme.java | {
"start": 770,
"end": 1027
} | class ____ implements NamingScheme {
    // Codec exposed to subclasses and returned by getCodec().
    protected final Codec codec;
    /**
     * @param codec the codec this naming scheme reports via {@link #getCodec()}
     */
    public AbstractNamingScheme(Codec codec) {
        this.codec = codec;
    }
    /** {@inheritDoc} */
    @Override
    public Codec getCodec() {
        return codec;
    }
}
| AbstractNamingScheme |
java | apache__camel | components/camel-aws/camel-aws2-eventbridge/src/test/java/org/apache/camel/component/aws2/eventbridge/localstack/EventbridgePutEventsIT.java | {
"start": 1860,
"end": 4047
} | class ____ extends CamelTestSupport {
    // Local EventBridge-compatible test service (localstack package) shared by the class.
    @RegisterExtension
    public static AWSService service = AWSServiceFactory.createEventBridgeService();
    // Producer used to feed the route under test.
    @EndpointInject
    private ProducerTemplate template;
    // Captures the PutEventsResponse emitted at the end of the route.
    @EndpointInject("mock:result")
    private MockEndpoint result;
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
EventbridgeComponent eventbridgeComponent = context.getComponent("aws2-eventbridge", EventbridgeComponent.class);
eventbridgeComponent.getConfiguration().setEventbridgeClient(AWSSDKClientUtils.newEventBridgeClient());
return context;
}
@Test
public void sendIn() throws Exception {
result.expectedMessageCount(1);
template.send("direct:evs-events", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(EventbridgeConstants.EVENT_RESOURCES_ARN,
"arn:aws:sqs:eu-west-1:780410022472:camel-connector-test");
exchange.getIn().setHeader(EventbridgeConstants.EVENT_SOURCE, "com.pippo");
exchange.getIn().setHeader(EventbridgeConstants.EVENT_DETAIL_TYPE, "peppe");
exchange.getIn().setBody("Test Event");
}
});
MockEndpoint.assertIsSatisfied(context);
Assertions.assertTrue(result.getExchanges().get(0).getMessage().getBody(PutEventsResponse.class).hasEntries());
Assertions.assertEquals(1, result.getExchanges().get(0).getMessage().getBody(PutEventsResponse.class).entries().size());
Assertions.assertNotNull(
result.getExchanges().get(0).getMessage().getBody(PutEventsResponse.class).entries().get(0).eventId());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String event = "aws2-eventbridge://default?operation=putEvent";
from("direct:evs-events").to(event).log("${body}").to("mock:result");
}
};
}
}
| EventbridgePutEventsIT |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.