language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | components/camel-atom/src/main/java/org/apache/camel/component/atom/AtomEndpoint.java | {
"start": 1474,
"end": 3005
} | class ____ extends FeedEndpoint implements EndpointServiceLocation {
public AtomEndpoint() {
}
public AtomEndpoint(String endpointUri, FeedComponent component, String feedUri) {
super(endpointUri, component, feedUri);
}
@Override
public String getServiceUrl() {
return feedUri;
}
@Override
public String getServiceProtocol() {
return "atom";
}
@Override
public Exchange createExchange(Object feed) {
Exchange exchange = createExchangeWithFeedHeader(feed, AtomConstants.ATOM_FEED);
exchange.getIn().setBody(feed);
return exchange;
}
@Override
public Exchange createExchange(Object feed, Object entry) {
Exchange exchange = createExchangeWithFeedHeader(feed, AtomConstants.ATOM_FEED);
exchange.getIn().setBody(entry);
return exchange;
}
@Override
protected FeedPollingConsumer createEntryPollingConsumer(
FeedEndpoint feedEndpoint, Processor processor, boolean throttleEntries)
throws Exception {
AtomEntryPollingConsumer answer = new AtomEntryPollingConsumer(this, processor, throttleEntries);
configureConsumer(answer);
return answer;
}
@Override
protected FeedPollingConsumer createPollingConsumer(FeedEndpoint feedEndpoint, Processor processor) throws Exception {
AtomPollingConsumer answer = new AtomPollingConsumer(this, processor);
configureConsumer(answer);
return answer;
}
}
| AtomEndpoint |
java | hibernate__hibernate-orm | hibernate-vector/src/main/java/org/hibernate/vector/internal/SparseByteVectorJavaType.java | {
"start": 3741,
"end": 3941
} | class ____ extends MutableMutabilityPlan<SparseByteVector> {
@Override
protected SparseByteVector deepCopyNotNull(SparseByteVector value) {
return value.clone();
}
}
}
| SparseVectorMutabilityPlan |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/FunctionDefinition.java | {
"start": 1122,
"end": 1517
} | class ____ all details necessary to validate a
* function call and perform planning.
*
* <p>A pure function definition doesn't have to contain a runtime implementation. This can be
* provided by the planner at later stages. A {@link UserDefinedFunction} is a function definition
* that includes a runtime implementation already.
*
* @see UserDefinedFunction
*/
@PublicEvolving
public | provide |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/NativeQueryLockingTests.java | {
"start": 1076,
"end": 3821
} | class ____ {
final String QUERY_STRING = "select * from SIMPLE_ENTITY";
@Test
void testJpaLockMode(SessionFactoryScope sessions) {
// JPA says this is illegal
sessions.inTransaction( (session) -> {
final Query query = session.createNativeQuery( QUERY_STRING, SimpleEntity.class );
try {
query.setLockMode( LockModeType.PESSIMISTIC_WRITE );
fail( "Expecting failure per JPA" );
}
catch (IllegalStateException e) {
assertThat( e ).hasMessageContaining( "lock mode" );
}
} );
}
@Test
@RequiresDialect( value = H2Dialect.class, comment = "This has more to do with Query internals than the DB; so avoid Dialect variances in generated SQL" )
void testHibernateLockMode(SessionFactoryScope sessions) {
final SQLStatementInspector sqlCollector = sessions.getCollectingStatementInspector();
sqlCollector.clear();
sessions.inTransaction( (session) -> {
final NativeQuery<SimpleEntity> query = session.createNativeQuery( QUERY_STRING, SimpleEntity.class );
query.setHibernateLockMode( LockMode.PESSIMISTIC_WRITE );
query.list();
assertThat( sqlCollector.getSqlQueries() ).hasSize( 1 );
assertThat( sqlCollector.getSqlQueries().get( 0 ) ).endsWith( " for update" );
} );
}
@Test
@RequiresDialect( value = H2Dialect.class, comment = "This has more to do with Query internals than the DB; so avoid Dialect variances in generated SQL" )
void testLockModeHint(SessionFactoryScope sessions) {
final SQLStatementInspector sqlCollector = sessions.getCollectingStatementInspector();
sqlCollector.clear();
sessions.inTransaction( (session) -> {
final Query query = session.createNativeQuery( QUERY_STRING, SimpleEntity.class );
query.setHint( HibernateHints.HINT_NATIVE_LOCK_MODE, LockMode.PESSIMISTIC_WRITE );
query.getResultList();
assertThat( sqlCollector.getSqlQueries() ).hasSize( 1 );
assertThat( sqlCollector.getSqlQueries().get( 0 ) ).endsWith( " for update" );
} );
}
@Test
@RequiresDialect( value = H2Dialect.class, comment = "This has more to do with Query internals than the DB; so avoid Dialect variances in generated SQL" )
void testLockModeHintLowercase(SessionFactoryScope sessions) {
final SQLStatementInspector sqlCollector = sessions.getCollectingStatementInspector();
sqlCollector.clear();
sessions.inTransaction( (session) -> {
final Query query = session.createNativeQuery( QUERY_STRING, SimpleEntity.class );
query.setHint( HibernateHints.HINT_NATIVE_LOCK_MODE, LockMode.PESSIMISTIC_WRITE.name().toLowerCase( Locale.ROOT ) );
query.getResultList();
assertThat( sqlCollector.getSqlQueries() ).hasSize( 1 );
assertThat( sqlCollector.getSqlQueries().get( 0 ) ).endsWith( " for update" );
} );
}
}
| NativeQueryLockingTests |
java | apache__flink | flink-test-utils-parent/flink-table-filesystem-test-utils/src/test/java/org/apache/flink/table/file/testutils/TestFileSystemTableFactoryTest.java | {
"start": 2036,
"end": 4813
} | class ____ {
private static final ResolvedSchema SCHEMA =
ResolvedSchema.of(
Column.physical("f0", DataTypes.STRING()),
Column.physical("f1", DataTypes.BIGINT()),
Column.physical("f2", DataTypes.BIGINT()));
@Test
void testCreateSourceSink() {
Map<String, String> options = new HashMap<>();
options.put(FactoryUtil.CONNECTOR.key(), "test-filesystem");
options.put("path", "/tmp");
options.put("format", "testcsv");
// test ignore format options
options.put("testcsv.my_option", "my_value");
// test ignore partition fields
options.put("partition.fields.f1.date-formatter", "yyyy-MM-dd");
DynamicTableSource source = createTableSource(SCHEMA, options);
assertThat(source).isInstanceOf(FileSystemTableSource.class);
DynamicTableSink sink = createTableSink(SCHEMA, options);
assertThat(sink).isInstanceOf(FileSystemTableSink.class);
}
@Test
void testCreateUnboundedSource() {
Map<String, String> options = new HashMap<>();
options.put(FactoryUtil.CONNECTOR.key(), "test-filesystem");
options.put("path", "/tmp");
options.put("format", "testcsv");
options.put("source.monitor-interval", "5S");
DynamicTableSource source = createTableSource(SCHEMA, options);
assertThat(source).isInstanceOf(FileSystemTableSource.class);
// assert source is unbounded when specify source.monitor-interval
ScanTableSource.ScanRuntimeProvider scanRuntimeProvider =
((FileSystemTableSource) source)
.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
assertThat(scanRuntimeProvider.isBounded()).isFalse();
}
@Test
void testCreateBoundedSource() {
Map<String, String> options = new HashMap<>();
options.put(FactoryUtil.CONNECTOR.key(), "test-filesystem");
options.put("path", "/tmp");
options.put("format", "testcsv");
options.put("source.monitor-interval", "5S");
Configuration configuration = new Configuration();
configuration.set(RUNTIME_MODE, BATCH);
DynamicTableSource source = createTableSource(SCHEMA, options, configuration);
assertThat(source).isInstanceOf(FileSystemTableSource.class);
// assert source is bounded when specify source.monitor-interval and in batch mode
ScanTableSource.ScanRuntimeProvider scanRuntimeProvider =
((FileSystemTableSource) source)
.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
assertThat(scanRuntimeProvider.isBounded()).isTrue();
}
}
| TestFileSystemTableFactoryTest |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/snapshot/ForStSnapshotStrategyBase.java | {
"start": 11016,
"end": 17056
} | class ____ {
@Nonnull private final Map<String, StreamStateHandle> confirmedSstFiles;
/**
* Constructor of PreviousSnapshot. Giving a map of uploaded sst files in previous
* checkpoints, prune the sst files which have been re-uploaded in the following
* checkpoints. The prune logic is used to resolve the mismatch between TM and JM due to
* notification delay. Following steps for example:
*
* <ul>
* <li>1) checkpoint 1 uses file 00001.SST uploaded as xxx.sst.
* <li>2) checkpoint 2 uses the same file 00001.SST but re-uploads it as yyy.sst because
* CP 1 wasn't yet confirmed.
* <li>3) TM get a confirmation of checkpoint 1.
* <li>4) JM completes checkpoint 2 and subsumes checkpoint 1 - removing xxx.sst.
* <li>5) checkpoint 3 tries to re-use file 00001.SST uploaded as xxx.sst in checkpoint 1,
* but it was deleted in (4) by JM.
* </ul>
*
* @param currentUploadedSstFiles the sst files uploaded in previous checkpoints.
* @param lastCompletedCheckpoint the last completed checkpoint id.
*/
protected PreviousSnapshot(
@Nullable SortedMap<Long, Collection<HandleAndLocalPath>> currentUploadedSstFiles,
long lastCompletedCheckpoint) {
this.confirmedSstFiles =
currentUploadedSstFiles != null
? pruneFirstCheckpointSstFiles(
currentUploadedSstFiles, lastCompletedCheckpoint)
: Collections.emptyMap();
}
/**
* The last completed checkpoint's uploaded sst files are all included, then for each
* following checkpoint, if a sst file has been re-uploaded, remove it from the first
* checkpoint's sst files.
*
* @param currentUploadedSstFiles the sst files uploaded in the following checkpoint.
* @param lastCompletedCheckpoint the last completed checkpoint id.
*/
private Map<String, StreamStateHandle> pruneFirstCheckpointSstFiles(
@Nonnull SortedMap<Long, Collection<HandleAndLocalPath>> currentUploadedSstFiles,
long lastCompletedCheckpoint) {
Map<String, StreamStateHandle> prunedSstFiles = null;
int removedCount = 0;
for (Map.Entry<Long, Collection<HandleAndLocalPath>> entry :
currentUploadedSstFiles.entrySet()) {
// Iterate checkpoints in ascending order of checkpoint id.
if (entry.getKey() == lastCompletedCheckpoint) {
// The first checkpoint's uploaded sst files are all included.
prunedSstFiles =
entry.getValue().stream()
.collect(
Collectors.toMap(
HandleAndLocalPath::getLocalPath,
HandleAndLocalPath::getHandle));
} else if (prunedSstFiles == null) {
// The last completed checkpoint's uploaded sst files are not existed.
// So we skip the pruning process.
break;
} else if (!prunedSstFiles.isEmpty()) {
// Prune sst files which have been re-uploaded in the following checkpoints.
for (HandleAndLocalPath handleAndLocalPath : entry.getValue()) {
if (!(handleAndLocalPath.getHandle()
instanceof PlaceholderStreamStateHandle)) {
// If it's not a placeholder handle, it means the sst file has been
// re-uploaded in the following checkpoint.
if (prunedSstFiles.remove(handleAndLocalPath.getLocalPath()) != null) {
removedCount++;
}
}
}
}
}
if (removedCount > 0 && LOG.isTraceEnabled()) {
LOG.trace(
"Removed {} re-uploaded sst files from base file set for incremental "
+ "checkpoint. Base checkpoint id: {}",
removedCount,
currentUploadedSstFiles.firstKey());
}
return (prunedSstFiles != null && !prunedSstFiles.isEmpty())
? Collections.unmodifiableMap(prunedSstFiles)
: Collections.emptyMap();
}
protected Optional<StreamStateHandle> getUploaded(String filename) {
if (confirmedSstFiles.containsKey(filename)) {
StreamStateHandle handle = confirmedSstFiles.get(filename);
// We introduce a placeholder state handle to reduce network transfer overhead,
// it will be replaced by the original handle from the shared state registry
// (created from a previous checkpoint).
return Optional.of(
new PlaceholderStreamStateHandle(
handle.getStreamStateHandleID(),
handle.getStateSize(),
FileMergingSnapshotManager.isFileMergingHandle(handle)));
} else {
// Don't use any uploaded but not confirmed handles because they might be deleted
// (by TM) if the previous checkpoint failed. See FLINK-25395
return Optional.empty();
}
}
protected boolean isEmpty() {
return confirmedSstFiles.isEmpty();
}
@Override
public String toString() {
return "PreviousSnapshot{" + "confirmedSstFiles=" + confirmedSstFiles + '}';
}
}
}
| PreviousSnapshot |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/SybaseDialect.java | {
"start": 4090,
"end": 19017
} | class ____ extends AbstractTransactSQLDialect {
private static final DatabaseVersion MINIMUM_VERSION = DatabaseVersion.make( 16, 0 );
//All Sybase dialects share an IN list size limit.
private static final int IN_LIST_SIZE_LIMIT = 250000;
private static final int PARAM_COUNT_LIMIT = 2000;
private final UniqueDelegate uniqueDelegate = new SkipNullableUniqueDelegate(this);
private final SybaseDriverKind driverKind;
@Deprecated(forRemoval = true)
protected final boolean jtdsDriver;
private final SchemaNameResolver schemaNameResolver;
public SybaseDialect() {
this( MINIMUM_VERSION );
}
public SybaseDialect(DatabaseVersion version) {
super(version);
this.driverKind = SybaseDriverKind.OTHER;
this.jtdsDriver = true;
this.schemaNameResolver = determineSchemaNameResolver( driverKind );
}
private static SchemaNameResolver determineSchemaNameResolver(SybaseDriverKind driverKind) {
// if the driver is jTDS, then we need to use a query to determine the schema name.
// if we don't know the driver (OTHER), then be safe and use the query approach
return driverKind != SybaseDriverKind.JCONNECT
? new JTDSSchemaNameResolver()
: DefaultSchemaNameResolver.INSTANCE;
}
public SybaseDialect(DialectResolutionInfo info) {
super(info);
this.driverKind = SybaseDriverKind.determineKind( info );
this.jtdsDriver = driverKind == SybaseDriverKind.JTDS;
this.schemaNameResolver = determineSchemaNameResolver( driverKind );
}
@Override
protected DatabaseVersion getMinimumSupportedVersion() {
return MINIMUM_VERSION;
}
public SybaseDriverKind getDriverKind() {
return driverKind;
}
@Override
public JdbcType resolveSqlTypeDescriptor(
String columnTypeName,
int jdbcTypeCode,
int precision,
int scale,
JdbcTypeRegistry jdbcTypeRegistry) {
switch ( jdbcTypeCode ) {
case Types.NUMERIC:
case Types.DECIMAL:
if ( precision == 19 && scale == 0 ) {
return jdbcTypeRegistry.getDescriptor( Types.BIGINT );
}
}
return super.resolveSqlTypeDescriptor(
columnTypeName,
jdbcTypeCode,
precision,
scale,
jdbcTypeRegistry
);
}
@Override
public int resolveSqlTypeLength(
String columnTypeName,
int jdbcTypeCode,
int precision,
int scale,
int displaySize) {
// Sybase jconnect driver reports the "actual" precision in the display size
return switch (jdbcTypeCode) {
case Types.CHAR, Types.VARCHAR, Types.REAL, Types.DOUBLE -> displaySize;
default -> super.resolveSqlTypeLength( columnTypeName, jdbcTypeCode, precision, scale, displaySize );
};
}
@Override
public SqmTranslatorFactory getSqmTranslatorFactory() {
return new StandardSqmTranslatorFactory() {
@Override
public SqmTranslator<SelectStatement> createSelectTranslator(
SqmSelectStatement<?> sqmSelectStatement,
QueryOptions queryOptions,
DomainParameterXref domainParameterXref,
QueryParameterBindings domainParameterBindings,
LoadQueryInfluencers loadQueryInfluencers,
SqlAstCreationContext creationContext,
boolean deduplicateSelectionItems) {
return new SybaseSqmToSqlAstConverter<>(
sqmSelectStatement,
queryOptions,
domainParameterXref,
domainParameterBindings,
loadQueryInfluencers,
creationContext,
deduplicateSelectionItems
);
}
};
}
@Override
public SqlAstTranslatorFactory getSqlAstTranslatorFactory() {
return new StandardSqlAstTranslatorFactory() {
@Override
protected <T extends JdbcOperation> SqlAstTranslator<T> buildTranslator(
SessionFactoryImplementor sessionFactory, Statement statement) {
return new SybaseSqlAstTranslator<>( sessionFactory, statement );
}
};
}
@Override
public LockingSupport getLockingSupport() {
return TransactSQLLockingSupport.SYBASE;
}
@Override
public boolean supportsNullPrecedence() {
return false;
}
@Override
public int getInExpressionCountLimit() {
return IN_LIST_SIZE_LIMIT;
}
@Override
public int getParameterCountLimit() {
return PARAM_COUNT_LIMIT;
}
@Override
public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
super.contributeTypes(typeContributions, serviceRegistry);
final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration()
.getJdbcTypeRegistry();
if ( driverKind == SybaseDriverKind.JTDS ) {
jdbcTypeRegistry.addDescriptor( Types.TINYINT, TinyIntAsSmallIntJdbcType.INSTANCE );
// The jTDS driver doesn't support the JDBC4 signatures using 'long length' for stream bindings
jdbcTypeRegistry.addDescriptor( Types.CLOB, ClobJdbcType.CLOB_BINDING );
jdbcTypeRegistry.addDescriptor( Types.NCLOB, NClobJdbcType.NCLOB_BINDING );
}
else {
// jConnect driver only conditionally supports getClob/getNClob depending on a server setting. See
// - https://help.sap.com/doc/e3cb6844decf441e85e4670e1cf48c9b/16.0.3.6/en-US/SAP_jConnect_Programmers_Reference_en.pdf
// - https://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.infocenter.dc20155.1570/html/OS_SDK_nf/CIHJFDDH.htm
// - HHH-7889
jdbcTypeRegistry.addDescriptor( Types.CLOB, ClobJdbcType.STREAM_BINDING_EXTRACTING );
jdbcTypeRegistry.addDescriptor( Types.NCLOB, ClobJdbcType.STREAM_BINDING_EXTRACTING );
}
jdbcTypeRegistry.addDescriptor( Types.BLOB, BlobJdbcType.PRIMITIVE_ARRAY_BINDING );
// Sybase requires a custom binder for binding untyped nulls with the NULL type
typeContributions.contributeJdbcType( ObjectNullAsBinaryTypeJdbcType.INSTANCE );
// Until we remove StandardBasicTypes, we have to keep this
typeContributions.contributeType(
new JavaObjectType(
ObjectNullAsBinaryTypeJdbcType.INSTANCE,
typeContributions.getTypeConfiguration()
.getJavaTypeRegistry()
.resolveDescriptor( Object.class )
)
);
typeContributions.contributeType(
new NullType(
ObjectNullAsBinaryTypeJdbcType.INSTANCE,
typeContributions.getTypeConfiguration()
.getJavaTypeRegistry()
.resolveDescriptor( Object.class )
)
);
}
@Override
public NationalizationSupport getNationalizationSupport() {
// At least the jTDS driver doesn't support this
return super.getNationalizationSupport();
}
@Override
public boolean stripsTrailingSpacesFromChar() {
return true;
}
@Override
public void initializeFunctionRegistry(FunctionContributions functionContributions) {
super.initializeFunctionRegistry( functionContributions );
final var functionFactory = new CommonFunctionFactory( functionContributions );
functionFactory.stddev();
functionFactory.variance();
functionFactory.stddevPopSamp_stdevp();
functionFactory.varPopSamp_varp();
functionFactory.stddevPopSamp();
functionFactory.varPopSamp();
functionFactory.round_round();
// For SQL-Server we need to cast certain arguments to varchar(16384) to be able to concat them
functionContributions.getFunctionRegistry().register(
"count",
new CountFunction(
this,
functionContributions.getTypeConfiguration(),
SqlAstNodeRenderingMode.DEFAULT,
"count_big",
"+",
"varchar(16384)",
false
)
);
// AVG by default uses the input type, so we possibly need to cast the argument type, hence a special function
functionFactory.avg_castingNonDoubleArguments( this, SqlAstNodeRenderingMode.DEFAULT );
//this doesn't work 100% on earlier versions of Sybase
//which were missing the third parameter in charindex()
//TODO: we could emulate it with substring() like in Postgres
functionFactory.locate_charindex();
functionFactory.replace_strReplace();
functionFactory.everyAny_minMaxCase();
functionFactory.octetLength_pattern( "datalength(?1)" );
functionFactory.bitLength_pattern( "datalength(?1)*8" );
functionContributions.getFunctionRegistry().register( "timestampadd",
new IntegralTimestampaddFunction( this, functionContributions.getTypeConfiguration() ) );
functionContributions.getFunctionRegistry().register(
"trunc",
new SybaseTruncFunction( functionContributions.getTypeConfiguration() )
);
functionContributions.getFunctionRegistry().registerAlternateKey( "truncate", "trunc" );
}
@Override
public String getNullColumnString() {
return " null";
}
@Override
public boolean canCreateSchema() {
// As far as I can tell, it does not
return false;
}
@Override
public SchemaNameResolver getSchemaNameResolver() {
return schemaNameResolver;
}
@Override
public String getCurrentSchemaCommand() {
return "select user_name()";
}
@Override
public int getMaxIdentifierLength() {
return 128;
}
@Override
public String castPattern(CastType from, CastType to) {
if ( to == CastType.STRING ) {
switch ( from ) {
case DATE:
return "substring(convert(varchar,?1,23),1,10)";
case TIME:
return "convert(varchar,?1,8)";
case TIMESTAMP:
return "convert(varchar,?1,140)";
}
}
return super.castPattern( from, to );
}
/* Something odd is going on with the jConnect driver when using JDBC escape syntax, so let's use native functions */
@Override
public void appendDateTimeLiteral(
SqlAppender appender,
TemporalAccessor temporalAccessor,
@SuppressWarnings("deprecation")
TemporalType precision,
TimeZone jdbcTimeZone) {
switch ( precision ) {
case DATE:
appender.appendSql( "convert(date,'" );
appendAsDate( appender, temporalAccessor );
appender.appendSql( "',140)" );
break;
case TIME:
appender.appendSql( "convert(time,'" );
appendAsTime( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone );
appender.appendSql( "',8)" );
break;
case TIMESTAMP:
appender.appendSql( "convert(datetime,'" );
appendAsTimestampWithMillis( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone );
appender.appendSql( "',140)" );
break;
default:
throw new IllegalArgumentException();
}
}
@Override
public void appendDateTimeLiteral(
SqlAppender appender,
Date date,
@SuppressWarnings("deprecation")
TemporalType precision,
TimeZone jdbcTimeZone) {
switch ( precision ) {
case DATE:
appender.appendSql( "convert(date,'" );
appendAsDate( appender, date );
appender.appendSql( "',140)" );
break;
case TIME:
appender.appendSql( "convert(time,'" );
appendAsLocalTime( appender, date );
appender.appendSql( "',8)" );
break;
case TIMESTAMP:
appender.appendSql( "convert(datetime,'" );
appendAsTimestampWithMillis( appender, date, jdbcTimeZone );
appender.appendSql( "',140)" );
break;
default:
throw new IllegalArgumentException();
}
}
@Override
public void appendDateTimeLiteral(
SqlAppender appender,
Calendar calendar,
@SuppressWarnings("deprecation")
TemporalType precision,
TimeZone jdbcTimeZone) {
switch ( precision ) {
case DATE:
appender.appendSql( "convert(date,'" );
appendAsDate( appender, calendar );
appender.appendSql( "',140)" );
break;
case TIME:
appender.appendSql( "convert(time,'" );
appendAsLocalTime( appender, calendar );
appender.appendSql( "',8)" );
break;
case TIMESTAMP:
appender.appendSql( "convert(datetime,'" );
appendAsTimestampWithMillis( appender, calendar, jdbcTimeZone );
appender.appendSql( "',140)" );
break;
default:
throw new IllegalArgumentException();
}
}
@Override
public String translateExtractField(TemporalUnit unit) {
return switch (unit) {
case WEEK -> "calweekofyear"; // the ISO week number I think
default -> super.translateExtractField(unit);
};
}
@Override
public String extractPattern(TemporalUnit unit) {
return unit == TemporalUnit.EPOCH
? "datediff(second, '1970-01-01 00:00:00', ?2)"
: "datepart(?1,?2)"; //TODO!
}
@Override
public boolean supportsFractionalTimestampArithmetic() {
return false;
}
@Override @SuppressWarnings("deprecation")
public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, IntervalType intervalType) {
//TODO!!
return "dateadd(?1,?2,?3)";
}
@Override @SuppressWarnings("deprecation")
public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalType, TemporalType toTemporalType) {
//TODO!!
return "datediff(?1,?2,?3)";
}
@Override
public void appendDatetimeFormat(SqlAppender appender, String format) {
throw new UnsupportedOperationException( "format() function not supported on Sybase");
}
@Override
public boolean supportsStandardCurrentTimestampFunction() {
return false;
}
@Override
public IdentifierHelper buildIdentifierHelper(IdentifierHelperBuilder builder, DatabaseMetaData metadata)
throws SQLException {
// Default to MIXED because the jconnect driver doesn't seem to report anything useful
builder.setUnquotedCaseStrategy( IdentifierCaseStrategy.MIXED );
if ( metadata == null ) {
builder.setQuotedCaseStrategy( IdentifierCaseStrategy.MIXED );
}
return super.buildIdentifierHelper( builder, metadata );
}
@Override
public NameQualifierSupport getNameQualifierSupport() {
return NameQualifierSupport.BOTH;
}
@Override
public UniqueDelegate getUniqueDelegate() {
return uniqueDelegate;
}
@Override
public CallableStatementSupport getCallableStatementSupport() {
return driverKind == SybaseDriverKind.JTDS
? JTDSCallableStatementSupport.INSTANCE
: SybaseCallableStatementSupport.INSTANCE;
}
@Override
public boolean supportsNamedParameters(DatabaseMetaData databaseMetaData) throws SQLException {
// Only the jTDS driver supports named parameters properly
return driverKind == SybaseDriverKind.JTDS && super.supportsNamedParameters( databaseMetaData );
}
@Override
public String getAlterColumnTypeString(String columnName, String columnType, String columnDefinition) {
return "modify " + columnName + " " + columnType;
}
@Override
public boolean supportsAlterColumnType() {
return true;
}
@Override
public IdentityColumnSupport getIdentityColumnSupport() {
return driverKind == SybaseDriverKind.JTDS
? AbstractTransactSQLIdentityColumnSupport.INSTANCE
: SybaseJconnIdentityColumnSupport.INSTANCE;
}
@Override
public DmlTargetColumnQualifierSupport getDmlTargetColumnQualifierSupport() {
return DmlTargetColumnQualifierSupport.TABLE_ALIAS;
}
@Override
public boolean supportsFromClauseInUpdate() {
return true;
}
@Override
public boolean supportsRowValueConstructorSyntax() {
return false;
}
@Override
public boolean supportsWithClause() {
return false;
}
@Override
public boolean supportsRowValueConstructorSyntaxInQuantifiedPredicates() {
return false;
}
@Override
public boolean supportsRowValueConstructorSyntaxInInList() {
return false;
}
@Override
public boolean addPartitionKeyToPrimaryKey() {
return true;
}
private static | SybaseDialect |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/protocol/decoder/ArrayBooleanDecoder.java | {
"start": 764,
"end": 1335
} | class ____ implements MultiDecoder<boolean[]> {
@Override
public boolean[] decode(List<Object> parts, State state) {
if (parts.isEmpty()) {
return new boolean[0];
}
boolean[] result = new boolean[parts.size()];
for (int i = 0; i < parts.size(); i++) {
Object part = parts.get(i);
if (part instanceof Boolean) {
result[i] = (boolean) part;
} else {
result[i] = false;
}
}
return result;
}
}
| ArrayBooleanDecoder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/VoidMissingNullableTest.java | {
"start": 4603,
"end": 5440
} | class ____ {
// BUG: Diagnostic contains: @Nullable
List<Void> a;
// BUG: Diagnostic contains: @Nullable
List<? extends Void> b;
// BUG: Diagnostic contains: @Nullable
List<? super Void> c;
List<?> d;
}
""")
.doTest();
}
@Test
public void positiveTypeArgumentOtherAnnotation() {
aggressiveCompilationHelper
.addSourceLines(
"NonNull.java",
"""
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE_USE)
public @ | Test |
java | spring-projects__spring-boot | module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/reactive/context/WebServerManager.java | {
"start": 1367,
"end": 2551
} | class ____ {
private final ReactiveWebServerApplicationContext applicationContext;
private final DelayedInitializationHttpHandler handler;
private final WebServer webServer;
WebServerManager(ReactiveWebServerApplicationContext applicationContext, ReactiveWebServerFactory factory,
Supplier<HttpHandler> handlerSupplier, boolean lazyInit) {
this.applicationContext = applicationContext;
Assert.notNull(factory, "'factory' must not be null");
this.handler = new DelayedInitializationHttpHandler(handlerSupplier, lazyInit);
this.webServer = factory.getWebServer(this.handler);
}
void start() {
this.handler.initializeHandler();
this.webServer.start();
this.applicationContext
.publishEvent(new ReactiveWebServerInitializedEvent(this.webServer, this.applicationContext));
}
void shutDownGracefully(GracefulShutdownCallback callback) {
this.webServer.shutDownGracefully(callback);
}
void stop() {
this.webServer.stop();
}
WebServer getWebServer() {
return this.webServer;
}
HttpHandler getHandler() {
return this.handler;
}
/**
* A delayed {@link HttpHandler} that doesn't initialize things too early.
*/
static final | WebServerManager |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/StringCollectionDeserializer.java | {
"start": 923,
"end": 14692
} | class ____
extends ContainerDeserializerBase<Collection<String>>
{
// // Configuration
/**
* Value deserializer to use, if NOT the standard one
* (if it is, will be null).
*/
private final ValueDeserializer<String> _valueDeserializer;
// // Instance construction settings:
/**
* Instantiator used in case custom handling is needed for creation.
*/
private final ValueInstantiator _valueInstantiator;
/**
* Deserializer that is used iff delegate-based creator is
* to be used for deserializing from JSON Object.
*/
private final ValueDeserializer<Object> _delegateDeserializer;
// NOTE: no PropertyBasedCreator, as JSON Arrays have no properties
/*
/**********************************************************************
/* Life-cycle
/**********************************************************************
*/
public StringCollectionDeserializer(JavaType collectionType,
ValueDeserializer<?> valueDeser, ValueInstantiator valueInstantiator)
{
this(collectionType, valueInstantiator, null, valueDeser, valueDeser, null);
}
@SuppressWarnings("unchecked")
protected StringCollectionDeserializer(JavaType collectionType,
ValueInstantiator valueInstantiator, ValueDeserializer<?> delegateDeser,
ValueDeserializer<?> valueDeser,
NullValueProvider nuller, Boolean unwrapSingle)
{
super(collectionType, nuller, unwrapSingle);
_valueDeserializer = (ValueDeserializer<String>) valueDeser;
_valueInstantiator = valueInstantiator;
_delegateDeserializer = (ValueDeserializer<Object>) delegateDeser;
}
protected StringCollectionDeserializer withResolved(ValueDeserializer<?> delegateDeser,
ValueDeserializer<?> valueDeser,
NullValueProvider nuller, Boolean unwrapSingle)
{
if ((Objects.equals(_unwrapSingle, unwrapSingle)) && (_nullProvider == nuller)
&& (_valueDeserializer == valueDeser) && (_delegateDeserializer == delegateDeser)) {
return this;
}
return new StringCollectionDeserializer(_containerType, _valueInstantiator,
delegateDeser, valueDeser, nuller, unwrapSingle);
}
@Override
public boolean isCachable() {
// 26-Mar-2015, tatu: Important: prevent caching if custom deserializers via annotations
// are involved
return (_valueDeserializer == null) && (_delegateDeserializer == null);
}
@Override
public LogicalType logicalType() {
return LogicalType.Collection;
}
/*
/**********************************************************************
/* Validation, post-processing
/**********************************************************************
*/
    /**
     * Contextualization: resolves (1) a possible delegate deserializer for
     * delegate-based creators, (2) the element deserializer (possibly via a
     * content converter), (3) "unwrap single value" format feature and
     * (4) the content null provider; then returns a (possibly new) instance
     * via {@link #withResolved}.
     */
    @Override
    public ValueDeserializer<?> createContextual(DeserializationContext ctxt,
            BeanProperty property)
    {
        // May need to resolve types for delegate-based creators:
        ValueDeserializer<Object> delegate = null;
        if (_valueInstantiator != null) {
            // [databind#2324]: check both array-delegating and delegating
            AnnotatedWithParams delegateCreator = _valueInstantiator.getArrayDelegateCreator();
            if (delegateCreator != null) {
                JavaType delegateType = _valueInstantiator.getArrayDelegateType(ctxt.getConfig());
                delegate = findDeserializer(ctxt, delegateType, property);
            } else if ((delegateCreator = _valueInstantiator.getDelegateCreator()) != null) {
                JavaType delegateType = _valueInstantiator.getDelegateType(ctxt.getConfig());
                delegate = findDeserializer(ctxt, delegateType, property);
            }
        }
        ValueDeserializer<?> valueDeser = _valueDeserializer;
        final JavaType valueType = _containerType.getContentType();
        if (valueDeser == null) {
            // [databind#125]: May have a content converter
            valueDeser = findConvertingContentDeserializer(ctxt, property, valueDeser);
            if (valueDeser == null) {
                // And we may also need to get deserializer for String
                valueDeser = ctxt.findContextualValueDeserializer(valueType, property);
            }
        } else { // if directly assigned, probably not yet contextual, so:
            valueDeser = ctxt.handleSecondaryContextualization(valueDeser, property, valueType);
        }
        // 11-Dec-2015, tatu: Should we pass basic `Collection.class`, or more refined? Mostly
        //   comes down to "List vs Collection" I suppose... for now, pass Collection
        Boolean unwrapSingle = findFormatFeature(ctxt, property, Collection.class,
                JsonFormat.Feature.ACCEPT_SINGLE_VALUE_AS_ARRAY);
        NullValueProvider nuller = findContentNullProvider(ctxt, property, valueDeser);
        // Drop default deserializer so the optimized in-line String handling is used
        if (isDefaultDeserializer(valueDeser)) {
            valueDeser = null;
        }
        return withResolved(delegate, valueDeser, nuller, unwrapSingle);
    }
/*
/**********************************************************************
/* ContainerDeserializerBase API
/**********************************************************************
*/
@SuppressWarnings("unchecked")
@Override
public ValueDeserializer<Object> getContentDeserializer() {
ValueDeserializer<?> deser = _valueDeserializer;
return (ValueDeserializer<Object>) deser;
}
    @Override
    public ValueInstantiator getValueInstantiator() {
        return _valueInstantiator;
    }
/*
/**********************************************************************
/* ValueDeserializer impl
/**********************************************************************
*/
@Override
public Collection<String> deserialize(JsonParser p, DeserializationContext ctxt)
throws JacksonException
{
if (_delegateDeserializer != null) {
return castToCollection(_valueInstantiator.createUsingDelegate(ctxt,
_delegateDeserializer.deserialize(p, ctxt)));
}
final Collection<String> result = castToCollection(_valueInstantiator.createUsingDefault(ctxt));
return deserialize(p, ctxt, result);
}
    /**
     * Deserializes array contents into the given (existing) Collection.
     * Non-array input is routed to {@link #handleNonArray}; a custom element
     * deserializer, if configured, is handled by {@code deserializeUsingCustom}.
     */
    @Override
    public Collection<String> deserialize(JsonParser p, DeserializationContext ctxt,
            Collection<String> result)
        throws JacksonException
    {
        // Ok: must point to START_ARRAY
        if (!p.isExpectedStartArrayToken()) {
            return handleNonArray(p, ctxt, result);
        }
        if (_valueDeserializer != null) {
            return deserializeUsingCustom(p, ctxt, result, _valueDeserializer);
        }
        try {
            while (true) {
                // First the common case:
                String value = p.nextStringValue();
                if (value != null) {
                    result.add(value);
                    continue;
                }
                // null return: END_ARRAY, JSON null, or a non-String scalar
                JsonToken t = p.currentToken();
                if (t == JsonToken.END_ARRAY) {
                    break;
                }
                if (t == JsonToken.VALUE_NULL) {
                    if (_skipNullValues) {
                        continue;
                    }
                    // else fall through with value == null; null provider consulted below
                } else {
                    // coerce non-String scalar into String
                    value = _parseString(p, ctxt, _nullProvider);
                }
                if (value == null) {
                    value = (String) _nullProvider.getNullValue(ctxt);
                    if (value == null && _skipNullValues) {
                        continue;
                    }
                }
                result.add(value);
            }
        } catch (Exception e) {
            // attach path info (container + index) for error reporting
            throw DatabindException.wrapWithPath(ctxt, e,
                    new JacksonException.Reference(result, result.size()));
        }
        return result;
    }
    /**
     * Variant of the element-reading loop used when a custom (non-default)
     * String deserializer is configured: the custom deserializer is invoked
     * for every non-null, non-END_ARRAY token.
     */
    private Collection<String> deserializeUsingCustom(JsonParser p, DeserializationContext ctxt,
            Collection<String> result, final ValueDeserializer<String> deser) throws JacksonException
    {
        try {
            while (true) {
                /* 30-Dec-2014, tatu: This may look odd, but let's actually call method
                 *   that suggest we are expecting a String; this helps with some formats,
                 *   notably XML. Note, however, that while we can get String, we can't
                 *   assume that's what we use due to custom deserializer
                 */
                String value;
                if (p.nextStringValue() == null) {
                    JsonToken t = p.currentToken();
                    if (t == JsonToken.END_ARRAY) {
                        break;
                    }
                    // Ok: no need to convert Strings, but must recognize nulls
                    if (t == JsonToken.VALUE_NULL) {
                        if (_skipNullValues) {
                            continue;
                        }
                        value = null;
                    } else {
                        value = deser.deserialize(p, ctxt);
                    }
                } else {
                    // String token advanced to; still let the custom deserializer read it
                    value = deser.deserialize(p, ctxt);
                }
                if (value == null) {
                    value = (String) _nullProvider.getNullValue(ctxt);
                    if (value == null && _skipNullValues) {
                        continue;
                    }
                }
                result.add(value);
            }
        } catch (Exception e) {
            // attach path info (container + index) for error reporting
            throw DatabindException.wrapWithPath(ctxt, e,
                    new JacksonException.Reference(result, result.size()));
        }
        return result;
    }
    /**
     * Polymorphic deserialization: delegates to the type deserializer's
     * array handling, since content is expected to be a JSON Array.
     */
    @Override
    public Object deserializeWithType(JsonParser p, DeserializationContext ctxt,
            TypeDeserializer typeDeserializer) throws JacksonException {
        // In future could check current token... for now this should be enough:
        return typeDeserializer.deserializeTypedFromArray(p, ctxt);
    }
    /**
     * Helper method called when current token is not START_ARRAY. Will either
     * throw an exception, or try to handle value as if member of implicit
     * array, depending on configuration (per-property {@code unwrapSingle}
     * setting, falling back to {@code ACCEPT_SINGLE_VALUE_AS_ARRAY}).
     * Empty/blank String input may additionally be coerced per configured
     * {@link CoercionAction}.
     */
    private final Collection<String> handleNonArray(JsonParser p, DeserializationContext ctxt,
            Collection<String> result) throws JacksonException
    {
        // implicit arrays from single values?
        boolean canWrap = (_unwrapSingle == Boolean.TRUE) ||
                ((_unwrapSingle == null) &&
                        ctxt.isEnabled(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY));
        if (!canWrap) {
            // not wrappable: either String-coercible, or a problem to report
            if (p.hasToken(JsonToken.VALUE_STRING)) {
                return _deserializeFromString(p, ctxt);
            }
            return castToCollection(ctxt.handleUnexpectedToken(_containerType, p));
        }
        // Strings are one of "native" (intrinsic) types, so there's never type deserializer involved
        ValueDeserializer<String> valueDes = _valueDeserializer;
        JsonToken t = p.currentToken();
        String value;
        if (t == JsonToken.VALUE_NULL) {
            // 03-Feb-2017, tatu: Does this work?
            if (_skipNullValues) {
                return result;
            }
            value = null;
        } else {
            if (p.hasToken(JsonToken.VALUE_STRING)) {
                String textValue = p.getString();
                // https://github.com/FasterXML/jackson-dataformat-xml/issues/513
                if (textValue.isEmpty()) {
                    final CoercionAction act = ctxt.findCoercionAction(logicalType(), handledType(),
                            CoercionInputShape.EmptyString);
                    if (act != CoercionAction.Fail) {
                        return castToCollection(_deserializeFromEmptyString(p, ctxt, act, handledType(),
                                "empty String (\"\")"));
                    }
                } else if (_isBlank(textValue)) {
                    final CoercionAction act = ctxt.findCoercionFromBlankString(logicalType(), handledType(),
                            CoercionAction.Fail);
                    if (act != CoercionAction.Fail) {
                        return castToCollection(_deserializeFromEmptyString(p, ctxt, act, handledType(),
                                "blank String (all whitespace)"));
                    }
                }
                // if coercion failed, we can still add it to a list
            }
            value = (valueDes == null) ? _parseString(p, ctxt, _nullProvider) : valueDes.deserialize(p, ctxt);
        }
        if (value == null) {
            value = (String) _nullProvider.getNullValue(ctxt);
            if (value == null && _skipNullValues) {
                return result;
            }
        }
        result.add(value);
        return result;
    }
// Used to avoid type pollution: see
// https://micronaut-projects.github.io/micronaut-test/latest/guide/#typePollution
// for details
//
// @since 2.18
@SuppressWarnings("unchecked")
private static Collection<String> castToCollection(Object o) {
if (o != null) {
// fast path for specific classes to avoid type pollution:
// https://micronaut-projects.github.io/micronaut-test/latest/guide/#typePollution
if (o.getClass() == ArrayList.class) {
return (ArrayList<String>) o;
}
if (o.getClass() == HashSet.class) {
return (HashSet<String>) o;
}
}
return (Collection<String>) o;
}
}
| StringCollectionDeserializer |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/format/datetime/standard/Jsr310DateTimeFormatAnnotationFormatterFactory.java | {
"start": 1736,
"end": 4575
} | class ____ extends EmbeddedValueResolutionSupport
implements AnnotationFormatterFactory<DateTimeFormat> {
// Create the set of field types that may be annotated with @DateTimeFormat.
private static final Set<Class<?>> FIELD_TYPES = Set.of(
Instant.class,
LocalDate.class,
LocalTime.class,
LocalDateTime.class,
ZonedDateTime.class,
OffsetDateTime.class,
OffsetTime.class,
YearMonth.class,
MonthDay.class);
@Override
public final Set<Class<?>> getFieldTypes() {
return FIELD_TYPES;
}
@Override
public Printer<?> getPrinter(DateTimeFormat annotation, Class<?> fieldType) {
DateTimeFormatter formatter = getFormatter(annotation, fieldType);
// Efficient ISO_LOCAL_* variants for printing since they are twice as fast...
if (formatter == DateTimeFormatter.ISO_DATE) {
if (isLocal(fieldType)) {
formatter = DateTimeFormatter.ISO_LOCAL_DATE;
}
}
else if (formatter == DateTimeFormatter.ISO_TIME) {
if (isLocal(fieldType)) {
formatter = DateTimeFormatter.ISO_LOCAL_TIME;
}
}
else if (formatter == DateTimeFormatter.ISO_DATE_TIME) {
if (isLocal(fieldType)) {
formatter = DateTimeFormatter.ISO_LOCAL_DATE_TIME;
}
}
return new TemporalAccessorPrinter(formatter);
}
@Override
@SuppressWarnings("unchecked")
public Parser<?> getParser(DateTimeFormat annotation, Class<?> fieldType) {
DateTimeFormatter formatter = getFormatter(annotation, fieldType);
List<String> resolvedFallbackPatterns = new ArrayList<>();
for (String fallbackPattern : annotation.fallbackPatterns()) {
String resolvedFallbackPattern = resolveEmbeddedValue(fallbackPattern);
if (StringUtils.hasLength(resolvedFallbackPattern)) {
resolvedFallbackPatterns.add(resolvedFallbackPattern);
}
}
return new TemporalAccessorParser((Class<? extends TemporalAccessor>) fieldType,
formatter, resolvedFallbackPatterns.toArray(new String[0]), annotation);
}
/**
* Factory method used to create a {@link DateTimeFormatter}.
* @param annotation the format annotation for the field
* @param fieldType the declared type of the field
* @return a {@link DateTimeFormatter} instance
*/
protected DateTimeFormatter getFormatter(DateTimeFormat annotation, Class<?> fieldType) {
DateTimeFormatterFactory factory = new DateTimeFormatterFactory();
String style = resolveEmbeddedValue(annotation.style());
if (StringUtils.hasLength(style)) {
factory.setStylePattern(style);
}
factory.setIso(annotation.iso());
String pattern = resolveEmbeddedValue(annotation.pattern());
if (StringUtils.hasLength(pattern)) {
factory.setPattern(pattern);
}
return factory.createDateTimeFormatter();
}
private boolean isLocal(Class<?> fieldType) {
return fieldType.getSimpleName().startsWith("Local");
}
}
| Jsr310DateTimeFormatAnnotationFormatterFactory |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/resource/basic/CollectionDefaultValueTest.java | {
"start": 1069,
"end": 2344
} | class ____ {
static Client client;
@RegisterExtension
static ResteasyReactiveUnitTest testExtension = new ResteasyReactiveUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
JavaArchive war = ShrinkWrap.create(JavaArchive.class);
war.addClasses(CollectionDefaultValueResource.class, PortProviderUtil.class);
return war;
}
});
@BeforeEach
public void init() {
client = ClientBuilder.newClient();
}
@AfterEach
public void after() throws Exception {
client.close();
}
private String generateURL(String path) {
return PortProviderUtil.generateURL(path, CollectionDefaultValueTest.class.getSimpleName());
}
/**
* @tpTestDetails Test that empty QueryParam list is empty
* @tpSince RESTEasy 3.0.16
*/
@Test
@DisplayName("Test Empty")
public void testEmpty() throws Exception {
Response response = client.target(generateURL("/collection")).request().get();
Assertions.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
response.close();
}
}
| CollectionDefaultValueTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/uniquekey/NaturalIdCachingTest.java | {
"start": 1987,
"end": 2518
} | class ____ implements Serializable {
@Id
private Integer id;
@ManyToOne(fetch = FetchType.LAZY)
@JoinColumn(name="PROP_CODE", referencedColumnName = "CODE")
@JoinColumn(name="PROP_ITEM", referencedColumnName = "ITEM")
private Property property;
private String severalOtherFields = "Several other fields ...";
protected PropertyHolder() {}
public PropertyHolder(Integer id, Property property) {
this.id = id;
this.property = property;
}
}
@Entity(name = "PropertyEntity")
public static | PropertyHolder |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java | {
"start": 14213,
"end": 15263
} | class ____ implements
SleepDurationCalculator {
private double[] thresholds;
private double[] slowFactors;
DynamicSleepDurationCalcImpl() {
thresholds = new double[] {
0.1, 0.25, 0.4, 0.5, 0.6, 0.65, 0.7, 0.8, 0.9
};
slowFactors = new double[] {
2.0, 4.0, 5.0, 6.0, 10.0, 15.0, 20.0, 25.0, 30.0
};
}
public long calcSleepDuration(TaskAttemptID taId, int currCount,
int totalCount,
long defaultSleepDuration) {
if ((taId.getTaskType() == TaskType.MAP)
&& (taId.getTaskID().getId() == 0) && (taId.getId() == 0)) {
double currProgress = ((double) currCount) / totalCount;
double slowFactor = 1.0;
for (int i = 0; i < thresholds.length; i++) {
if (thresholds[i] >= currProgress) {
break;
}
slowFactor = slowFactors[i];
}
return (long) (slowFactor * defaultSleepDuration);
}
return defaultSleepDuration;
}
}
/**
* Dummy | DynamicSleepDurationCalcImpl |
java | apache__camel | components/camel-observation/src/test/java/org/apache/camel/observation/SpanTestData.java | {
"start": 1024,
"end": 2801
} | class ____ {
private String label;
private String uri;
private String operation;
private SpanKind kind = SpanKind.INTERNAL;
private int parentId = -1;
private List<String> logMessages = new ArrayList<>();
private Map<String, String> tags = new HashMap<>();
private ArrayList<SpanTestData> childs = new ArrayList<>();
private Map<String, String> baggage = new HashMap<>();
public String getLabel() {
return label;
}
public SpanTestData setLabel(String label) {
this.label = label;
return this;
}
public String getUri() {
return uri;
}
public SpanTestData setUri(String uri) {
this.uri = uri;
return this;
}
public String getOperation() {
return operation;
}
public SpanTestData setOperation(String operation) {
this.operation = operation;
return this;
}
public SpanKind getKind() {
return kind;
}
public SpanTestData setKind(SpanKind kind) {
this.kind = kind;
return this;
}
public int getParentId() {
return parentId;
}
public SpanTestData setParentId(int parentId) {
this.parentId = parentId;
return this;
}
public SpanTestData addLogMessage(String mesg) {
logMessages.add(mesg);
return this;
}
public List<String> getLogMessages() {
return logMessages;
}
public SpanTestData addTag(String key, String val) {
tags.put(key, val);
return this;
}
public Map<String, String> getTags() {
return tags;
}
public SpanTestData setChilds(SpanTestData[] childs) {
Collections.addAll(this.childs, childs);
return this;
}
}
| SpanTestData |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/injection/unsatisfied/UnsatisfiedMatchByRestrictedTypeTest.java | {
"start": 1152,
"end": 1273
} | class ____ {
@Inject
FooService foo;
}
@Typed(Comparable.class)
@Singleton
static | Consumer |
java | quarkusio__quarkus | independent-projects/tools/registry-client/src/main/java/io/quarkus/registry/PlatformPreferenceIndex.java | {
"start": 714,
"end": 1453
} | class ____ {
private final Map<Integer, List<PlatformReleasePreferenceIndex>> releaseIndices = new HashMap<>();
PlatformReleasePreferenceIndex getReleaseIndex(int registryIndex, String platformKey) {
var list = releaseIndices.computeIfAbsent(registryIndex, k -> new ArrayList<>(1));
for (int i = 0; i < list.size(); ++i) {
final PlatformReleasePreferenceIndex candidate = list.get(i);
if (candidate.getPlatformKey().equals(platformKey)) {
return candidate;
}
}
final PlatformReleasePreferenceIndex result = new PlatformReleasePreferenceIndex(platformKey, list.size());
list.add(result);
return result;
}
}
| PlatformPreferenceIndex |
java | google__dagger | javatests/artifacts/dagger-ksp/transitive-annotation-app/library1/src/main/java/library1/MySubcomponentWithBuilder.java | {
"start": 2519,
"end": 6093
} | class ____ {
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public abstract Builder mySubcomponentModule(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MySubcomponentModule mySubcomponentModule);
@BindsInstance
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public abstract Builder qualifiedMySubcomponentBinding(
@MyQualifier
// TODO(b/219587431): Support @MyTransitiveAnnotation (Requires generating metadata).
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MySubcomponentBinding subcomponentBinding);
@BindsInstance
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public abstract Builder unqualifiedMySubcomponentBinding(
// TODO(b/219587431): Support @MyTransitiveAnnotation (Requires generating metadata).
@MyAnnotation(MyTransitiveType.VALUE) @MyOtherAnnotation(MyTransitiveType.class)
MySubcomponentBinding subcomponentBinding);
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public abstract MySubcomponentWithBuilder build();
// Non-dagger code
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public String nonDaggerField = "";
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static String nonDaggerStaticField = "";
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public void nonDaggerMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
String str) {}
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static void nonDaggerStaticMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
String str) {}
}
// Non-dagger code
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public MyTransitiveType nonDaggerField = null;
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static MyTransitiveType nonDaggerStaticField = null;
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public MyTransitiveType nonDaggerMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyTransitiveType nonDaggerParameter) {
return nonDaggerParameter;
}
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static MyTransitiveType nonDaggerStaticMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyTransitiveType nonDaggerParameter) {
return nonDaggerParameter;
}
}
| Builder |
java | apache__kafka | raft/src/test/java/org/apache/kafka/raft/RaftEventSimulationTest.java | {
"start": 40553,
"end": 40710
} | interface ____ {
boolean acceptInbound(RaftMessage message);
boolean acceptOutbound(RaftMessage message);
}
private static | NetworkFilter |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/source/coordinator/SourceCoordinatorContext.java | {
"start": 29082,
"end": 31488
} | class ____ {
private final Map<Integer, OperatorCoordinator.SubtaskGateway>[] gateways;
private SubtaskGateways(int parallelism) {
gateways = new Map[parallelism];
for (int i = 0; i < parallelism; i++) {
gateways[i] = new HashMap<>();
}
}
private void registerSubtaskGateway(OperatorCoordinator.SubtaskGateway gateway) {
final int subtaskIndex = gateway.getSubtask();
final int attemptNumber = gateway.getExecution().getAttemptNumber();
checkState(
!gateways[subtaskIndex].containsKey(attemptNumber),
"Already have a subtask gateway for %s (#%s).",
subtaskIndex,
attemptNumber);
gateways[subtaskIndex].put(attemptNumber, gateway);
}
private void unregisterSubtaskGateway(int subtaskIndex, int attemptNumber) {
gateways[subtaskIndex].remove(attemptNumber);
}
private OperatorCoordinator.SubtaskGateway getOnlyGatewayAndCheckReady(int subtaskIndex) {
checkState(
gateways[subtaskIndex].size() > 0,
"Subtask %s is not ready yet to receive events.",
subtaskIndex);
return Iterables.getOnlyElement(gateways[subtaskIndex].values());
}
private OperatorCoordinator.SubtaskGateway getOnlyGatewayAndNotCheckReady(
int subtaskIndex) {
if (gateways[subtaskIndex].size() > 0) {
return Iterables.getOnlyElement(gateways[subtaskIndex].values());
} else {
return null;
}
}
private OperatorCoordinator.SubtaskGateway getGatewayAndCheckReady(
int subtaskIndex, int attemptNumber) {
final OperatorCoordinator.SubtaskGateway gateway =
gateways[subtaskIndex].get(attemptNumber);
if (gateway != null) {
return gateway;
}
throw new IllegalStateException(
String.format(
"Subtask %d (#%d) is not ready yet to receive events.",
subtaskIndex, attemptNumber));
}
private void reset(int subtaskIndex) {
gateways[subtaskIndex].clear();
}
}
}
| SubtaskGateways |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/EndpointDisabledTest.java | {
"start": 532,
"end": 1999
} | class ____ {
@RegisterExtension
static QuarkusUnitTest TEST = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(InvalidEncodingTest.FeedbackBody.class, InvalidEncodingTest.FeedbackResource.class)
.addAsResource(new StringAsset("dummy.disabled=true"),
"application.properties"));
@Test
public void endpointWithNoAnnotation() {
get("/no-annotation")
.then()
.statusCode(200)
.body(equalTo("no"));
}
@Test
public void shouldBeDisabledBecauseOfMatchingProperty() {
get("/dummy-disabled-true")
.then()
.statusCode(404);
}
@Test
public void shouldBeEnabledBecauseOfNonMatchingProperty() {
get("/dummy-disabled-false")
.then()
.statusCode(200)
.body(equalTo("dummy.disabled=false"));
}
@Test
public void shouldBeDisabledBecauseOfNonExistingProperty() {
get("/other-dummy-disabled-missing-true")
.then()
.statusCode(404);
}
@Test
public void shouldBeEnabledBecauseOfNonExistingProperty() {
get("/other-dummy-disabled-missing-false")
.then()
.statusCode(200)
.body(equalTo("missing=false"));
}
@Path("no-annotation")
public static | EndpointDisabledTest |
java | elastic__elasticsearch | test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java | {
"start": 1626,
"end": 9820
} | class ____ extends ESTestCase {
public void testLoggerUsageChecks() throws IOException {
for (Method method : getClass().getMethods()) {
if (method.getDeclaringClass().equals(getClass())) {
if (method.getName().startsWith("check")) {
logger.info("Checking logger usage for method {}", method.getName());
InputStream classInputStream = getClass().getResourceAsStream(getClass().getSimpleName() + ".class");
List<WrongLoggerUsage> errors = new ArrayList<>();
ESLoggerUsageChecker.check(
errors::add,
classInputStream,
m -> m.equals(method.getName()) || m.startsWith("lambda$" + method.getName())
);
if (method.getName().startsWith("checkFail")) {
assertFalse("Expected " + method.getName() + " to have wrong Logger usage", errors.isEmpty());
} else {
assertTrue("Method " + method.getName() + " has unexpected Logger usage errors: " + errors, errors.isEmpty());
}
} else {
assertTrue("only allow methods starting with test or check in this class", method.getName().startsWith("test"));
}
}
}
}
public void testLoggerUsageCheckerCompatibilityWithLog4j2Logger() throws NoSuchMethodException {
for (Method method : Logger.class.getMethods()) {
if (ESLoggerUsageChecker.LOGGER_METHODS.contains(method.getName())) {
assertThat(method.getParameterTypes().length, greaterThanOrEqualTo(1));
int markerOffset = method.getParameterTypes()[0].equals(Marker.class) ? 1 : 0;
int paramLength = method.getParameterTypes().length - markerOffset;
if (method.isVarArgs()) {
assertEquals(2, paramLength);
assertEquals(String.class, method.getParameterTypes()[markerOffset]);
assertThat(method.getParameterTypes()[markerOffset + 1], is(oneOf(Object[].class, Supplier[].class)));
} else {
assertThat(
method.getParameterTypes()[markerOffset],
is(oneOf(Message.class, MessageSupplier.class, CharSequence.class, Object.class, String.class, Supplier.class))
);
if (paramLength == 2) {
assertThat(method.getParameterTypes()[markerOffset + 1], is(oneOf(Throwable.class, Object.class)));
if (method.getParameterTypes()[markerOffset + 1].equals(Object.class)) {
assertEquals(String.class, method.getParameterTypes()[markerOffset]);
}
}
if (paramLength > 2) {
assertEquals(String.class, method.getParameterTypes()[markerOffset]);
assertThat(paramLength, lessThanOrEqualTo(11));
for (int i = 1; i < paramLength; i++) {
assertEquals(Object.class, method.getParameterTypes()[markerOffset + i]);
}
}
}
}
}
for (String methodName : ESLoggerUsageChecker.LOGGER_METHODS) {
assertEquals(48, Stream.of(Logger.class.getMethods()).filter(m -> methodName.equals(m.getName())).count());
}
for (Constructor<?> constructor : ParameterizedMessage.class.getConstructors()) {
assertThat(constructor.getParameterTypes().length, greaterThanOrEqualTo(2));
assertEquals(String.class, constructor.getParameterTypes()[0]);
assertThat(constructor.getParameterTypes()[1], is(oneOf(String[].class, Object[].class, Object.class)));
if (constructor.getParameterTypes().length > 2) {
assertEquals(3, constructor.getParameterTypes().length);
if (constructor.getParameterTypes()[1].equals(Object.class)) {
assertEquals(Object.class, constructor.getParameterTypes()[2]);
} else {
assertEquals(Throwable.class, constructor.getParameterTypes()[2]);
}
}
}
assertEquals(5, ParameterizedMessage.class.getConstructors().length);
}
public void checkArgumentsProvidedInConstructor() {
logger.debug(new ESLogMessage("message {}", "some-arg").field("x-opaque-id", "some-value"));
}
public void checkWithUsage() {
logger.debug(
new ESLogMessage("message {}").argAndField("x-opaque-id", "some-value").field("field", "value").with("field2", "value2")
);
}
public void checkFailArraySizeForSubclasses(Object... arr) {
logger.debug(new ESLogMessage("message {}", arr));
}
public void checkFailForTooManyArgumentsInConstr() {
logger.debug(new ESLogMessage("message {}", "arg1", "arg2"));
}
public void checkFailForTooManyArgumentsWithChain() {
logger.debug(new ESLogMessage("message {}").argAndField("x-opaque-id", "some-value").argAndField("too-many-arg", "xxx"));
}
public void checkNumberOfArguments1() {
logger.info("Hello {}", "world");
}
public void checkFailNumberOfArguments1() {
logger.info("Hello {}");
}
@SuppressLoggerChecks(reason = "test ignore functionality")
public void checkIgnoreWhenAnnotationPresent() {
logger.info("Hello {}");
}
public void checkNumberOfArguments2() {
logger.info("Hello {}, {}, {}", "world", 2, "third argument");
}
public void checkFailNumberOfArguments2() {
logger.info("Hello {}, {}", "world", 2, "third argument");
}
public void checkNumberOfArguments3() {
logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, new String("last arg"));
}
public void checkFailNumberOfArguments3() {
logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, 7, new String("last arg"));
}
public void checkOrderOfExceptionArgument() {
logger.info("Hello", new Exception());
}
public void checkOrderOfExceptionArgument1() {
logger.info(() -> format("Hello %s", "world"), new Exception());
}
public void checkFailOrderOfExceptionArgument1() {
logger.info("Hello {}", "world", new Exception());
}
public void checkOrderOfExceptionArgument2() {
logger.info(() -> format("Hello %s, %s", "world", 42), new Exception());
}
public void checkFailOrderOfExceptionArgument2() {
logger.info("Hello {}, {}", "world", 42, new Exception());
}
public void checkNonConstantMessageWithZeroArguments(boolean b) {
logger.info(Boolean.toString(b), new Exception());
}
public void checkComplexUsage(boolean b) {
String message = "Hello {}, {}";
Object[] args = new Object[] { "world", 42 };
if (b) {
message = "also two args {}{}";
args = new Object[] { "world", 43 };
}
logger.info(message, args);
}
public void checkFailComplexUsage1(boolean b) {
String message = "Hello {}, {}";
Object[] args = new Object[] { "world", 42 };
if (b) {
message = "just one arg {}";
args = new Object[] { "world", 43 };
}
logger.info(message, args);
}
public void checkFailComplexUsage2(boolean b) {
String message = "Hello {}, {}";
Object[] args = new Object[] { "world", 42 };
if (b) {
message = "also two args {}{}";
args = new Object[] { "world", 43, "another argument" };
}
logger.info(message, args);
}
public void checkDeprecationLogger() {
DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ESLoggerUsageTests.class);
deprecationLogger.warn(DeprecationCategory.OTHER, "key", "message {}", 123);
}
}
| ESLoggerUsageTests |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/result_handler_type/PersonMapper.java | {
"start": 852,
"end": 995
} | interface ____ {
List<Person> doSelect();
@Select("select * from person")
@MapKey("id")
Map<Integer, Person> selectAsMap();
}
| PersonMapper |
java | apache__camel | components/camel-azure/camel-azure-storage-queue/src/main/java/org/apache/camel/component/azure/storage/queue/QueueConfigurationOptionsProxy.java | {
"start": 1135,
"end": 1333
} | class ____ {@link QueueConfiguration} and {@link QueueExchangeHeaders}. Ideally this is responsible to obtain
* the correct configurations options either from configs or exchange headers
*/
public | for |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/SmartMatchTest_snake2.java | {
"start": 147,
"end": 401
} | class ____ extends TestCase {
public void test_0() throws Exception {
String text = "{\"_id\":1001}";
VO vo = JSON.parseObject(text, VO.class);
Assert.assertEquals(1001, vo.id);
}
public static | SmartMatchTest_snake2 |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/binding/BindingDeclaration.java | {
"start": 942,
"end": 2084
} | class ____ extends Declaration {
/**
* Returns {@code true} if using this binding requires an instance of the {@link
* #contributingModule()}.
*/
public abstract boolean requiresModuleInstance();
/**
* Returns {@code true} if this binding may provide {@code null} instead of an instance of {@link
* #key()}. Nullable bindings cannot be requested from {@linkplain DependencyRequest#isNullable()
* non-nullable dependency requests}.
*/
public abstract boolean isNullable();
/** The kind of binding this instance represents. */
public abstract BindingKind kind();
/** The set of {@link DependencyRequest dependencies} required to satisfy this binding. */
public abstract ImmutableSet<DependencyRequest> dependencies();
/**
* If this binding's key's type parameters are different from those of the {@link
* #bindingTypeElement()}, this is the binding for the {@link #bindingTypeElement()}'s unresolved
* type.
*/
public abstract Optional<? extends Binding> unresolved();
/** Returns the optional scope used on the binding. */
public abstract Optional<Scope> scope();
}
| BindingDeclaration |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/ServletApiConfigurerTests.java | {
"start": 13336,
"end": 13621
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.servletApi(withDefaults());
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | ServletApiWithDefaultsInLambdaConfig |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/matching/RegexMatchTest.java | {
"start": 468,
"end": 1755
} | class ____ {
@RegisterExtension
static ResteasyReactiveUnitTest test = new ResteasyReactiveUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClass(RegexResource.class);
}
});
@Test
public void testRegexMatch() {
RestAssured.get("/regex/1234")
.then()
.statusCode(200)
.body(equalTo("pin 1234"));
RestAssured.get("/regex/12345")
.then()
.statusCode(404);
}
@Test
public void testLiteralInRegex() {
RestAssured.get("/regex/abb/foo/alongpathtotriggerbug")
.then()
.statusCode(200)
.body(equalTo("plain:abb/foo/alongpathtotriggerbug"));
RestAssured.get("/regex/first space/foo/second space")
.then()
.statusCode(200)
.body(equalTo("plain:first space/foo/second space"));
RestAssured.get("/regex/abb/literal/ddc")
.then()
.statusCode(200)
.body(equalTo("literal:abb/ddc"));
}
}
| RegexMatchTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/util/PatternMatcher.java | {
"start": 950,
"end": 1441
} | interface ____ {
/**
* Returns <code>true</code> if the given <code>source</code> matches the specified <code>pattern</code>,
* <code>false</code> otherwise.
*
* @param pattern the pattern to match against
* @param source the source to match
* @return <code>true</code> if the given <code>source</code> matches the specified <code>pattern</code>,
* <code>false</code> otherwise.
*/
boolean matches(String pattern, String source);
}
| PatternMatcher |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/UnalignedCheckpointITCase.java | {
"start": 19876,
"end": 20575
} | class ____ extends KeyedProcessFunction<Long, Long, Long> {
ValueState<Long> state;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
state =
getRuntimeContext()
.getState(
new ValueStateDescriptor<>(
"keyedState", BasicTypeInfo.LONG_TYPE_INFO));
}
@Override
public void processElement(Long value, Context ctx, Collector<Long> out) {
checkHeader(value);
out.collect(value);
}
}
private static | KeyedIdentityFunction |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanTests.java | {
"start": 19204,
"end": 19372
} | class ____ {
@Bean
ValidatedBean validatedBean() {
return new ValidatedBean();
}
}
@Configuration(proxyBeanMethods = false)
static | ValidatedBeanConfiguration |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/RestRegistry.java | {
"start": 2495,
"end": 2580
} | class ____.
* <p/>
* If the input accepts a list, then <tt>List< | name |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchSparseRequestManager.java | {
"start": 1443,
"end": 3496
} | class ____ extends AlibabaCloudSearchRequestManager {
private static final Logger logger = LogManager.getLogger(AlibabaCloudSearchSparseRequestManager.class);
private static final ResponseHandler HANDLER = createEmbeddingsHandler();
private static ResponseHandler createEmbeddingsHandler() {
return new AlibabaCloudSearchResponseHandler(
"alibaba cloud search sparse embedding",
AlibabaCloudSearchSparseResponseEntity::fromResponse
);
}
public static AlibabaCloudSearchSparseRequestManager of(
AlibabaCloudSearchAccount account,
AlibabaCloudSearchSparseModel model,
ThreadPool threadPool
) {
return new AlibabaCloudSearchSparseRequestManager(
Objects.requireNonNull(account),
Objects.requireNonNull(model),
Objects.requireNonNull(threadPool)
);
}
private final AlibabaCloudSearchSparseModel model;
private final AlibabaCloudSearchAccount account;
private AlibabaCloudSearchSparseRequestManager(
AlibabaCloudSearchAccount account,
AlibabaCloudSearchSparseModel model,
ThreadPool threadPool
) {
super(threadPool, model);
this.account = Objects.requireNonNull(account);
this.model = Objects.requireNonNull(model);
}
@Override
public void execute(
InferenceInputs inferenceInputs,
RequestSender requestSender,
Supplier<Boolean> hasRequestCompletedFunction,
ActionListener<InferenceServiceResults> listener
) {
EmbeddingsInput input = inferenceInputs.castTo(EmbeddingsInput.class);
List<String> docsInput = input.getTextInputs();
InputType inputType = input.getInputType();
AlibabaCloudSearchSparseRequest request = new AlibabaCloudSearchSparseRequest(account, docsInput, inputType, model);
execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener));
}
}
| AlibabaCloudSearchSparseRequestManager |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/model/DaggerAnnotation.java | {
"start": 1079,
"end": 1767
} | class ____ {
public static DaggerAnnotation from(XAnnotation annotation) {
Preconditions.checkNotNull(annotation);
return new AutoValue_DaggerAnnotation(XAnnotations.equivalence().wrap(annotation));
}
abstract Equivalence.Wrapper<XAnnotation> equivalenceWrapper();
public DaggerTypeElement annotationTypeElement() {
return DaggerTypeElement.from(xprocessing().getType().getTypeElement());
}
public XAnnotation xprocessing() {
return equivalenceWrapper().get();
}
public AnnotationMirror javac() {
return toJavac(xprocessing());
}
@Override
public final String toString() {
return XAnnotations.toString(xprocessing());
}
}
| DaggerAnnotation |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-client/runtime/src/test/java/io/quarkus/restclient/runtime/RestClientBaseTest.java | {
"start": 1616,
"end": 11753
} | class ____ {
private static final String TRUSTSTORE_PASSWORD = "truststorePassword";
private static final String KEYSTORE_PASSWORD = "keystorePassword";
private static Path truststorePath;
private static Path keystorePath;
@BeforeAll
public static void beforeAll() throws IOException, KeyStoreException, CertificateException, NoSuchAlgorithmException {
// prepare keystore and truststore
truststorePath = Files.createTempFile("truststore", ".jks");
try (OutputStream truststoreOs = Files.newOutputStream(truststorePath)) {
KeyStore truststore = KeyStore.getInstance("JKS");
truststore.load(null, TRUSTSTORE_PASSWORD.toCharArray());
truststore.store(truststoreOs, TRUSTSTORE_PASSWORD.toCharArray());
}
keystorePath = Files.createTempFile("keystore", ".jks");
try (OutputStream keystoreOs = Files.newOutputStream(keystorePath)) {
KeyStore keystore = KeyStore.getInstance("JKS");
keystore.load(null, KEYSTORE_PASSWORD.toCharArray());
keystore.store(keystoreOs, KEYSTORE_PASSWORD.toCharArray());
}
}
@AfterAll
public static void afterAll() {
if (truststorePath != null) {
try {
Files.deleteIfExists(truststorePath);
} catch (IOException e) {
// ignore it
}
}
if (keystorePath != null) {
try {
Files.deleteIfExists(keystorePath);
} catch (IOException e) {
// ignore it
}
}
}
@Test
void clientSpecificConfigs() throws Exception {
RestClientsConfig configRoot = ConfigUtils.emptyConfigBuilder()
.setAddDefaultSources(false)
.withMapping(RestClientsConfig.class)
.withCustomizers(new SmallRyeConfigBuilderCustomizer() {
@Override
public void configBuilder(final SmallRyeConfigBuilder builder) {
new AbstractRestClientConfigBuilder() {
@Override
public List<RegisteredRestClient> getRestClients() {
return List.of(new RegisteredRestClient(TestClient.class, "test-client"));
}
}.configBuilder(builder);
}
})
.withDefaultValues(createSampleConfigRoot())
.withDefaultValues(createSampleClientConfig("test-client"))
.build()
.getConfigMapping(RestClientsConfig.class);
assertEquals(1, configRoot.clients().size());
assertTrue(configRoot.clients().containsKey(TestClient.class.getName()));
RestClientBuilder restClientBuilderMock = Mockito.mock(RestClientBuilder.class);
RestClientBase restClientBase = new RestClientBase(TestClient.class,
"http://localhost:8080",
"test-client",
null,
configRoot);
restClientBase.configureBuilder(restClientBuilderMock);
verify(restClientBuilderMock).baseUrl(new URL("http://localhost:8080"));
verify(restClientBuilderMock).proxyAddress("host1", 123);
verify(restClientBuilderMock).connectTimeout(100, MILLISECONDS);
verify(restClientBuilderMock).readTimeout(101, MILLISECONDS);
verify(restClientBuilderMock).hostnameVerifier(Mockito.any(MyHostnameVerifier1.class));
verify(restClientBuilderMock).property("resteasy.connectionTTL", Arrays.asList(102, MILLISECONDS));
verify(restClientBuilderMock).property("resteasy.connectionPoolSize", 103);
verify(restClientBuilderMock).followRedirects(true);
verify(restClientBuilderMock).register(MyResponseFilter1.class);
verify(restClientBuilderMock).queryParamStyle(COMMA_SEPARATED);
verify(restClientBuilderMock).trustStore(Mockito.any());
verify(restClientBuilderMock).keyStore(Mockito.any(), Mockito.anyString());
}
@Test
void globalConfigs() throws MalformedURLException {
RestClientsConfig configRoot = ConfigUtils.emptyConfigBuilder()
.setAddDefaultSources(false)
.withMapping(RestClientsConfig.class)
.withCustomizers(new SmallRyeConfigBuilderCustomizer() {
@Override
public void configBuilder(final SmallRyeConfigBuilder builder) {
new AbstractRestClientConfigBuilder() {
@Override
public List<RegisteredRestClient> getRestClients() {
return List.of(new RegisteredRestClient(TestClient.class, "test-client"));
}
}.configBuilder(builder);
}
})
.withDefaultValues(createSampleConfigRoot())
.build()
.getConfigMapping(RestClientsConfig.class);
assertEquals(1, configRoot.clients().size());
assertTrue(configRoot.clients().containsKey(TestClient.class.getName()));
RestClientBuilder restClientBuilderMock = Mockito.mock(RestClientBuilder.class);
RestClientBase restClientBase = new RestClientBase(TestClient.class,
"http://localhost:8080",
"test-client",
null,
configRoot);
restClientBase.configureBuilder(restClientBuilderMock);
// then
verify(restClientBuilderMock).baseUrl(new URL("http://localhost:8080"));
verify(restClientBuilderMock).proxyAddress("host2", 123);
verify(restClientBuilderMock).connectTimeout(200, MILLISECONDS);
verify(restClientBuilderMock).readTimeout(201, MILLISECONDS);
verify(restClientBuilderMock).hostnameVerifier(Mockito.any(MyHostnameVerifier2.class));
verify(restClientBuilderMock).property("resteasy.connectionTTL", Arrays.asList(202, MILLISECONDS));
verify(restClientBuilderMock).property("resteasy.connectionPoolSize", 203);
verify(restClientBuilderMock).followRedirects(true);
verify(restClientBuilderMock).register(MyResponseFilter2.class);
verify(restClientBuilderMock).queryParamStyle(MULTI_PAIRS);
verify(restClientBuilderMock).trustStore(Mockito.any());
verify(restClientBuilderMock).keyStore(Mockito.any(), Mockito.anyString());
}
private static Map<String, String> createSampleConfigRoot() {
Map<String, String> rootConfig = new HashMap<>();
rootConfig.put("quarkus.rest-client.proxy-address", "host2:123");
rootConfig.put("quarkus.rest-client.connect-timeout", "200");
rootConfig.put("quarkus.rest-client.read-timeout", "201");
rootConfig.put("quarkus.rest-client.hostname-verifier",
"io.quarkus.restclient.runtime.RestClientBaseTest$MyHostnameVerifier2");
rootConfig.put("quarkus.rest-client.connection-ttl", "202");
rootConfig.put("quarkus.rest-client.connection-pool-size", "203");
rootConfig.put("quarkus.rest-client.follow-redirects", "true");
rootConfig.put("quarkus.rest-client.providers", "io.quarkus.restclient.runtime.RestClientBaseTest$MyResponseFilter2");
rootConfig.put("quarkus.rest-client.query-param-style", "multi-pairs");
rootConfig.put("quarkus.rest-client.trust-store", truststorePath.toAbsolutePath().toString());
rootConfig.put("quarkus.rest-client.trust-store-password", "truststorePassword");
rootConfig.put("quarkus.rest-client.trust-store-type", "JKS");
rootConfig.put("quarkus.rest-client.key-store", keystorePath.toAbsolutePath().toString());
rootConfig.put("quarkus.rest-client.key-store-password", "keystorePassword");
rootConfig.put("quarkus.rest-client.key-store-type", "JKS");
return rootConfig;
}
private static Map<String, String> createSampleClientConfig(final String restClientName) {
Map<String, String> clientConfig = new HashMap<>();
// properties only configurable via client config
clientConfig.put("quarkus.rest-client." + restClientName + ".url", "http://localhost");
// properties that override configRoot counterparts
clientConfig.put("quarkus.rest-client." + restClientName + ".proxy-address", "host1:123");
clientConfig.put("quarkus.rest-client." + restClientName + ".connect-timeout", "100");
clientConfig.put("quarkus.rest-client." + restClientName + ".read-timeout", "101");
clientConfig.put("quarkus.rest-client." + restClientName + ".hostname-verifier",
"io.quarkus.restclient.runtime.RestClientBaseTest$MyHostnameVerifier1");
clientConfig.put("quarkus.rest-client." + restClientName + ".connection-ttl", "102");
clientConfig.put("quarkus.rest-client." + restClientName + ".connection-pool-size", "103");
clientConfig.put("quarkus.rest-client." + restClientName + ".follow-redirects", "true");
clientConfig.put("quarkus.rest-client." + restClientName + ".providers",
"io.quarkus.restclient.runtime.RestClientBaseTest$MyResponseFilter1");
clientConfig.put("quarkus.rest-client." + restClientName + ".query-param-style", "comma-separated");
clientConfig.put("quarkus.rest-client." + restClientName + ".trust-store", truststorePath.toAbsolutePath().toString());
clientConfig.put("quarkus.rest-client." + restClientName + ".trust-store-password", "truststorePassword");
clientConfig.put("quarkus.rest-client." + restClientName + ".trust-store-type", "JKS");
clientConfig.put("quarkus.rest-client." + restClientName + ".key-store", keystorePath.toAbsolutePath().toString());
clientConfig.put("quarkus.rest-client." + restClientName + ".key-store-password", "keystorePassword");
clientConfig.put("quarkus.rest-client." + restClientName + ".key-store-type", "JKS");
return clientConfig;
}
@RegisterRestClient
| RestClientBaseTest |
java | netty__netty | transport/src/main/java/io/netty/channel/DelegatingChannelPromiseNotifier.java | {
"start": 1194,
"end": 6450
} | class ____ implements ChannelPromise, ChannelFutureListener {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(DelegatingChannelPromiseNotifier.class);
private final ChannelPromise delegate;
private final boolean logNotifyFailure;
public DelegatingChannelPromiseNotifier(ChannelPromise delegate) {
this(delegate, !(delegate instanceof VoidChannelPromise));
}
public DelegatingChannelPromiseNotifier(ChannelPromise delegate, boolean logNotifyFailure) {
this.delegate = checkNotNull(delegate, "delegate");
this.logNotifyFailure = logNotifyFailure;
}
@Override
public void operationComplete(ChannelFuture future) throws Exception {
InternalLogger internalLogger = logNotifyFailure ? logger : null;
if (future.isSuccess()) {
Void result = future.get();
PromiseNotificationUtil.trySuccess(delegate, result, internalLogger);
} else if (future.isCancelled()) {
PromiseNotificationUtil.tryCancel(delegate, internalLogger);
} else {
Throwable cause = future.cause();
PromiseNotificationUtil.tryFailure(delegate, cause, internalLogger);
}
}
@Override
public Channel channel() {
return delegate.channel();
}
@Override
public ChannelPromise setSuccess(Void result) {
delegate.setSuccess(result);
return this;
}
@Override
public ChannelPromise setSuccess() {
delegate.setSuccess();
return this;
}
@Override
public boolean trySuccess() {
return delegate.trySuccess();
}
@Override
public boolean trySuccess(Void result) {
return delegate.trySuccess(result);
}
@Override
public ChannelPromise setFailure(Throwable cause) {
delegate.setFailure(cause);
return this;
}
@Override
public ChannelPromise addListener(GenericFutureListener<? extends Future<? super Void>> listener) {
delegate.addListener(listener);
return this;
}
@Override
public ChannelPromise addListeners(GenericFutureListener<? extends Future<? super Void>>... listeners) {
delegate.addListeners(listeners);
return this;
}
@Override
public ChannelPromise removeListener(GenericFutureListener<? extends Future<? super Void>> listener) {
delegate.removeListener(listener);
return this;
}
@Override
public ChannelPromise removeListeners(GenericFutureListener<? extends Future<? super Void>>... listeners) {
delegate.removeListeners(listeners);
return this;
}
@Override
public boolean tryFailure(Throwable cause) {
return delegate.tryFailure(cause);
}
@Override
public boolean setUncancellable() {
return delegate.setUncancellable();
}
@Override
public ChannelPromise await() throws InterruptedException {
delegate.await();
return this;
}
@Override
public ChannelPromise awaitUninterruptibly() {
delegate.awaitUninterruptibly();
return this;
}
@Override
public boolean isVoid() {
return delegate.isVoid();
}
@Override
public ChannelPromise unvoid() {
return isVoid() ? new DelegatingChannelPromiseNotifier(delegate.unvoid()) : this;
}
@Override
public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
return delegate.await(timeout, unit);
}
@Override
public boolean await(long timeoutMillis) throws InterruptedException {
return delegate.await(timeoutMillis);
}
@Override
public boolean awaitUninterruptibly(long timeout, TimeUnit unit) {
return delegate.awaitUninterruptibly(timeout, unit);
}
@Override
public boolean awaitUninterruptibly(long timeoutMillis) {
return delegate.awaitUninterruptibly(timeoutMillis);
}
@Override
public Void getNow() {
return delegate.getNow();
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return delegate.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return delegate.isCancelled();
}
@Override
public boolean isDone() {
return delegate.isDone();
}
@Override
public Void get() throws InterruptedException, ExecutionException {
return delegate.get();
}
@Override
public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
return delegate.get(timeout, unit);
}
@Override
public ChannelPromise sync() throws InterruptedException {
delegate.sync();
return this;
}
@Override
public ChannelPromise syncUninterruptibly() {
delegate.syncUninterruptibly();
return this;
}
@Override
public boolean isSuccess() {
return delegate.isSuccess();
}
@Override
public boolean isCancellable() {
return delegate.isCancellable();
}
@Override
public Throwable cause() {
return delegate.cause();
}
}
| DelegatingChannelPromiseNotifier |
java | spring-projects__spring-security | webauthn/src/main/java/org/springframework/security/web/webauthn/jackson/BytesJackson2Mixin.java | {
"start": 1194,
"end": 1353
} | class ____ {
@JsonCreator
static Bytes fromBase64(String value) {
return Bytes.fromBase64(value);
}
private BytesJackson2Mixin() {
}
}
| BytesJackson2Mixin |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/bytearray/ByteArrayAssert_hasSameSizeAs_with_Iterable_Test.java | {
"start": 995,
"end": 1418
} | class ____ extends ByteArrayAssertBaseTest {
private final List<String> other = newArrayList("Yoda", "Luke");
@Override
protected ByteArrayAssert invoke_api_method() {
return assertions.hasSameSizeAs(other);
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertHasSameSizeAs(getInfo(assertions), getActual(assertions), other);
}
}
| ByteArrayAssert_hasSameSizeAs_with_Iterable_Test |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/SplitTimeoutCancelTaskTest.java | {
"start": 1073,
"end": 2708
} | class ____ extends ContextTestSupport {
final String payload1 = "<items><item><id>1</id><name>one</name></item><item><id>2</id><name>two</name></item></items>";
final String payload2 = "<items><item><id>3</id><name>three</name></item><item><id>4</id><name>four</name></item></items>";
@Test
public void testSplitterTimeoutShouldNotExhaustThreadPool() throws Exception {
MockEndpoint mockEndpoint = getMockEndpoint("mock:split");
mockEndpoint.expectedMessageCount(4);
template.sendBody("direct:start", payload1);
template.sendBody("direct:start", payload2);
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
ThreadPoolProfile myThreadPoolProfile = new ThreadPoolProfile("testProfile");
myThreadPoolProfile.setMaxPoolSize(20);
myThreadPoolProfile.setPoolSize(10);
myThreadPoolProfile.setMaxQueueSize(1);
getContext().getExecutorServiceManager().setDefaultThreadPoolProfile(myThreadPoolProfile);
from("direct:start")
.split()
.xpath("//items/item")
.parallelProcessing(true)
.streaming(true)
.stopOnException(true)
.timeout("30000")
.executorService("testProfile")
.to("mock:split");
}
};
}
}
| SplitTimeoutCancelTaskTest |
java | apache__camel | core/camel-main/src/test/java/org/apache/camel/main/MainIoCTest.java | {
"start": 1626,
"end": 1783
} | class
____ main = new Main();
// add the configuration
main.configure().withConfigurations(MyConfiguration.class);
// add as | Main |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/virtualservice/destination/DubboDestination.java | {
"start": 892,
"end": 1617
} | class ____ {
private String host;
private String subset;
private int port;
private DubboRouteDestination fallback;
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public String getSubset() {
return subset;
}
public void setSubset(String subset) {
this.subset = subset;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public DubboRouteDestination getFallback() {
return fallback;
}
public void setFallback(DubboRouteDestination fallback) {
this.fallback = fallback;
}
}
| DubboDestination |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/aggfunctions/BatchApproxCountDistinctAggFunctionTest.java | {
"start": 7567,
"end": 9346
} | class ____
extends ApproxCountDistinctAggFunctionTestBase<DecimalData> {
private final int precision;
private final int scale;
DecimalApproxCountDistinctAggFunctionTestBase(int precision, int scale) {
this.precision = precision;
this.scale = scale;
}
@Override
protected List<List<DecimalData>> getInputValueSets() {
return Arrays.asList(
Arrays.asList(
DecimalDataUtils.castFrom("1", precision, scale),
DecimalDataUtils.castFrom("1000.000001", precision, scale),
DecimalDataUtils.castFrom("-1", precision, scale),
DecimalDataUtils.castFrom("-999.998999", precision, scale),
null,
DecimalDataUtils.castFrom("0", precision, scale),
DecimalDataUtils.castFrom("-999.999", precision, scale),
null,
DecimalDataUtils.castFrom("999.999", precision, scale)),
Arrays.asList(null, null, null, null, null),
Arrays.asList(null, DecimalDataUtils.castFrom("0", precision, scale)));
}
@Override
protected List<Long> getExpectedResults() {
return Arrays.asList(7L, 0L, 1L);
}
@Override
protected AggregateFunction<Long, HllBuffer> getAggregator() {
return new DecimalApproxCountDistinctAggFunction(new DecimalType(precision, scale));
}
}
/** Test for {@link DecimalApproxCountDistinctAggFunction} for 20 precision and 6 scale. */
@Nested
final | DecimalApproxCountDistinctAggFunctionTestBase |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/cache/SoftCacheTest.java | {
"start": 1065,
"end": 2539
} | class ____ {
@Test
void shouldDemonstrateObjectsBeingCollectedAsNeeded() {
final int N = 3000000;
SoftCache cache = new SoftCache(new PerpetualCache("default"));
for (int i = 0; i < N; i++) {
byte[] array = new byte[5001]; // waste a bunch of memory
array[5000] = 1;
cache.putObject(i, array);
cache.getObject(i);
if (cache.getSize() < i + 1) {
// System.out.println("Cache exceeded with " + (i + 1) + " entries.");
break;
}
}
assertTrue(cache.getSize() < N);
}
@Test
void shouldDemonstrateCopiesAreEqual() {
Cache cache = new SoftCache(new PerpetualCache("default"));
cache = new SerializedCache(cache);
for (int i = 0; i < 1000; i++) {
cache.putObject(i, i);
Object value = cache.getObject(i);
assertTrue(value == null || value.equals(i));
}
}
@Test
void shouldRemoveItemOnDemand() {
Cache cache = new SoftCache(new PerpetualCache("default"));
cache.putObject(0, 0);
assertNotNull(cache.getObject(0));
cache.removeObject(0);
assertNull(cache.getObject(0));
}
@Test
void shouldFlushAllItemsOnDemand() {
Cache cache = new SoftCache(new PerpetualCache("default"));
for (int i = 0; i < 5; i++) {
cache.putObject(i, i);
}
assertNotNull(cache.getObject(0));
assertNotNull(cache.getObject(4));
cache.clear();
assertNull(cache.getObject(0));
assertNull(cache.getObject(4));
}
}
| SoftCacheTest |
java | spring-projects__spring-boot | module/spring-boot-integration/src/main/java/org/springframework/boot/integration/autoconfigure/IntegrationAutoConfiguration.java | {
"start": 13591,
"end": 13770
} | class ____ {
/**
* Check if either an {@link IntegrationRSocketEndpoint} or
* {@link RSocketOutboundGateway} bean is available.
*/
static | IntegrationRSocketConfiguration |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/SplitterCorrelationIdIssueTest.java | {
"start": 1216,
"end": 3037
} | class ____ extends ContextTestSupport {
@Test
public void testSplitCorrelationId() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:split");
mock.expectedMessageCount(3);
Exchange exchange = template.send("direct:start", new Processor() {
public void process(Exchange exchange) {
exchange.getIn().setBody("A,B,C");
}
});
assertMockEndpointsSatisfied();
// match that all exchange id is unique
String parent = exchange.getExchangeId();
String split1 = mock.getReceivedExchanges().get(0).getExchangeId();
String split2 = mock.getReceivedExchanges().get(1).getExchangeId();
String split3 = mock.getReceivedExchanges().get(2).getExchangeId();
assertNotSame(parent, split1);
assertNotSame(parent, split2);
assertNotSame(parent, split3);
assertNotSame(split1, split2);
assertNotSame(split2, split3);
assertNotSame(split3, split1);
// match correlation id from split -> parent
String corr1 = mock.getReceivedExchanges().get(0).getProperty(Exchange.CORRELATION_ID, String.class);
String corr2 = mock.getReceivedExchanges().get(1).getProperty(Exchange.CORRELATION_ID, String.class);
String corr3 = mock.getReceivedExchanges().get(2).getProperty(Exchange.CORRELATION_ID, String.class);
assertEquals(parent, corr1);
assertEquals(parent, corr2);
assertEquals(parent, corr3);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").split(body().tokenize(",")).to("mock:split");
}
};
}
}
| SplitterCorrelationIdIssueTest |
java | square__retrofit | retrofit-adapters/rxjava2/src/test/java/retrofit2/adapter/rxjava2/ObservableThrowingTest.java | {
"start": 1405,
"end": 1667
} | class ____ {
@Rule public final MockWebServer server = new MockWebServer();
@Rule public final TestRule resetRule = new RxJavaPluginsResetRule();
@Rule public final RecordingObserver.Rule subscriberRule = new RecordingObserver.Rule();
| ObservableThrowingTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/matchers/MatchersTest.java | {
"start": 18562,
"end": 18985
} | class ____ extends BugChecker
implements MethodInvocationTreeMatcher {
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (Matchers.symbolHasAnnotation("java.lang.Deprecated").matches(tree, state)) {
return describeMatch(tree);
}
return Description.NO_MATCH;
}
}
/** Checker that checks if a | NoAnnotatedDeclarationCallsChecker |
java | apache__dubbo | dubbo-configcenter/dubbo-configcenter-nacos/src/main/java/org/apache/dubbo/configcenter/support/nacos/NacosDynamicConfiguration.java | {
"start": 3090,
"end": 13509
} | class ____ implements DynamicConfiguration {
private static final String GET_CONFIG_KEYS_PATH = "/v1/cs/configs";
private final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(getClass());
/**
* the default timeout in millis to get config from nacos
*/
private static final long DEFAULT_TIMEOUT = 5000L;
private final Properties nacosProperties;
private static final String NACOS_RETRY_KEY = "nacos.retry";
private static final String NACOS_RETRY_WAIT_KEY = "nacos.retry-wait";
private static final String NACOS_CHECK_KEY = "nacos.check";
/**
* The nacos configService
*/
private final NacosConfigServiceWrapper configService;
private ApplicationModel applicationModel;
/**
* The map store the key to {@link NacosConfigListener} mapping
*/
private final ConcurrentMap<String, NacosConfigListener> watchListenerMap;
private final MD5Utils md5Utils = new MD5Utils();
NacosDynamicConfiguration(URL url, ApplicationModel applicationModel) {
this.nacosProperties = buildNacosProperties(url);
this.configService = buildConfigService(url);
this.watchListenerMap = new ConcurrentHashMap<>();
this.applicationModel = applicationModel;
}
private NacosConfigServiceWrapper buildConfigService(URL url) {
int retryTimes = url.getPositiveParameter(NACOS_RETRY_KEY, 10);
int sleepMsBetweenRetries = url.getPositiveParameter(NACOS_RETRY_WAIT_KEY, 1000);
boolean check = url.getParameter(NACOS_CHECK_KEY, true);
ConfigService tmpConfigServices = null;
try {
for (int i = 0; i < retryTimes + 1; i++) {
tmpConfigServices = NacosFactory.createConfigService(nacosProperties);
String serverStatus = tmpConfigServices.getServerStatus();
boolean configServiceAvailable = testConfigService(tmpConfigServices);
if (!check || (UP.equals(serverStatus) && configServiceAvailable)) {
break;
} else {
logger.warn(
LoggerCodeConstants.CONFIG_ERROR_NACOS,
"",
"",
"Failed to connect to nacos config server. " + "Server status: "
+ serverStatus + ". " + "Config Service Available: "
+ configServiceAvailable + ". "
+ (i < retryTimes
? "Dubbo will try to retry in " + sleepMsBetweenRetries + ". "
: "Exceed retry max times.")
+ "Try times: "
+ (i + 1));
}
tmpConfigServices.shutDown();
tmpConfigServices = null;
Thread.sleep(sleepMsBetweenRetries);
}
} catch (NacosException e) {
logger.error(CONFIG_ERROR_NACOS, "", "", e.getErrMsg(), e);
throw new IllegalStateException(e);
} catch (InterruptedException e) {
logger.error(INTERNAL_INTERRUPTED, "", "", "Interrupted when creating nacos config service client.", e);
Thread.currentThread().interrupt();
throw new IllegalStateException(e);
}
if (tmpConfigServices == null) {
logger.error(
CONFIG_ERROR_NACOS,
"",
"",
"Failed to create nacos config service client. Reason: server status check failed.");
throw new IllegalStateException(
"Failed to create nacos config service client. Reason: server status check failed.");
}
return new NacosConfigServiceWrapper(tmpConfigServices);
}
private boolean testConfigService(ConfigService configService) {
try {
configService.getConfig("Dubbo-Nacos-Test", "Dubbo-Nacos-Test", DEFAULT_TIMEOUT);
return true;
} catch (NacosException e) {
return false;
}
}
private Properties buildNacosProperties(URL url) {
Properties properties = new Properties();
setServerAddr(url, properties);
setProperties(url, properties);
return properties;
}
private void setServerAddr(URL url, Properties properties) {
StringBuilder serverAddrBuilder = new StringBuilder(url.getHost()) // Host
.append(':')
.append(url.getPort()); // Port
// Append backup parameter as other servers
String backup = url.getParameter(BACKUP_KEY);
if (backup != null) {
serverAddrBuilder.append(',').append(backup);
}
String serverAddr = serverAddrBuilder.toString();
properties.put(SERVER_ADDR, serverAddr);
}
private static void setProperties(URL url, Properties properties) {
// Get the parameters from constants
Map<String, String> parameters = url.getParameters(of(PropertyKeyConst.class));
// Put all parameters
properties.putAll(parameters);
if (StringUtils.isNotEmpty(url.getUsername())) {
properties.put(USERNAME, url.getUsername());
}
if (StringUtils.isNotEmpty(url.getPassword())) {
properties.put(PASSWORD, url.getPassword());
}
}
private static void putPropertyIfAbsent(URL url, Properties properties, String propertyName) {
String propertyValue = url.getParameter(propertyName);
if (StringUtils.isNotEmpty(propertyValue)) {
properties.setProperty(propertyName, propertyValue);
}
}
private static void putPropertyIfAbsent(URL url, Properties properties, String propertyName, String defaultValue) {
String propertyValue = url.getParameter(propertyName);
if (StringUtils.isNotEmpty(propertyValue)) {
properties.setProperty(propertyName, propertyValue);
} else {
properties.setProperty(propertyName, defaultValue);
}
}
/**
* Ignores the group parameter.
*
* @param key property key the native listener will listen on
* @param group to distinguish different set of properties
* @return
*/
private NacosConfigListener createTargetListener(String key, String group) {
NacosConfigListener configListener = new NacosConfigListener();
configListener.fillContext(key, group);
return configListener;
}
@Override
public void close() throws Exception {
configService.shutdown();
}
@Override
public void addListener(String key, String group, ConfigurationListener listener) {
String listenerKey = buildListenerKey(key, group);
NacosConfigListener nacosConfigListener = ConcurrentHashMapUtils.computeIfAbsent(
watchListenerMap, listenerKey, k -> createTargetListener(key, group));
nacosConfigListener.addListener(listener);
try {
configService.addListener(key, group, nacosConfigListener);
} catch (NacosException e) {
logger.error(CONFIG_ERROR_NACOS, "", "", e.getMessage(), e);
}
}
@Override
public void removeListener(String key, String group, ConfigurationListener listener) {
String listenerKey = buildListenerKey(key, group);
NacosConfigListener eventListener = watchListenerMap.get(listenerKey);
if (eventListener != null) {
eventListener.removeListener(listener);
}
}
@Override
public String getConfig(String key, String group, long timeout) throws IllegalStateException {
try {
long nacosTimeout = timeout < 0 ? getDefaultTimeout() : timeout;
if (StringUtils.isEmpty(group)) {
group = DEFAULT_GROUP;
}
return configService.getConfig(key, group, nacosTimeout);
} catch (NacosException e) {
logger.error(CONFIG_ERROR_NACOS, "", "", e.getMessage(), e);
}
return null;
}
@Override
public ConfigItem getConfigItem(String key, String group) {
String content = getConfig(key, group);
String casMd5 = "";
if (StringUtils.isNotEmpty(content)) {
casMd5 = md5Utils.getMd5(content);
}
return new ConfigItem(content, casMd5);
}
@Override
public Object getInternalProperty(String key) {
try {
return configService.getConfig(key, DEFAULT_GROUP, getDefaultTimeout());
} catch (NacosException e) {
logger.error(CONFIG_ERROR_NACOS, "", "", e.getMessage(), e);
}
return null;
}
@Override
public boolean publishConfig(String key, String group, String content) {
boolean published = false;
try {
published = configService.publishConfig(key, group, content);
} catch (NacosException e) {
logger.error(CONFIG_ERROR_NACOS, "", "", e.getMessage(), e);
}
return published;
}
@Override
public boolean publishConfigCas(String key, String group, String content, Object ticket) {
try {
if (!(ticket instanceof String)) {
throw new IllegalArgumentException("nacos publishConfigCas requires string type ticket");
}
return configService.publishConfigCas(key, group, content, (String) ticket);
} catch (NacosException e) {
logger.warn(CONFIG_ERROR_NACOS, "nacos publishConfigCas failed.", "", e.getMessage(), e);
return false;
}
}
@Override
public long getDefaultTimeout() {
return DEFAULT_TIMEOUT;
}
@Override
public boolean removeConfig(String key, String group) {
boolean removed = false;
try {
removed = configService.removeConfig(key, group);
} catch (NacosException e) {
if (logger.isErrorEnabled()) {
logger.error(CONFIG_ERROR_NACOS, "", "", e.getMessage(), e);
}
}
return removed;
}
private String getProperty(String name, String defaultValue) {
return nacosProperties.getProperty(name, defaultValue);
}
public | NacosDynamicConfiguration |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/rest/TaskDone.java | {
"start": 1100,
"end": 2539
} | class ____ extends TaskState {
/**
* The time on the coordinator when the task was started.
*/
private final long startedMs;
/**
* The time on the coordinator when the task was completed.
*/
private final long doneMs;
/**
* Empty if the task completed without error; the error message otherwise.
*/
private final String error;
/**
* True if the task was manually cancelled, rather than terminating itself.
*/
private final boolean cancelled;
@JsonCreator
public TaskDone(@JsonProperty("spec") TaskSpec spec,
@JsonProperty("startedMs") long startedMs,
@JsonProperty("doneMs") long doneMs,
@JsonProperty("error") String error,
@JsonProperty("cancelled") boolean cancelled,
@JsonProperty("status") JsonNode status) {
super(spec, status);
this.startedMs = startedMs;
this.doneMs = doneMs;
this.error = error;
this.cancelled = cancelled;
}
@JsonProperty
public long startedMs() {
return startedMs;
}
@JsonProperty
public long doneMs() {
return doneMs;
}
@JsonProperty
public String error() {
return error;
}
@JsonProperty
public boolean cancelled() {
return cancelled;
}
@Override
public TaskStateType stateType() {
return TaskStateType.DONE;
}
}
| TaskDone |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/qualifiers/replaces/defaultimpl/CustomResponseStrategy.java | {
"start": 812,
"end": 889
} | class ____ implements ResponseStrategy {
}
//end::clazz[]
| CustomResponseStrategy |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java | {
"start": 700,
"end": 1787
} | class ____ extends AStatement {
private final String canonicalTypeName;
private final String symbol;
private final AExpression valueNode;
public SDeclaration(int identifier, Location location, String canonicalTypeName, String symbol, AExpression valueNode) {
super(identifier, location);
this.canonicalTypeName = Objects.requireNonNull(canonicalTypeName);
this.symbol = Objects.requireNonNull(symbol);
this.valueNode = valueNode;
}
public String getCanonicalTypeName() {
return canonicalTypeName;
}
public String getSymbol() {
return symbol;
}
public AExpression getValueNode() {
return valueNode;
}
@Override
public <Scope> void visit(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
userTreeVisitor.visitDeclaration(this, scope);
}
@Override
public <Scope> void visitChildren(UserTreeVisitor<Scope> userTreeVisitor, Scope scope) {
if (valueNode != null) {
valueNode.visit(userTreeVisitor, scope);
}
}
}
| SDeclaration |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/resilience/ReactiveRetryInterceptorTests.java | {
"start": 16553,
"end": 16819
} | class ____ {
AtomicInteger counter = new AtomicInteger();
public Flux<Object> retryOperation() {
return Flux.from(Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
}));
}
}
static | FluxMultiValueBean |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/single/SingleZipTest.java | {
"start": 893,
"end": 6691
} | class ____ extends RxJavaTest {
@Test
public void zip2() {
Single.zip(Single.just(1), Single.just(2), new BiFunction<Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b) throws Exception {
return a + "" + b;
}
})
.test()
.assertResult("12");
}
@Test
public void zip3() {
Single.zip(Single.just(1), Single.just(2), Single.just(3), new Function3<Integer, Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b, Integer c) throws Exception {
return a + "" + b + c;
}
})
.test()
.assertResult("123");
}
@Test
public void zip4() {
Single.zip(Single.just(1), Single.just(2), Single.just(3),
Single.just(4),
new Function4<Integer, Integer, Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b, Integer c, Integer d) throws Exception {
return a + "" + b + c + d;
}
})
.test()
.assertResult("1234");
}
@Test
public void zip5() {
Single.zip(Single.just(1), Single.just(2), Single.just(3),
Single.just(4), Single.just(5),
new Function5<Integer, Integer, Integer, Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b, Integer c, Integer d, Integer e) throws Exception {
return a + "" + b + c + d + e;
}
})
.test()
.assertResult("12345");
}
@Test
public void zip6() {
Single.zip(Single.just(1), Single.just(2), Single.just(3),
Single.just(4), Single.just(5), Single.just(6),
new Function6<Integer, Integer, Integer, Integer, Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b, Integer c, Integer d, Integer e, Integer f)
throws Exception {
return a + "" + b + c + d + e + f;
}
})
.test()
.assertResult("123456");
}
@Test
public void zip7() {
Single.zip(Single.just(1), Single.just(2), Single.just(3),
Single.just(4), Single.just(5), Single.just(6),
Single.just(7),
new Function7<Integer, Integer, Integer, Integer, Integer, Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b, Integer c, Integer d, Integer e, Integer f, Integer g)
throws Exception {
return a + "" + b + c + d + e + f + g;
}
})
.test()
.assertResult("1234567");
}
@Test
public void zip8() {
Single.zip(Single.just(1), Single.just(2), Single.just(3),
Single.just(4), Single.just(5), Single.just(6),
Single.just(7), Single.just(8),
new Function8<Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b, Integer c, Integer d, Integer e, Integer f, Integer g,
Integer h) throws Exception {
return a + "" + b + c + d + e + f + g + h;
}
})
.test()
.assertResult("12345678");
}
@Test
public void zip9() {
Single.zip(Single.just(1), Single.just(2), Single.just(3),
Single.just(4), Single.just(5), Single.just(6),
Single.just(7), Single.just(8), Single.just(9),
new Function9<Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, Object>() {
@Override
public Object apply(Integer a, Integer b, Integer c, Integer d, Integer e, Integer f, Integer g,
Integer h, Integer i) throws Exception {
return a + "" + b + c + d + e + f + g + h + i;
}
})
.test()
.assertResult("123456789");
}
@Test
public void noDisposeOnAllSuccess() {
final AtomicInteger counter = new AtomicInteger();
Single<Integer> source = Single.just(1).doOnDispose(new Action() {
@Override
public void run() throws Exception {
counter.getAndIncrement();
}
});
Single.zip(source, source, new BiFunction<Integer, Integer, Object>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.test()
.assertResult(2);
assertEquals(0, counter.get());
}
@Test
public void noDisposeOnAllSuccess2() {
final AtomicInteger counter = new AtomicInteger();
Single<Integer> source = Single.just(1).doOnDispose(new Action() {
@Override
public void run() throws Exception {
counter.getAndIncrement();
}
});
Single.zip(Arrays.asList(source, source), new Function<Object[], Object>() {
@Override
public Integer apply(Object[] o) throws Exception {
return (Integer)o[0] + (Integer)o[1];
}
})
.test()
.assertResult(2);
assertEquals(0, counter.get());
}
}
| SingleZipTest |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/base/DaggerSuperficialValidation.java | {
"start": 5156,
"end": 23468
} | class ____ or throws {@link ValidationException} if it
* is not accessible in the current compilation.
*/
public static XTypeElement requireTypeElement(XProcessingEnv processingEnv, String className) {
XTypeElement type = processingEnv.findTypeElement(className);
if (type == null) {
throw new ValidationException.KnownErrorType(className);
}
return type;
}
private final boolean isStrictValidationEnabled;
private final XProcessingEnv processingEnv;
private final KeywordValidator keywordValidator;
@Inject
DaggerSuperficialValidation(
XProcessingEnv processingEnv,
CompilerOptions compilerOptions,
KeywordValidator keywordValidator) {
this.processingEnv = processingEnv;
this.isStrictValidationEnabled = compilerOptions.strictSuperficialValidation();
this.keywordValidator = keywordValidator;
}
/**
* Validates the {@link XElement#getType()} type of the given element.
*
* <p>Validating the type also validates any types it references, such as any type arguments or
* type bounds. For an {@link XExecutableType}, the parameter and return types must be fully
* defined, as must types declared in a {@code throws} clause or in the bounds of any type
* parameters.
*/
public void validateTypeOf(XElement element) {
try {
// In XProcessing, there is no generic way to get an element "asType" so we break this down
// differently for different element kinds.
String kindName = Ascii.toLowerCase(getKindName(element));
if (isTypeElement(element)) {
validateType(kindName, asTypeElement(element).getType());
} else if (isVariableElement(element)) {
validateType(kindName + " type", asVariable(element).getType());
} else if (isExecutable(element)) {
validateExecutableType(asExecutable(element).getExecutableType());
} else if (isEnumEntry(element)) {
validateType(kindName, asEnumEntry(element).getEnumTypeElement().getType());
}
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(element);
}
ValidationReport report = keywordValidator.validateJavaKeyword(element);
if (!report.isClean()) {
throw new ValidationException.JavaKeywordErrorType(report);
}
}
/**
* Validates the {@link XElement#getSuperType()} type of the given element.
*
* <p>Validating the type also validates any types it references, such as any type arguments or
* type bounds.
*/
public void validateSuperTypeOf(XTypeElement element) {
try {
validateType("superclass", element.getSuperType());
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(element);
}
}
/**
* Validates the {@link XExecutableElement#getThrownTypes()} types of the given element.
*
* <p>Validating the type also validates any types it references, such as any type arguments or
* type bounds.
*/
public void validateThrownTypesOf(XExecutableElement element) {
try {
validateTypes("thrown type", element.getThrownTypes());
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(element);
}
}
/**
* Validates the annotation types of the given element.
*
* <p>Note: this method does not validate annotation values. This method is useful if you care
* about the annotation's annotations (e.g. to check for {@code Scope} or {@code Qualifier}). In
* such cases, we just need to validate the annotation's type.
*/
public void validateAnnotationTypesOf(XElement element) {
element
.getAllAnnotations()
.forEach(annotation -> validateAnnotationTypeOf(element, annotation));
}
/**
* Validates the type of the given annotation.
*
* <p>The annotation is assumed to be annotating the given element, but this is not checked. The
* element is only in the error message if a {@link ValidatationException} is thrown.
*
* <p>Note: this method does not validate annotation values. This method is useful if you care
* about the annotation's annotations (e.g. to check for {@code Scope} or {@code Qualifier}). In
* such cases, we just need to validate the annotation's type.
*/
// TODO(bcorso): See CL/427767370 for suggestions to make this API clearer.
public void validateAnnotationTypeOf(XElement element, XAnnotation annotation) {
try {
validateType("annotation type", annotation.getType());
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(annotation).append(element);
}
}
/** Validate the annotations of the given element. */
public void validateAnnotationsOf(XElement element) {
try {
validateAnnotations(element.getAllAnnotations());
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(element);
}
}
public void validateAnnotationOf(XElement element, XAnnotation annotation) {
try {
validateAnnotation(annotation);
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(element);
}
}
/**
* Validate the type hierarchy for the given type (with the given type description) within the
* given element.
*
* <p>Validation includes all superclasses, interfaces, and type parameters of those types.
*/
public void validateTypeHierarchyOf(String typeDescription, XElement element, XType type) {
try {
validateTypeHierarchy(typeDescription, type, new HashSet<>());
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(element);
}
}
private void validateTypeHierarchy(String desc, XType type, Set<XTypeName> visited) {
if (!visited.add(type.asTypeName())) {
return;
}
validateType(desc, type);
try {
if (isArray(type)) {
validateTypeHierarchy("array component type", asArray(type).getComponentType(), visited);
} else if (isDeclared(type)) {
type.getTypeArguments()
.forEach(typeArg -> validateTypeHierarchy("type argument", typeArg, visited));
type.getSuperTypes()
.forEach(supertype -> validateTypeHierarchy("supertype", supertype, visited));
} else if (isWildcard(type) && type.extendsBound() != null) {
validateTypeHierarchy("extends bound type", type.extendsBound(), visited);
} else if (isTypeVariable(type)) {
asTypeVariable(type)
.getUpperBounds()
.forEach(bound -> validateTypeHierarchy("type variable bound type", bound, visited));
}
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(desc, type);
}
}
/**
* Returns true if all of the given elements return true from {@link #validateElement(XElement)}.
*/
private void validateElements(Collection<? extends XElement> elements) {
elements.forEach(this::validateElement);
}
/**
* Returns true if all types referenced by the given element are defined. The exact meaning of
* this depends on the kind of element. For packages, it means that all annotations on the package
* are fully defined. For other element kinds, it means that types referenced by the element,
* anything it contains, and any of its annotations element are all defined.
*/
public void validateElement(XElement element) {
checkNotNull(element);
// Validate the annotations first since these are common to all element kinds. We don't
// need to wrap these in try-catch because the *Of() methods are already wrapped.
validateAnnotationsOf(element);
// Validate enclosed elements based on the given element's kind.
try {
if (isTypeElement(element)) {
XTypeElement typeElement = asTypeElement(element);
validateElements(typeElement.getTypeParameters());
validateTypes("interface", typeElement.getSuperInterfaces());
if (typeElement.getSuperType() != null) {
validateType("superclass", typeElement.getSuperType());
}
// TODO (b/286313067) move the logic to ComponentValidator once the validation logic is
// split into individual validators to satisfy different needs.
// Dagger doesn't use components' static method, therefore, they shouldn't be validated to
// be able to stop component generation.
if (typeElement.hasAnnotation(XTypeNames.COMPONENT)) {
validateElements(
typeElement.getEnclosedElements().stream()
.filter(member -> !XElements.isStatic(member))
.collect(toImmutableList()));
} else {
validateElements(typeElement.getEnclosedElements());
}
} else if (isExecutable(element)) {
if (isMethod(element)) {
validateType("return type", asMethod(element).getReturnType());
}
XExecutableElement executableElement = asExecutable(element);
validateTypes("thrown type", executableElement.getThrownTypes());
validateElements(executableElement.getTypeParameters());
validateElements(executableElement.getParameters());
} else if (isTypeParameter(element)) {
validateTypes("bound type", asTypeParameter(element).getBounds());
}
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(element);
}
// Validate the type last. This allows errors on more specific elements to be caught above.
// E.g. errors on parameters will be attributed to the parameter elements rather than the method
// type, which generally leads to nicer error messages. We don't need to wrap these in try-catch
// because the *Of() methods are already wrapped.
validateTypeOf(element);
}
private void validateTypes(String desc, Collection<? extends XType> types) {
types.forEach(type -> validateType(desc, type));
}
/**
* Returns true if the given type is fully defined. This means that the type itself is defined, as
* are any types it references, such as any type arguments or type bounds.
*/
private void validateType(String desc, XType type) {
checkNotNull(type);
try {
if (isArray(type)) {
validateType("array component type", asArray(type).getComponentType());
} else if (isDeclared(type)) {
if (isStrictValidationEnabled) {
// There's a bug in TypeVisitor which will visit the visitDeclared() method rather than
// visitError() even when it's an ERROR kind. Thus, we check the kind directly here and
// fail validation if it's an ERROR kind (see b/213880825).
if (isErrorKind(type)) {
throw new ValidationException.KnownErrorType(type);
}
}
type.getTypeArguments().forEach(typeArg -> validateType("type argument", typeArg));
} else if (isWildcard(type)) {
if (type.extendsBound() != null) {
validateType("extends bound type", type.extendsBound());
}
} else if (isErrorKind(type)) {
throw new ValidationException.KnownErrorType(type);
}
} catch (RuntimeException e) {
throw ValidationException.from(e).append(desc, type);
}
}
// TODO(bcorso): Consider moving this over to XProcessing. There's some complication due to
// b/248552462 and the fact that XProcessing also uses the error.NonExistentClass type for invalid
// types in KSP, which we may want to keep as error kinds in KSP.
private boolean isErrorKind(XType type) {
// https://youtrack.jetbrains.com/issue/KT-34193/Kapt-CorrectErrorTypes-doesnt-work-for-generics
// XProcessing treats 'error.NonExistentClass' as an error type. However, due to the bug in KAPT
// (linked above), 'error.NonExistentClass' can still be referenced in the stub classes even
// when 'correctErrorTypes=true' is enabled. Thus, we can't treat 'error.NonExistentClass' as an
// actual error type, as that would completely prevent processing of stubs that exhibit this
// bug. This behavior also matches how things work in Javac, as 'error.NonExistentClass' is
// treated as a TypeKind.DECLARED rather than a TypeKind.ERROR since the type is a real class
// that exists on the classpath.
return type.isError()
&& !(processingEnv.getBackend() == Backend.JAVAC
&& type.getTypeName().toString().contentEquals("error.NonExistentClass"));
}
/**
* Returns true if the given type is fully defined. This means that the parameter and return types
* must be fully defined, as must types declared in a {@code throws} clause or in the bounds of
* any type parameters.
*/
private void validateExecutableType(XExecutableType type) {
try {
validateTypes("parameter type", type.getParameterTypes());
validateTypes("thrown type", type.getThrownTypes());
validateTypes("type variable", getTypeVariables(type));
if (isMethodType(type)) {
validateType("return type", asMethodType(type).getReturnType());
}
} catch (RuntimeException e) {
throw ValidationException.from(e).append(type);
}
}
private ImmutableList<XType> getTypeVariables(XExecutableType executableType) {
switch (processingEnv.getBackend()) {
case JAVAC:
return toJavac(executableType).getTypeVariables().stream()
.map(typeVariable -> toXProcessing(typeVariable, processingEnv))
.collect(toImmutableList());
case KSP:
// TODO(b/247851395): Add a way to get type variables as XTypes from XExecutableType --
// currently, we can only get TypeVariableNames from XMethodType. For now, just skip
// validating type variables of methods in KSP.
return ImmutableList.of();
}
throw new AssertionError("Unexpected backend: " + processingEnv.getBackend());
}
private void validateAnnotations(Collection<XAnnotation> annotations) {
annotations.forEach(this::validateAnnotation);
}
private void validateAnnotation(XAnnotation annotation) {
try {
validateType("annotation type", annotation.getType());
try {
// Note: We separate this into its own try-catch since there's a bug where we could get an
// error when getting the annotation values due to b/264089557. This way we will at least
// report the name of the annotation in the error message.
validateAnnotationValues(getDefaultValues(annotation));
validateAnnotationValues(annotation.getAnnotationValues());
} catch (RuntimeException exception) {
throw ValidationException.from(exception).append(annotation);
}
} catch (RuntimeException exception) {
throw ValidationException.from(exception)
.append(
"annotation type: "
+ (annotation.getType().isError()
? annotation.getName() // SUPPRESS_GET_NAME_CHECK
: annotation.getClassName().canonicalName()));
}
}
private ImmutableList<XAnnotationValue> getDefaultValues(XAnnotation annotation) {
switch (processingEnv.getBackend()) {
case JAVAC:
return annotation.getTypeElement().getDeclaredMethods().stream()
.map(XConverters::toJavac)
.filter(method -> method.getDefaultValue() != null)
.map(method -> toXProcessing(method.getDefaultValue(), method, processingEnv))
.collect(toImmutableList());
case KSP:
// TODO(b/231170716): Add a generic way to retrieve default values from XAnnotation
// For now, just ignore them in KSP when doing validation.
return ImmutableList.of();
}
throw new AssertionError("Unexpected backend: " + processingEnv.getBackend());
}
private void validateAnnotationValues(Collection<XAnnotationValue> values) {
values.forEach(this::validateAnnotationValue);
}
private void validateAnnotationValue(XAnnotationValue value) {
try {
XType expectedType = value.getValueType();
// TODO(b/249834057): In KSP error types in annotation values are just null, so check this
// first and throw KnownErrorType of "<error>" to match Javac for now.
if (processingEnv.getBackend() == Backend.KSP && value.getValue() == null) {
throw new ValidationException.KnownErrorType("<error>");
}
if (value.hasListValue()) {
validateAnnotationValues(value.asAnnotationValueList());
} else if (value.hasAnnotationValue()) {
validateIsEquivalentType(value.asAnnotation().getType(), expectedType);
validateAnnotation(value.asAnnotation());
} else if (value.hasEnumValue()) {
validateIsEquivalentType(value.asEnum().getEnumTypeElement().getType(), expectedType);
validateElement(value.asEnum());
} else if (value.hasTypeValue()) {
validateType("annotation value type", value.asType());
} else {
// Validates all other types, e.g. primitives and String values.
validateIsTypeOf(expectedType, value.getValue().getClass());
}
} catch (RuntimeException e) {
throw ValidationException.from(e).append(value);
}
}
private void validateIsTypeOf(XType expectedType, Class<?> clazz) {
// TODO(b/248633751): We get the XClassName via an XTypeElement rather than XClassName.get()
// because the latter does not handle interop types correctly.
XClassName actualClassName =
processingEnv.requireTypeElement(clazz.getCanonicalName()).asClassName();
if (!isTypeOf(expectedType.boxed(), actualClassName)) {
throw new ValidationException.UnknownErrorType()
.append(
String.format(
"Expected type %s, but got %s",
expectedType.boxed().asTypeName(),
actualClassName));
}
}
private void validateIsEquivalentType(XType type, XType expectedType) {
if (!XTypes.equivalence().equivalent(type, expectedType)) {
throw new ValidationException.KnownErrorType(type);
}
}
/**
* A runtime exception that can be used during superficial validation to collect information about
* unexpected exceptions during validation.
*/
public abstract static | name |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/manager/Issue117Activity.java | {
"start": 1224,
"end": 1531
} | class ____ extends FragmentPagerAdapter {
Issue117Adapter(FragmentManager fm) {
super(fm);
}
@Override
public Fragment getItem(int position) {
return new Issue117Fragment();
}
@Override
public int getCount() {
return 1;
}
}
public static | Issue117Adapter |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java | {
"start": 16068,
"end": 17974
} | class ____ implements SectionProcessor {
static final String NAME = "NameSection";
@Override
public void process() throws IOException {
Node node = new Node();
loadNodeChildren(node, "NameSection fields");
NameSystemSection.Builder b = NameSystemSection.newBuilder();
Integer namespaceId = node.removeChildInt(NAME_SECTION_NAMESPACE_ID);
if (namespaceId == null) {
throw new IOException("<NameSection> is missing <namespaceId>");
}
b.setNamespaceId(namespaceId);
Long lval = node.removeChildLong(NAME_SECTION_GENSTAMPV1);
if (lval != null) {
b.setGenstampV1(lval);
}
lval = node.removeChildLong(NAME_SECTION_GENSTAMPV2);
if (lval != null) {
b.setGenstampV2(lval);
}
lval = node.removeChildLong(NAME_SECTION_GENSTAMPV1_LIMIT);
if (lval != null) {
b.setGenstampV1Limit(lval);
}
lval = node.removeChildLong(NAME_SECTION_LAST_ALLOCATED_BLOCK_ID);
if (lval != null) {
b.setLastAllocatedBlockId(lval);
}
lval = node.removeChildLong(NAME_SECTION_TXID);
if (lval != null) {
b.setTransactionId(lval);
}
lval = node.removeChildLong(
NAME_SECTION_ROLLING_UPGRADE_START_TIME);
if (lval != null) {
b.setRollingUpgradeStartTime(lval);
}
lval = node.removeChildLong(
NAME_SECTION_LAST_ALLOCATED_STRIPED_BLOCK_ID);
if (lval != null) {
b.setLastAllocatedStripedBlockId(lval);
}
node.verifyNoRemainingKeys("NameSection");
NameSystemSection s = b.build();
if (LOG.isDebugEnabled()) {
LOG.debug(SectionName.NS_INFO.name() + " writing header: {" +
TextFormat.printToString(s) + "}");
}
s.writeDelimitedTo(out);
recordSectionLength(SectionName.NS_INFO.name());
}
}
private | NameSectionProcessor |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/BaseRule.java | {
"start": 888,
"end": 1706
} | class ____ {
private String apiVersion;
private String kind;
private Map<String, String> metadata;
public String getApiVersion() {
return apiVersion;
}
public void setApiVersion(String apiVersion) {
this.apiVersion = apiVersion;
}
public String getKind() {
return kind;
}
public void setKind(String kind) {
this.kind = kind;
}
public Map<String, String> getMetadata() {
return metadata;
}
public void setMetadata(Map<String, String> metadata) {
this.metadata = metadata;
}
@Override
public String toString() {
return "BaseRule{" + "apiVersion='"
+ apiVersion + '\'' + ", kind='"
+ kind + '\'' + ", metadata="
+ metadata + '}';
}
}
| BaseRule |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java | {
"start": 4287,
"end": 13718
} | class ____ implements
DatanodeProtocolPB {
private final DatanodeProtocol impl;
private final int maxDataLength;
private static final ErrorReportResponseProto
VOID_ERROR_REPORT_RESPONSE_PROTO =
ErrorReportResponseProto.newBuilder().build();
private static final BlockReceivedAndDeletedResponseProto
VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE =
BlockReceivedAndDeletedResponseProto.newBuilder().build();
private static final ReportBadBlocksResponseProto
VOID_REPORT_BAD_BLOCK_RESPONSE =
ReportBadBlocksResponseProto.newBuilder().build();
private static final CommitBlockSynchronizationResponseProto
VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
CommitBlockSynchronizationResponseProto.newBuilder().build();
public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl,
int maxDataLength) {
this.impl = impl;
this.maxDataLength = maxDataLength;
}
@Override
public RegisterDatanodeResponseProto registerDatanode(
RpcController controller, RegisterDatanodeRequestProto request)
throws ServiceException {
DatanodeRegistration registration = PBHelper.convert(request
.getRegistration());
DatanodeRegistration registrationResp;
try {
registrationResp = impl.registerDatanode(registration);
} catch (IOException e) {
throw new ServiceException(e);
}
return RegisterDatanodeResponseProto.newBuilder()
.setRegistration(PBHelper.convert(registrationResp)).build();
}
@Override
public HeartbeatResponseProto sendHeartbeat(RpcController controller,
HeartbeatRequestProto request) throws ServiceException {
HeartbeatResponse response;
try {
final StorageReport[] report = PBHelperClient.convertStorageReports(
request.getReportsList());
VolumeFailureSummary volumeFailureSummary =
request.hasVolumeFailureSummary() ? PBHelper.convertVolumeFailureSummary(
request.getVolumeFailureSummary()) : null;
response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
report, request.getCacheCapacity(), request.getCacheUsed(),
request.getXmitsInProgress(),
request.getXceiverCount(), request.getFailedVolumes(),
volumeFailureSummary, request.getRequestFullBlockReportLease(),
PBHelper.convertSlowPeerInfo(request.getSlowPeersList()),
PBHelper.convertSlowDiskInfo(request.getSlowDisksList()));
} catch (IOException e) {
throw new ServiceException(e);
}
HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
.newBuilder();
DatanodeCommand[] cmds = response.getCommands();
if (cmds != null) {
for (int i = 0; i < cmds.length; i++) {
if (cmds[i] != null) {
builder.addCmds(PBHelper.convert(cmds[i]));
}
}
}
builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
RollingUpgradeStatus rollingUpdateStatus = response
.getRollingUpdateStatus();
if (rollingUpdateStatus != null) {
// V2 is always set for newer datanodes.
// To be compatible with older datanodes, V1 is set to null
// if the RU was finalized.
RollingUpgradeStatusProto rus = PBHelperClient.
convertRollingUpgradeStatus(rollingUpdateStatus);
builder.setRollingUpgradeStatusV2(rus);
if (!rollingUpdateStatus.isFinalized()) {
builder.setRollingUpgradeStatus(rus);
}
}
builder.setFullBlockReportLeaseId(response.getFullBlockReportLeaseId());
builder.setIsSlownode(response.getIsSlownode());
return builder.build();
}
@Override
public BlockReportResponseProto blockReport(RpcController controller,
BlockReportRequestProto request) throws ServiceException {
DatanodeCommand cmd = null;
StorageBlockReport[] report =
new StorageBlockReport[request.getReportsCount()];
int index = 0;
for (StorageBlockReportProto s : request.getReportsList()) {
final BlockListAsLongs blocks;
if (s.hasNumberOfBlocks()) { // new style buffer based reports
int num = (int)s.getNumberOfBlocks();
Preconditions.checkState(s.getBlocksCount() == 0,
"cannot send both blocks list and buffers");
blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList(),
maxDataLength);
} else {
blocks = BlockListAsLongs.decodeLongs(s.getBlocksList(), maxDataLength);
}
report[index++] = new StorageBlockReport(PBHelperClient.convert(s.getStorage()),
blocks);
}
try {
cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
request.getBlockPoolId(), report,
request.hasContext() ?
PBHelper.convert(request.getContext()) : null);
} catch (IOException e) {
throw new ServiceException(e);
}
BlockReportResponseProto.Builder builder =
BlockReportResponseProto.newBuilder();
if (cmd != null) {
builder.setCmd(PBHelper.convert(cmd));
}
return builder.build();
}
@Override
public CacheReportResponseProto cacheReport(RpcController controller,
CacheReportRequestProto request) throws ServiceException {
DatanodeCommand cmd = null;
try {
cmd = impl.cacheReport(
PBHelper.convert(request.getRegistration()),
request.getBlockPoolId(),
request.getBlocksList());
} catch (IOException e) {
throw new ServiceException(e);
}
CacheReportResponseProto.Builder builder =
CacheReportResponseProto.newBuilder();
if (cmd != null) {
builder.setCmd(PBHelper.convert(cmd));
}
return builder.build();
}
@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
RpcController controller, BlockReceivedAndDeletedRequestProto request)
throws ServiceException {
List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
StorageReceivedDeletedBlocks[] info =
new StorageReceivedDeletedBlocks[sBlocks.size()];
for (int i = 0; i < sBlocks.size(); i++) {
StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
ReceivedDeletedBlockInfo[] rdBlocks =
new ReceivedDeletedBlockInfo[list.size()];
for (int j = 0; j < list.size(); j++) {
rdBlocks[j] = PBHelper.convert(list.get(j));
}
if (sBlock.hasStorage()) {
info[i] = new StorageReceivedDeletedBlocks(
PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
} else {
info[i] = new StorageReceivedDeletedBlocks(
new DatanodeStorage(sBlock.getStorageUuid()), rdBlocks);
}
}
try {
impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
request.getBlockPoolId(), info);
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
@Override
public ErrorReportResponseProto errorReport(RpcController controller,
ErrorReportRequestProto request) throws ServiceException {
try {
impl.errorReport(PBHelper.convert(request.getRegistartion()),
request.getErrorCode(), request.getMsg());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_ERROR_REPORT_RESPONSE_PROTO;
}
@Override
public VersionResponseProto versionRequest(RpcController controller,
VersionRequestProto request) throws ServiceException {
NamespaceInfo info;
try {
info = impl.versionRequest();
} catch (IOException e) {
throw new ServiceException(e);
}
return VersionResponseProto.newBuilder()
.setInfo(PBHelper.convert(info)).build();
}
@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
ReportBadBlocksRequestProto request) throws ServiceException {
List<LocatedBlockProto> lbps = request.getBlocksList();
LocatedBlock [] blocks = new LocatedBlock [lbps.size()];
for(int i=0; i<lbps.size(); i++) {
blocks[i] = PBHelperClient.convertLocatedBlockProto(lbps.get(i));
}
try {
impl.reportBadBlocks(blocks);
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REPORT_BAD_BLOCK_RESPONSE;
}
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
RpcController controller, CommitBlockSynchronizationRequestProto request)
throws ServiceException {
List<DatanodeIDProto> dnprotos = request.getNewTaragetsList();
DatanodeID[] dns = new DatanodeID[dnprotos.size()];
for (int i = 0; i < dnprotos.size(); i++) {
dns[i] = PBHelperClient.convert(dnprotos.get(i));
}
final List<String> sidprotos = request.getNewTargetStoragesList();
final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]);
try {
impl.commitBlockSynchronization(PBHelperClient.convert(request.getBlock()),
request.getNewGenStamp(), request.getNewLength(),
request.getCloseFile(), request.getDeleteBlock(), dns, storageIDs);
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
}
| DatanodeProtocolServerSideTranslatorPB |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/proxy/DruidDriverTest.java | {
"start": 1506,
"end": 1728
} | class ____ extends FilterAdapter {
public InitErrorJdbcFilterAdapter() throws InstantiationException {
throw new InstantiationException("init error");
}
}
static | InitErrorJdbcFilterAdapter |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/layout/PatternLayout.java | {
"start": 14147,
"end": 14238
} | interface ____ extends Serializer, LocationAware {}
private static final | PatternSerializer |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/di/DiTest.java | {
"start": 2047,
"end": 3793
} | class ____ {
PlexusContainer container;
@BeforeEach
void setup() throws Exception {
container = new DefaultPlexusContainer(
new DefaultContainerConfiguration(),
new AbstractModule() {
@Override
protected void configure() {
bind(ModelParser.class).to(TestModelParser.class);
}
},
new SisuDiBridgeModule(false));
}
@Test
void testPlexus() throws Exception {
List<ModelParser> parsers = container.lookupList(ModelParser.class);
assertNotNull(parsers);
assertEquals(1, parsers.size());
Map<String, ModelParser> parsersMap = container.lookupMap(ModelParser.class);
assertNotNull(parsersMap);
assertEquals(1, parsersMap.size());
}
@Test
void testGuice() throws Exception {
List<Binding<ModelParser>> parsers =
container.lookup(Injector.class).findBindingsByType(TypeLiteral.get(ModelParser.class));
assertNotNull(parsers);
assertEquals(1, parsers.size());
}
@Test
void testDI() throws Exception {
DiInjected diInjected = new DiInjected();
container.lookup(org.apache.maven.di.Injector.class).injectInstance(diInjected);
assertNotNull(diInjected.parser);
assertNotNull(diInjected.parsers);
assertEquals(1, diInjected.parsers.size());
assertNotNull(diInjected.parsersMap);
assertEquals(1, diInjected.parsersMap.size());
}
static | DiTest1 |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/AclSetuserArgs.java | {
"start": 23966,
"end": 24096
} | class ____ extends KeywordArgument {
public NoPass() {
super(NOPASS);
}
}
private static | NoPass |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeohex.java | {
"start": 1523,
"end": 3798
} | class ____ extends AbstractConvertFunction {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "ToGeohex", ToGeohex::new);
private static final Map<DataType, BuildFactory> EVALUATORS = Map.ofEntries(
Map.entry(GEOHEX, (source, fieldEval) -> fieldEval),
Map.entry(LONG, (source, fieldEval) -> fieldEval),
Map.entry(KEYWORD, ToGeohexFromStringEvaluator.Factory::new),
Map.entry(TEXT, ToGeohexFromStringEvaluator.Factory::new)
);
@FunctionInfo(
returnType = "geohex",
preview = true,
appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.PREVIEW) },
description = """
Converts an input value to a `geohex` value.
A string will only be successfully converted if it respects the
`geohex` format, as described for the
[geohex grid aggregation](/reference/aggregations/search-aggregations-bucket-geohexgrid-aggregation.md).""",
examples = @Example(file = "spatial-grid", tag = "to_geohex")
)
public ToGeohex(
Source source,
@Param(
name = "field",
type = { "geohex", "long", "keyword", "text" },
description = "Input value. The input can be a single- or multi-valued column or an expression."
) Expression field
) {
super(source, field);
}
private ToGeohex(StreamInput in) throws IOException {
super(in);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
protected Map<DataType, BuildFactory> factories() {
return EVALUATORS;
}
@Override
public DataType dataType() {
return GEOHEX;
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new ToGeohex(source(), newChildren.get(0));
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, ToGeohex::new, field());
}
@ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class })
static long fromString(BytesRef in) {
return H3.stringToH3(in.utf8ToString());
}
}
| ToGeohex |
java | apache__camel | components/camel-jaxb/src/test/java/org/apache/camel/converter/jaxb/address/Address.java | {
"start": 1066,
"end": 1802
} | class ____ address complex type.
*
* <p>
* The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="address">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="addressLine1" type="{http://www.w3.org/2001/XMLSchema}string"/>
* <element name="addressLine2" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "address", propOrder = { "addressLine1", "addressLine2" })
public | for |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/testdata/BanSerializableReadNegativeCases.java | {
"start": 5442,
"end": 6209
} | class ____ extends ObjectInputStream {
ObjectInputStreamIsExempt() throws IOException, ClassNotFoundException {
super();
}
@Override
public Object readObjectOverride() throws IOException, ClassNotFoundException {
// Calling readObjectOverride is banned by the checker; therefore, overrides can
// call other banned methods without added risk.
return super.readObject();
}
}
@Override
public void readExternal(ObjectInput in) {
try {
testField = (Integer) in.readObject();
} catch (IOException | ClassNotFoundException e) {
}
}
@Override
public void writeExternal(ObjectOutput out) {
try {
out.writeObject(testField);
} catch (IOException e) {
}
}
}
| ObjectInputStreamIsExempt |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/jdbc/DataSourceOnlySqlScriptsTests.java | {
"start": 1817,
"end": 2521
} | class ____ {
private JdbcTemplate jdbcTemplate;
@Autowired
void setDataSource(DataSource dataSource) {
this.jdbcTemplate = new JdbcTemplate(dataSource);
}
@Test
@Order(1)
void classLevelScripts() {
assertThatTransaction().isNotActive();
assertNumUsers(1);
}
@Test
@Sql({ "drop-schema.sql", "schema.sql", "data.sql", "data-add-dogbert.sql" })
@Order(2)
void methodLevelScripts() {
assertThatTransaction().isNotActive();
assertNumUsers(2);
}
protected void assertNumUsers(int expected) {
assertThat(JdbcTestUtils.countRowsInTable(jdbcTemplate, "user")).as(
"Number of rows in the 'user' table.").isEqualTo(expected);
}
@Configuration
static | DataSourceOnlySqlScriptsTests |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng6972AllowAccessToGraphPackageTest.java | {
"start": 1004,
"end": 2553
} | class ____ extends AbstractMavenIntegrationTestCase {
@Test
public void testit() throws Exception {
// The testdir is computed from the location of this file.
final File testDir = extractResources("/mng-6972-allow-access-to-graph-package");
Verifier verifier;
/*
* We must first make sure that any artifact created
* by this test has been removed from the local
* repository. Failing to do this could cause
* unstable test results. Fortunately, the verifier
* makes it easy to do this.
*/
verifier = newVerifier(testDir.getAbsolutePath());
verifier.deleteArtifact("mng-6972-allow-access-to-graph-package", "build-plugin", "1.0", "jar");
verifier.deleteArtifact("mng-6972-allow-access-to-graph-package", "using-module", "1.0", "jar");
verifier = newVerifier(new File(testDir.getAbsolutePath(), "build-plugin").getAbsolutePath());
verifier.getSystemProperties().put("maven.multiModuleProjectDirectory", testDir.getAbsolutePath());
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier = newVerifier(new File(testDir.getAbsolutePath(), "using-module").getAbsolutePath());
verifier.getSystemProperties().put("maven.multiModuleProjectDirectory", testDir.getAbsolutePath());
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
}
}
| MavenITmng6972AllowAccessToGraphPackageTest |
java | dropwizard__dropwizard | docs/source/examples/core/src/main/java/io/dropwizard/documentation/db/DatabaseHealthCheck.java | {
"start": 126,
"end": 585
} | class ____ extends HealthCheck {
private final Database database;
public DatabaseHealthCheck(Database database) {
this.database = database;
}
@Override
protected Result check() throws Exception {
if (database.isConnected()) {
return Result.healthy();
} else {
return Result.unhealthy("Cannot connect to " + database.getUrl());
}
}
}
// core: DatabaseHealthCheck
| DatabaseHealthCheck |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeFromActionTest.java | {
"start": 1153,
"end": 5003
} | class ____ extends RxJavaTest {
@Test
public void fromAction() {
final AtomicInteger atomicInteger = new AtomicInteger();
Maybe.fromAction(new Action() {
@Override
public void run() throws Exception {
atomicInteger.incrementAndGet();
}
})
.test()
.assertResult();
assertEquals(1, atomicInteger.get());
}
@Test
public void fromActionTwice() {
final AtomicInteger atomicInteger = new AtomicInteger();
Action run = new Action() {
@Override
public void run() throws Exception {
atomicInteger.incrementAndGet();
}
};
Maybe.fromAction(run)
.test()
.assertResult();
assertEquals(1, atomicInteger.get());
Maybe.fromAction(run)
.test()
.assertResult();
assertEquals(2, atomicInteger.get());
}
@Test
public void fromActionInvokesLazy() {
final AtomicInteger atomicInteger = new AtomicInteger();
Maybe<Object> maybe = Maybe.fromAction(new Action() {
@Override
public void run() throws Exception {
atomicInteger.incrementAndGet();
}
});
assertEquals(0, atomicInteger.get());
maybe
.test()
.assertResult();
assertEquals(1, atomicInteger.get());
}
@Test
public void fromActionThrows() {
Maybe.fromAction(new Action() {
@Override
public void run() throws Exception {
throw new UnsupportedOperationException();
}
})
.test()
.assertFailure(UnsupportedOperationException.class);
}
@SuppressWarnings("unchecked")
@Test
public void callable() throws Throwable {
final int[] counter = { 0 };
Maybe<Void> m = Maybe.fromAction(new Action() {
@Override
public void run() throws Exception {
counter[0]++;
}
});
assertTrue(m.getClass().toString(), m instanceof Supplier);
assertNull(((Supplier<Void>)m).get());
assertEquals(1, counter[0]);
}
@Test
public void noErrorLoss() throws Exception {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final CountDownLatch cdl1 = new CountDownLatch(1);
final CountDownLatch cdl2 = new CountDownLatch(1);
TestObserver<Object> to = Maybe.fromAction(new Action() {
@Override
public void run() throws Exception {
cdl1.countDown();
cdl2.await(5, TimeUnit.SECONDS);
}
}).subscribeOn(Schedulers.single()).test();
assertTrue(cdl1.await(5, TimeUnit.SECONDS));
to.dispose();
int timeout = 10;
while (timeout-- > 0 && errors.isEmpty()) {
Thread.sleep(100);
}
TestHelper.assertUndeliverable(errors, 0, InterruptedException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void disposedUpfront() throws Throwable {
Action run = mock(Action.class);
Maybe.fromAction(run)
.test(true)
.assertEmpty();
verify(run, never()).run();
}
@Test
public void cancelWhileRunning() {
final TestObserver<Object> to = new TestObserver<>();
Maybe.fromAction(new Action() {
@Override
public void run() throws Exception {
to.dispose();
}
})
.subscribeWith(to)
.assertEmpty();
assertTrue(to.isDisposed());
}
}
| MaybeFromActionTest |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/healthcheck/v2/processor/MysqlHealthCheckProcessor.java | {
"start": 4370,
"end": 9194
} | class ____ implements Runnable {
private final HealthCheckTaskV2 task;
private final Service service;
private final HealthCheckInstancePublishInfo instance;
private final ClusterMetadata metadata;
private long startTime = System.currentTimeMillis();
public MysqlCheckTask(HealthCheckTaskV2 task, Service service, HealthCheckInstancePublishInfo instance,
ClusterMetadata metadata) {
this.task = task;
this.service = service;
this.instance = instance;
this.metadata = metadata;
}
@Override
public void run() {
Statement statement = null;
ResultSet resultSet = null;
try {
String clusterName = instance.getCluster();
String key =
service.getGroupedServiceName() + ":" + clusterName + ":" + instance.getIp() + ":" + instance
.getPort();
Connection connection = CONNECTION_POOL.get(key);
Mysql config = (Mysql) metadata.getHealthChecker();
if (connection == null || connection.isClosed()) {
String url = "jdbc:mysql://" + instance.getIp() + ":" + instance.getPort() + "?connectTimeout="
+ CONNECT_TIMEOUT_MS + "&socketTimeout=" + CONNECT_TIMEOUT_MS + "&loginTimeout=" + 1;
connection = DriverManager.getConnection(url, config.getUser(), config.getPwd());
CONNECTION_POOL.put(key, connection);
}
statement = connection.createStatement();
statement.setQueryTimeout(1);
resultSet = statement.executeQuery(config.getCmd());
int resultColumnIndex = 2;
if (CHECK_MYSQL_MASTER_SQL.equals(config.getCmd())) {
resultSet.next();
if (MYSQL_SLAVE_READONLY.equals(resultSet.getString(resultColumnIndex))) {
throw new IllegalStateException("current node is slave!");
}
}
healthCheckCommon.checkOk(task, service, "mysql:+ok");
healthCheckCommon.reEvaluateCheckRt(System.currentTimeMillis() - startTime, task,
switchDomain.getMysqlHealthParams());
} catch (SQLException e) {
// fail immediately
healthCheckCommon.checkFailNow(task, service, "mysql:" + e.getMessage());
healthCheckCommon.reEvaluateCheckRt(switchDomain.getHttpHealthParams().getMax(), task,
switchDomain.getMysqlHealthParams());
} catch (Throwable t) {
Throwable cause = t;
int maxStackDepth = 50;
for (int deepth = 0; deepth < maxStackDepth && cause != null; deepth++) {
if (cause instanceof SocketTimeoutException || cause instanceof ConnectException
|| cause instanceof TimeoutException || cause.getCause() instanceof TimeoutException) {
healthCheckCommon.checkFail(task, service, "mysql:timeout:" + cause.getMessage());
healthCheckCommon.reEvaluateCheckRt(task.getCheckRtNormalized() * 2, task,
switchDomain.getMysqlHealthParams());
return;
}
cause = cause.getCause();
}
// connection error, probably not reachable
healthCheckCommon.checkFail(task, service, "mysql:error:" + t.getMessage());
healthCheckCommon.reEvaluateCheckRt(switchDomain.getMysqlHealthParams().getMax(), task,
switchDomain.getMysqlHealthParams());
} finally {
instance.setCheckRt(System.currentTimeMillis() - startTime);
if (statement != null) {
try {
statement.close();
} catch (SQLException e) {
Loggers.SRV_LOG.error("[MYSQL-CHECK] failed to close statement:" + statement, e);
}
}
if (resultSet != null) {
try {
resultSet.close();
} catch (SQLException e) {
Loggers.SRV_LOG.error("[MYSQL-CHECK] failed to close resultSet:" + resultSet, e);
}
}
}
}
}
}
| MysqlCheckTask |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/event/DeadlockProducer.java | {
"start": 915,
"end": 1454
} | class ____ {
private final ApplicationEventPublisher eventPublisher;
@Inject
public DeadlockProducer(ApplicationEventPublisher eventPublisher) {
this.eventPublisher = eventPublisher;
}
@PostConstruct
public void init() {
try {
eventPublisher.publishEventAsync(new ApplicationEvent("Event")).get();
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}
}
public String method() {
return "value";
}
}
| DeadlockProducer |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java | {
"start": 1268,
"end": 1432
} | class ____ implements Writable{
@InterfaceAudience.Public
@InterfaceStability.Evolving
/**
* Task Completion Statuses
*/
static public | TaskCompletionEvent |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/write/DataWriter.java | {
"start": 3557,
"end": 6963
} | interface ____<T> extends Closeable {
/**
* Writes one record with metadata.
* <p>
* This method is used by group-based row-level operations to pass back metadata for records
* that are updated or copied. New records added during a MERGE operation are written using
* {@link #write(Object)} as there is no metadata associated with those records.
* <p>
* If this method fails (by throwing an exception), {@link #abort()} will be called and this
* data writer is considered to have been failed.
*
* @throws IOException if failure happens during disk/network IO like writing files.
*
* @since 4.0.0
*/
default void write(T metadata, T record) throws IOException {
write(record);
}
/**
* Writes one record.
* <p>
* If this method fails (by throwing an exception), {@link #abort()} will be called and this
* data writer is considered to have been failed.
*
* @throws IOException if failure happens during disk/network IO like writing files.
*/
void write(T record) throws IOException;
/**
* Writes all records provided by the given iterator. By default, it calls the {@link #write}
* method for each record in the iterator.
* <p>
* If this method fails (by throwing an exception), {@link #abort()} will be called and this
* data writer is considered to have been failed.
*
* @throws IOException if failure happens during disk/network IO like writing files.
*
* @since 4.0.0
*/
default void writeAll(Iterator<T> records) throws IOException {
while (records.hasNext()) {
write(records.next());
}
}
/**
* Commits this writer after all records are written successfully, returns a commit message which
* will be sent back to driver side and passed to
* {@link BatchWrite#commit(WriterCommitMessage[])}.
* <p>
* The written data should only be visible to data source readers after
* {@link BatchWrite#commit(WriterCommitMessage[])} succeeds, which means this method
* should still "hide" the written data and ask the {@link BatchWrite} at driver side to
* do the final commit via {@link WriterCommitMessage}.
* <p>
* If this method fails (by throwing an exception), {@link #abort()} will be called and this
* data writer is considered to have been failed.
*
* @throws IOException if failure happens during disk/network IO like writing files.
*/
WriterCommitMessage commit() throws IOException;
/**
* Aborts this writer if it is failed. Implementations should clean up the data for already
* written records.
* <p>
* This method will only be called if there is one record failed to write, or {@link #commit()}
* failed.
* <p>
* If this method fails(by throwing an exception), the underlying data source may have garbage
* that need to be cleaned by {@link BatchWrite#abort(WriterCommitMessage[])} or manually,
* but these garbage should not be visible to data source readers.
*
* @throws IOException if failure happens during disk/network IO like writing files.
*/
void abort() throws IOException;
/**
* Returns an array of custom task metrics. By default it returns empty array. Note that it is
* not recommended to put heavy logic in this method as it may affect writing performance.
*/
default CustomTaskMetric[] currentMetricsValues() {
return new CustomTaskMetric[]{};
}
}
| DataWriter |
java | apache__camel | core/camel-main/src/main/java/org/apache/camel/main/DefaultConfigurationProperties.java | {
"start": 73033,
"end": 80187
} | class ____ can be separated by comma.
*/
public T withStreamCachingDenyClasses(String streamCachingDenyClasses) {
this.streamCachingDenyClasses = streamCachingDenyClasses;
return (T) this;
}
/**
* To enable stream caching spooling to disk. This means, for large stream messages (over 128 KB by default) will be
* cached in a temporary file instead, and Camel will handle deleting the temporary file once the cached stream is
* no longer necessary.
*
* Default is false.
*/
public T withStreamCachingSpoolEnabled(boolean streamCachingSpoolEnabled) {
this.streamCachingSpoolEnabled = streamCachingSpoolEnabled;
return (T) this;
}
/**
* Sets the stream caching spool (temporary) directory to use for overflow and spooling to disk.
*
* If no spool directory has been explicit configured, then a temporary directory is created in the java.io.tmpdir
* directory.
*/
public T withStreamCachingSpoolDirectory(String streamCachingSpoolDirectory) {
this.streamCachingSpoolDirectory = streamCachingSpoolDirectory;
return (T) this;
}
/**
* Sets a stream caching cipher name to use when spooling to disk to write with encryption. By default the data is
* not encrypted.
*/
public T withStreamCachingSpoolCipher(String streamCachingSpoolCipher) {
this.streamCachingSpoolCipher = streamCachingSpoolCipher;
return (T) this;
}
/**
* Stream caching threshold in bytes when overflow to disk is activated. The default threshold is 128kb. Use -1 to
* disable overflow to disk.
*/
public T withStreamCachingSpoolThreshold(long streamCachingSpoolThreshold) {
this.streamCachingSpoolThreshold = streamCachingSpoolThreshold;
return (T) this;
}
/**
* Sets a percentage (1-99) of used heap memory threshold to activate stream caching spooling to disk.
*/
public T withStreamCachingSpoolUsedHeapMemoryThreshold(int streamCachingSpoolUsedHeapMemoryThreshold) {
this.streamCachingSpoolUsedHeapMemoryThreshold = streamCachingSpoolUsedHeapMemoryThreshold;
return (T) this;
}
/**
* Sets what the upper bounds should be when streamCachingSpoolUsedHeapMemoryThreshold is in use.
*/
public T withStreamCachingSpoolUsedHeapMemoryLimit(String streamCachingSpoolUsedHeapMemoryLimit) {
this.streamCachingSpoolUsedHeapMemoryLimit = streamCachingSpoolUsedHeapMemoryLimit;
return (T) this;
}
/**
* Sets whether if just any of the org.apache.camel.spi.StreamCachingStrategy.SpoolRule rules returns true then
* shouldSpoolCache(long) returns true, to allow spooling to disk. If this option is false, then all the
* org.apache.camel.spi.StreamCachingStrategy.SpoolRule must return true.
*
* The default value is false which means that all the rules must return true.
*/
public T withStreamCachingAnySpoolRules(boolean streamCachingAnySpoolRules) {
this.streamCachingAnySpoolRules = streamCachingAnySpoolRules;
return (T) this;
}
/**
* Sets the stream caching buffer size to use when allocating in-memory buffers used for in-memory stream caches.
*
* The default size is 4096.
*/
public T withStreamCachingBufferSize(int streamCachingBufferSize) {
this.streamCachingBufferSize = streamCachingBufferSize;
return (T) this;
}
/**
* Whether to remove stream caching temporary directory when stopping. This option is default true.
*/
public T withStreamCachingRemoveSpoolDirectoryWhenStopping(boolean streamCachingRemoveSpoolDirectoryWhenStopping) {
this.streamCachingRemoveSpoolDirectoryWhenStopping = streamCachingRemoveSpoolDirectoryWhenStopping;
return (T) this;
}
/**
* Sets whether stream caching statistics is enabled.
*/
public T withStreamCachingStatisticsEnabled(boolean streamCachingStatisticsEnabled) {
this.streamCachingStatisticsEnabled = streamCachingStatisticsEnabled;
return (T) this;
}
/**
* Sets whether type converter statistics is enabled.
*
* By default the type converter utilization statistics is disabled. Notice: If enabled then there is a slight
* performance impact under very heavy load.
*/
public T withTypeConverterStatisticsEnabled(boolean typeConverterStatisticsEnabled) {
this.typeConverterStatisticsEnabled = typeConverterStatisticsEnabled;
return (T) this;
}
/**
* Sets whether tracing is enabled or not.
*
* Default is false.
*/
public T withTracing(boolean tracing) {
this.tracing = tracing;
return (T) this;
}
/**
* Whether to set tracing on standby. If on standby then the tracer is installed and made available. Then the tracer
* can be enabled later at runtime via JMX or via {@link Tracer#setEnabled(boolean)}.
*
* Default is false.
*/
public T withTracingStandby(boolean tracingStandby) {
this.tracingStandby = tracingStandby;
return (T) this;
}
/**
* Whether tracing should trace inner details from route templates (or kamelets). Turning this on increases the
* verbosity of tracing by including events from internal routes in the templates or kamelets.
*
* Default is false.
*/
public T withTracingTemplates(boolean tracingTemplates) {
this.tracingTemplates = tracingTemplates;
return (T) this;
}
/**
* Sets whether message history is enabled or not.
*
* Default is false.
*/
public T withMessageHistory(boolean messageHistory) {
this.messageHistory = messageHistory;
return (T) this;
}
/**
 * Whether to capture precise source location:line-number for all EIPs in Camel routes.
 *
 * Enabling this will impact parsing Java based routes (also Groovy, etc.) on startup as this uses JDK
 * StackTraceElement to calculate the location from the Camel route, which comes with a performance cost. This only
 * impacts startup, not the performance of the routes at runtime.
 *
 * @param  sourceLocationEnabled true to record source file:line for each EIP
 * @return this builder, for fluent configuration chaining
 */
public T withSourceLocationEnabled(boolean sourceLocationEnabled) {
    this.sourceLocationEnabled = sourceLocationEnabled;
    return (T) this;
}
/**
 * Sets whether log mask is enabled or not.
 *
 * Default is false.
 *
 * @param  logMask true to mask sensitive information in logged message bodies
 * @return this builder, for fluent configuration chaining
 */
public T withLogMask(boolean logMask) {
    this.logMask = logMask;
    return (T) this;
}
/**
 * Sets whether to log exhausted message body with message history.
 *
 * Default is false.
 *
 * @param  logExhaustedMessageBody true to include the message body when logging exhausted exchanges
 * @return this builder, for fluent configuration chaining
 */
public T withLogExhaustedMessageBody(boolean logExhaustedMessageBody) {
    this.logExhaustedMessageBody = logExhaustedMessageBody;
    return (T) this;
}
/**
* The global name to use for Log EIP
*
* The name is default the routeId or the source:line if source location is enabled. You can also specify the name
* using tokens:
*
* <br/>
* ${class} - the logger | names |
java | reactor__reactor-core | reactor-core/src/jcstress/java/reactor/core/publisher/FluxReplayStressTest.java | {
"start": 21430,
"end": 22313
} | class ____
extends RefCntConcurrentSubscriptionBaseStressTest<Object> {
// Shares an empty source with fusion disabled (hide()); the Duration is passed to the
// base stress test — presumably the refCount grace period, TODO confirm against
// RefCntConcurrentSubscriptionBaseStressTest.
public RefCntGraceConcurrentSubscriptionEmptyNoneFusionStressTest() {
    super(Flux.empty().hide(), Duration.ofSeconds(1));
}
// JCStress actor: subscribes the first subscriber, racing with subscribe2.
@Actor
public void subscribe1() {
    sharedSource.subscribe(subscriber1);
}
// JCStress actor: subscribes the second subscriber, racing with subscribe1.
@Actor
public void subscribe2() {
    sharedSource.subscribe(subscriber2);
}
// Runs after both actors: folds each subscriber's onNext/onComplete/onError counts
// into the six-slot result so the harness can match them against the declared
// acceptable outcomes.
@Arbiter
public void arbiter(IIIIII_Result r) {
    r.r1 = subscriber1.onNextCalls.get();
    r.r2 = subscriber2.onNextCalls.get();
    r.r3 = subscriber1.onCompleteCalls.get();
    r.r4 = subscriber2.onCompleteCalls.get();
    r.r5 = subscriber1.onErrorCalls.get();
    r.r6 = subscriber2.onErrorCalls.get();
}
}
@JCStressTest
@Outcome(id = {"0, 0, 1, 1, 0, 0"}, expect = ACCEPTABLE, desc = "concurrent subscription succeeded")
@State
public static | RefCntGraceConcurrentSubscriptionEmptyNoneFusionStressTest |
java | apache__dubbo | dubbo-remoting/dubbo-remoting-netty/src/main/java/org/apache/dubbo/remoting/transport/netty/NettyHelper.java | {
"start": 1774,
"end": 3684
} | class ____ extends AbstractInternalLogger {
    // Adapter bridging Netty's InternalLogger API onto Dubbo's ErrorTypeAwareLogger.
    // debug/info calls delegate unchanged; warn/error are routed through the
    // error-type-aware overloads with a generic INTERNAL_ERROR code and cause text,
    // since Netty callers carry no Dubbo error-type information.

    // Generic "extended message" passed to the error-type-aware warn/error overloads.
    public static final String LOGGER_CAUSE_STRING = "unknown error in remoting-netty module";

    // Backing Dubbo logger; set once in the constructor.
    private ErrorTypeAwareLogger logger;

    DubboLogger(ErrorTypeAwareLogger logger) {
        this.logger = logger;
    }

    @Override
    public boolean isDebugEnabled() {
        return logger.isDebugEnabled();
    }

    @Override
    public boolean isInfoEnabled() {
        return logger.isInfoEnabled();
    }

    @Override
    public boolean isWarnEnabled() {
        return logger.isWarnEnabled();
    }

    @Override
    public boolean isErrorEnabled() {
        return logger.isErrorEnabled();
    }

    @Override
    public void debug(String msg) {
        logger.debug(msg);
    }

    @Override
    public void debug(String msg, Throwable cause) {
        logger.debug(msg, cause);
    }

    @Override
    public void info(String msg) {
        logger.info(msg);
    }

    @Override
    public void info(String msg, Throwable cause) {
        logger.info(msg, cause);
    }

    // warn/error: third argument (empty string) is the "extended information" slot of
    // the error-type-aware API, intentionally left blank for Netty-originated messages.
    @Override
    public void warn(String msg) {
        logger.warn(INTERNAL_ERROR, LOGGER_CAUSE_STRING, "", msg);
    }

    @Override
    public void warn(String msg, Throwable cause) {
        logger.warn(INTERNAL_ERROR, LOGGER_CAUSE_STRING, "", msg, cause);
    }

    @Override
    public void error(String msg) {
        logger.error(INTERNAL_ERROR, LOGGER_CAUSE_STRING, "", msg);
    }

    @Override
    public void error(String msg, Throwable cause) {
        logger.error(INTERNAL_ERROR, LOGGER_CAUSE_STRING, "", msg, cause);
    }

    @Override
    public String toString() {
        return logger.toString();
    }
}
}
| DubboLogger |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/DynamicPropertiesContextCustomizerFactoryTests.java | {
"start": 3524,
"end": 3681
} | class ____ extends BaseDynamicPropertySource {
    // Test fixture: declares a second @DynamicPropertySource method in a subclass —
    // presumably so the customizer factory test can verify that such methods are
    // detected across a class hierarchy (TODO confirm against the enclosing test).
    @DynamicPropertySource
    static void p2(DynamicPropertyRegistry registry) {
        // intentionally empty: only the method's presence matters to the test
    }
}
}
| SubDynamicPropertySource |
java | apache__kafka | storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerConfig.java | {
"start": 1965,
"end": 14012
} | class ____ {
public static final String REMOTE_LOG_METADATA_TOPIC_NAME = "__remote_log_metadata";
public static final String REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR_PROP = "remote.log.metadata.topic.replication.factor";
public static final String REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP = "remote.log.metadata.topic.num.partitions";
public static final String REMOTE_LOG_METADATA_TOPIC_RETENTION_MS_PROP = "remote.log.metadata.topic.retention.ms";
public static final String REMOTE_LOG_METADATA_CONSUME_WAIT_MS_PROP = "remote.log.metadata.consume.wait.ms";
public static final String REMOTE_LOG_METADATA_INITIALIZATION_RETRY_MAX_TIMEOUT_MS_PROP = "remote.log.metadata.initialization.retry.max.timeout.ms";
public static final String REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS_PROP = "remote.log.metadata.initialization.retry.interval.ms";
public static final int DEFAULT_REMOTE_LOG_METADATA_TOPIC_PARTITIONS = 50;
public static final long DEFAULT_REMOTE_LOG_METADATA_TOPIC_RETENTION_MS = -1L;
public static final short DEFAULT_REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR = 3;
public static final long DEFAULT_REMOTE_LOG_METADATA_CONSUME_WAIT_MS = 2 * 60 * 1000L;
public static final long DEFAULT_REMOTE_LOG_METADATA_INITIALIZATION_RETRY_MAX_TIMEOUT_MS = 2 * 60 * 1000L;
public static final long DEFAULT_REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS = 100L;
public static final String REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR_DOC = "Replication factor of remote log metadata topic.";
public static final String REMOTE_LOG_METADATA_TOPIC_PARTITIONS_DOC = "The number of partitions for remote log metadata topic.";
public static final String REMOTE_LOG_METADATA_TOPIC_RETENTION_MS_DOC = "Retention of remote log metadata topic in milliseconds. " +
"Default: -1, that means unlimited. Users can configure this value based on their use cases. " +
"To avoid any data loss, this value should be more than the maximum retention period of any topic enabled with " +
"tiered storage in the cluster.";
public static final String REMOTE_LOG_METADATA_CONSUME_WAIT_MS_DOC = "The amount of time in milliseconds to wait for the local consumer to " +
"receive the published event.";
public static final String REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS_DOC = "The retry interval in milliseconds for " +
"retrying RemoteLogMetadataManager resources initialization again.";
public static final String REMOTE_LOG_METADATA_INITIALIZATION_RETRY_MAX_TIMEOUT_MS_DOC = "The maximum amount of time in milliseconds " +
"for retrying RemoteLogMetadataManager resources initialization. " +
"For TopicBasedRemoteLogMetadataManager's initialization, the timer starts after this local broker is ready to process requests " +
"(primarily for ensuring the local cluster is ready when metadata is stored locally as an internal topic). " +
"If initialization fails within this timeout, this broker process will terminate.";
public static final String REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX = "remote.log.metadata.common.client.";
public static final String REMOTE_LOG_METADATA_PRODUCER_PREFIX = "remote.log.metadata.producer.";
public static final String REMOTE_LOG_METADATA_CONSUMER_PREFIX = "remote.log.metadata.consumer.";
public static final String BROKER_ID = "broker.id";
public static final String LOG_DIR = "log.dir";
private static final String REMOTE_LOG_METADATA_CLIENT_PREFIX = "__remote_log_metadata_client";
private static final ConfigDef CONFIG = new ConfigDef();
static {
CONFIG.define(REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR_PROP, SHORT, DEFAULT_REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR, atLeast(1), LOW,
REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR_DOC)
.define(REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP, INT, DEFAULT_REMOTE_LOG_METADATA_TOPIC_PARTITIONS, atLeast(1), LOW,
REMOTE_LOG_METADATA_TOPIC_PARTITIONS_DOC)
.define(REMOTE_LOG_METADATA_TOPIC_RETENTION_MS_PROP, LONG, DEFAULT_REMOTE_LOG_METADATA_TOPIC_RETENTION_MS, LOW,
REMOTE_LOG_METADATA_TOPIC_RETENTION_MS_DOC)
.define(REMOTE_LOG_METADATA_CONSUME_WAIT_MS_PROP, LONG, DEFAULT_REMOTE_LOG_METADATA_CONSUME_WAIT_MS, atLeast(0), LOW,
REMOTE_LOG_METADATA_CONSUME_WAIT_MS_DOC)
.define(REMOTE_LOG_METADATA_INITIALIZATION_RETRY_MAX_TIMEOUT_MS_PROP, LONG,
DEFAULT_REMOTE_LOG_METADATA_INITIALIZATION_RETRY_MAX_TIMEOUT_MS, atLeast(0), LOW,
REMOTE_LOG_METADATA_INITIALIZATION_RETRY_MAX_TIMEOUT_MS_DOC)
.define(REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS_PROP, LONG,
DEFAULT_REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS, atLeast(0), LOW,
REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS_DOC);
}
private final String clientIdPrefix;
private final int metadataTopicPartitionsCount;
private final String logDir;
private final long consumeWaitMs;
private final long metadataTopicRetentionMs;
private final short metadataTopicReplicationFactor;
private final long initializationRetryMaxTimeoutMs;
private final long initializationRetryIntervalMs;
private Map<String, Object> commonProps;
private Map<String, Object> consumerProps;
private Map<String, Object> producerProps;
/**
 * Parses and validates the topic-based remote-log-metadata-manager configuration.
 *
 * Reads {@code log.dir} and {@code broker.id} directly from the raw map (they are not
 * part of {@code CONFIG}), parses the remaining keys through {@code CONFIG}, and then
 * splits out common/producer/consumer client properties.
 *
 * @throws NullPointerException     if {@code props} is null
 * @throws IllegalArgumentException if {@code log.dir} is missing/empty, or the topic
 *                                  retention is neither -1 (unlimited) nor positive
 */
public TopicBasedRemoteLogMetadataManagerConfig(Map<String, ?> props) {
    Objects.requireNonNull(props, "props can not be null");
    Map<String, Object> parsedConfigs = CONFIG.parse(props);
    logDir = (String) props.get(LOG_DIR);
    if (logDir == null || logDir.isEmpty()) {
        throw new IllegalArgumentException(LOG_DIR + " config must not be null or empty.");
    }
    metadataTopicPartitionsCount = (int) parsedConfigs.get(REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP);
    metadataTopicReplicationFactor = (short) parsedConfigs.get(REMOTE_LOG_METADATA_TOPIC_REPLICATION_FACTOR_PROP);
    metadataTopicRetentionMs = (long) parsedConfigs.get(REMOTE_LOG_METADATA_TOPIC_RETENTION_MS_PROP);
    // -1 means unlimited retention; any other non-positive value is rejected.
    if (metadataTopicRetentionMs != -1 && metadataTopicRetentionMs <= 0) {
        throw new IllegalArgumentException("Invalid metadata topic retention in millis: " + metadataTopicRetentionMs);
    }
    consumeWaitMs = (long) parsedConfigs.get(REMOTE_LOG_METADATA_CONSUME_WAIT_MS_PROP);
    initializationRetryIntervalMs = (long) parsedConfigs.get(REMOTE_LOG_METADATA_INITIALIZATION_RETRY_INTERVAL_MS_PROP);
    initializationRetryMaxTimeoutMs = (long) parsedConfigs.get(REMOTE_LOG_METADATA_INITIALIZATION_RETRY_MAX_TIMEOUT_MS_PROP);
    // Client id embeds the broker id so per-broker clients are distinguishable.
    clientIdPrefix = REMOTE_LOG_METADATA_CLIENT_PREFIX + "_" + props.get(BROKER_ID);
    initializeProducerConsumerProperties(props);
}
/**
 * Splits the raw config map into common, producer-only and consumer-only client
 * properties based on their key prefixes (the prefix is stripped from the key).
 * Producer/consumer-specific entries take precedence over common entries because
 * they are put into the merged map after the common ones.
 */
private void initializeProducerConsumerProperties(Map<String, ?> configs) {
    Map<String, Object> commonClientConfigs = new HashMap<>();
    Map<String, Object> producerOnlyConfigs = new HashMap<>();
    Map<String, Object> consumerOnlyConfigs = new HashMap<>();
    for (Map.Entry<String, ?> entry : configs.entrySet()) {
        String key = entry.getKey();
        if (key.startsWith(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX)) {
            commonClientConfigs.put(key.substring(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX.length()), entry.getValue());
        } else if (key.startsWith(REMOTE_LOG_METADATA_PRODUCER_PREFIX)) {
            producerOnlyConfigs.put(key.substring(REMOTE_LOG_METADATA_PRODUCER_PREFIX.length()), entry.getValue());
        } else if (key.startsWith(REMOTE_LOG_METADATA_CONSUMER_PREFIX)) {
            consumerOnlyConfigs.put(key.substring(REMOTE_LOG_METADATA_CONSUMER_PREFIX.length()), entry.getValue());
        }
    }
    commonProps = new HashMap<>(commonClientConfigs);
    Map<String, Object> allProducerConfigs = new HashMap<>(commonClientConfigs);
    allProducerConfigs.putAll(producerOnlyConfigs);
    producerProps = createProducerProps(allProducerConfigs);
    Map<String, Object> allConsumerConfigs = new HashMap<>(commonClientConfigs);
    allConsumerConfigs.putAll(consumerOnlyConfigs);
    consumerProps = createConsumerProps(allConsumerConfigs);
}
public String remoteLogMetadataTopicName() {
return REMOTE_LOG_METADATA_TOPIC_NAME;
}
public int metadataTopicPartitionsCount() {
return metadataTopicPartitionsCount;
}
public short metadataTopicReplicationFactor() {
return metadataTopicReplicationFactor;
}
public long metadataTopicRetentionMs() {
return metadataTopicRetentionMs;
}
public long consumeWaitMs() {
return consumeWaitMs;
}
public long initializationRetryMaxTimeoutMs() {
return initializationRetryMaxTimeoutMs;
}
public long initializationRetryIntervalMs() {
return initializationRetryIntervalMs;
}
public String logDir() {
return logDir;
}
public Map<String, Object> commonProperties() {
return commonProps;
}
public Map<String, Object> consumerProperties() {
return consumerProps;
}
public Map<String, Object> producerProperties() {
return producerProps;
}
/**
 * Builds the consumer client properties, forcing the settings the metadata consumer
 * requires: manual offset management, read-from-earliest, visibility of internal
 * topics, and byte-array deserialization.
 *
 * NOTE(review): unlike {@code createProducerProps}, the returned map is mutable —
 * confirm whether callers rely on mutating it; otherwise wrapping it in
 * Collections.unmodifiableMap would make the two methods consistent.
 */
private Map<String, Object> createConsumerProps(Map<String, Object> allConsumerConfigs) {
    Map<String, Object> props = new HashMap<>(allConsumerConfigs);
    props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientIdPrefix + "_consumer");
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG, false);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    return props;
}
/**
 * Builds the producer client properties, forcing the settings the metadata producer
 * requires: acks=all and idempotence (no loss/duplication of metadata records) and
 * byte-array serialization. The returned map is unmodifiable.
 */
private Map<String, Object> createProducerProps(Map<String, Object> allProducerConfigs) {
    Map<String, Object> props = new HashMap<>(allProducerConfigs);
    props.put(ProducerConfig.CLIENT_ID_CONFIG, clientIdPrefix + "_producer");
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    return Collections.unmodifiableMap(props);
}
@Override
public String toString() {
return "TopicBasedRemoteLogMetadataManagerConfig{" +
"clientIdPrefix='" + clientIdPrefix + '\'' +
", metadataTopicPartitionsCount=" + metadataTopicPartitionsCount +
", consumeWaitMs=" + consumeWaitMs +
", metadataTopicRetentionMs=" + metadataTopicRetentionMs +
", metadataTopicReplicationFactor=" + metadataTopicReplicationFactor +
", initializationRetryMaxTimeoutMs=" + initializationRetryMaxTimeoutMs +
", initializationRetryIntervalMs=" + initializationRetryIntervalMs +
", commonProps=" + configMapToRedactedString(commonProps, AdminClientConfig.configDef()) +
", consumerProps=" + configMapToRedactedString(consumerProps, ConsumerConfig.configDef()) +
", producerProps=" + configMapToRedactedString(producerProps, ProducerConfig.configDef()) +
'}';
}
public static void main(String[] args) {
System.out.println(CONFIG.toHtml(4, config -> "remote_log_metadata_manager_" + config));
}
}
| TopicBasedRemoteLogMetadataManagerConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/resultcache/AggregateEntity.java | {
"start": 339,
"end": 1226
} | class ____ {
    // Test entity aggregating two TestEntity associations; CascadeType.ALL lets tests
    // persist the whole graph through this root. The no-arg constructor is protected
    // for the persistence provider only.

    @Id
    private Integer id;
    private String name;
    @ManyToOne(cascade = CascadeType.ALL)
    private TestEntity value1;
    @ManyToOne(cascade = CascadeType.ALL)
    private TestEntity value2;

    protected AggregateEntity() {
    }

    public AggregateEntity(
            Integer id,
            String name,
            TestEntity value1,
            TestEntity value2) {
        this.id = id;
        this.name = name;
        this.value1 = value1;
        this.value2 = value2;
    }

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public TestEntity getValue1() {
        return value1;
    }

    public void setValue1(TestEntity value1) {
        this.value1 = value1;
    }

    public TestEntity getValue2() {
        return value2;
    }

    public void setValue2(TestEntity value2) {
        this.value2 = value2;
    }
}
| AggregateEntity |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/jdbc/spi/DescriptiveJsonGeneratingVisitor.java | {
"start": 1657,
"end": 7229
} | class ____ extends JsonGeneratingVisitor {
// Per-entity-name identity sets of instances currently being serialized; used to
// break cycles in the entity graph. Lazily created on first entity encountered.
private Map<String, IdentitySet<Object>> circularityTracker;

/**
 * Serializes an entity as a JSON object. The identifier is always written; the
 * remaining properties are written only if this instance is not already being
 * serialized further up the stack (circularity guard), in which case the object
 * degenerates to just its id.
 */
@Override
protected void serializeEntity(Object value, EntityMappingType entityType, WrapperOptions options, JsonDocumentWriter writer) {
    final EntityIdentifierMapping identifierMapping = entityType.getIdentifierMapping();
    trackingEntity( value, entityType, shouldProcessEntity -> {
        try {
            writer.startObject();
            writer.objectKey( identifierMapping.getAttributeName() );
            serializeEntityIdentifier( value, identifierMapping, options, writer );
            if ( shouldProcessEntity ) {
                // if it wasn't already encountered, append all properties
                serializeObjectValues( entityType, value, options, writer );
            }
            writer.endObject();
        }
        catch (IOException e) {
            throw new UncheckedIOException( "Error serializing entity", e );
        }
    } );
}
/**
 * Runs {@code action} with a flag saying whether {@code entity} was newly added to
 * the in-flight set for its entity name (true = first encounter, safe to expand;
 * false = already being serialized, i.e. a cycle). The entity is removed again after
 * the action so the tracker only reflects the current serialization stack.
 */
private void trackingEntity(Object entity, EntityMappingType entityType, Consumer<Boolean> action) {
    if ( circularityTracker == null ) {
        circularityTracker = new HashMap<>();
    }
    final IdentitySet<Object> entities = circularityTracker.computeIfAbsent(
            entityType.getEntityName(),
            k -> new IdentitySet<>()
    );
    final boolean added = entities.add( entity );
    action.accept( added );
    if ( added ) {
        entities.remove( entity );
    }
}
/**
 * Writes a placeholder for values that cannot (or should not) be expanded and
 * reports whether the caller is done: null becomes a JSON null, an unfetched lazy
 * property is written as its marker string, and an uninitialized proxy/collection
 * is written as the literal "&lt;uninitialized&gt;". Returns false when the value is
 * real and should be serialized normally.
 */
@Override
protected boolean handleNullOrLazy(Object value, JsonDocumentWriter writer) {
    if ( value == null ) {
        writer.nullValue();
        return true;
    }
    else if ( value == LazyPropertyInitializer.UNFETCHED_PROPERTY ) {
        writer.stringValue( value.toString() );
        return true;
    }
    else if ( !isInitialized( value ) ) {
        writer.stringValue( "<uninitialized>" );
        return true;
    }
    else {
        return false;
    }
}
/**
 * Dispatches serialization of a model part by its mapping kind: selectable/basic and
 * embedded attributes are visited through their mapped type, entity-valued parts
 * through their entity mapping, and plural attributes through the collection path.
 * Unknown part kinds fail fast with UnsupportedOperationException.
 */
@Override
protected void serializeModelPart(
        ValuedModelPart modelPart,
        Object value,
        WrapperOptions options,
        JsonDocumentWriter writer) throws IOException {
    // Extended version of default method that always expands embeddable
    // objects and can handle entities and plural attributes
    if ( modelPart instanceof SelectableMapping ) {
        writer.objectKey( modelPart.getPartName() );
        visit( modelPart.getMappedType(), value, options, writer );
    }
    else if ( modelPart instanceof EmbeddedAttributeMapping embeddedAttribute ) {
        writer.objectKey( embeddedAttribute.getAttributeName() );
        visit( embeddedAttribute.getMappedType(), value, options, writer );
    }
    else if ( modelPart instanceof EntityValuedModelPart entityPart ) {
        writer.objectKey( entityPart.getPartName() );
        visit( entityPart.getEntityMappingType(), value, options, writer );
    }
    else if ( modelPart instanceof PluralAttributeMapping plural ) {
        writer.objectKey( plural.getPartName() );
        serializePluralAttribute( value, plural, options, writer );
    }
    else {
        // could not handle model part, throw exception
        throw new UnsupportedOperationException(
                "Support for model part type not yet implemented: "
                + (modelPart != null ? modelPart.getClass().getName() : "null")
        );
    }
}
/**
 * Serializes a plural (collection-valued) attribute. Null/lazy values are handled by
 * the placeholder path; map-classified collections are rendered as an array of
 * key/value objects, every other classification as a plain JSON array.
 */
private void serializePluralAttribute(
        Object value,
        PluralAttributeMapping plural,
        WrapperOptions options,
        JsonDocumentWriter writer) throws IOException {
    if ( handleNullOrLazy( value, writer ) ) {
        // nothing left to do
        return;
    }
    final CollectionPart element = plural.getElementDescriptor();
    final CollectionSemantics<?, ?> collectionSemantics = plural.getMappedType().getCollectionSemantics();
    switch ( collectionSemantics.getCollectionClassification() ) {
        case MAP:
        case SORTED_MAP:
        case ORDERED_MAP:
            serializePersistentMap(
                    (PersistentMap<?, ?>) value,
                    plural.getIndexDescriptor(),
                    element,
                    options,
                    writer
            );
            break;
        default:
            serializePersistentCollection(
                    (PersistentCollection<?>) value,
                    plural.getCollectionDescriptor(),
                    element,
                    options,
                    writer
            );
    }
}
/**
 * Serializes a persistent map to JSON as {@code [{key: ..., value: ...}, ...]} —
 * an array of entry objects rather than a JSON object, because map keys need not
 * be strings.
 */
private <K, E> void serializePersistentMap(
        PersistentMap<K, E> map,
        CollectionPart key,
        CollectionPart value,
        WrapperOptions options,
        JsonDocumentWriter writer) throws IOException {
    writer.startArray();
    for ( final Map.Entry<K, E> entry : map.entrySet() ) {
        writer.startObject();
        writer.objectKey( "key" );
        serializeCollectionPart( entry.getKey(), key, options, writer );
        writer.objectKey( "value" );
        serializeCollectionPart( entry.getValue(), value, options, writer );
        writer.endObject();
    }
    writer.endArray();
}
/**
 * Serializes a persistent collection to a JSON array, iterating its entries via the
 * collection persister and serializing each element through its collection part.
 */
private <E> void serializePersistentCollection(
        PersistentCollection<E> collection,
        CollectionPersister persister,
        CollectionPart element,
        WrapperOptions options,
        JsonDocumentWriter appender) throws IOException {
    appender.startArray();
    final Iterator<?> entries = collection.entries( persister );
    while ( entries.hasNext() ) {
        serializeCollectionPart( entries.next(), element, options, appender );
    }
    appender.endArray();
}
/**
 * Serializes a single collection key/element: basic-valued parts go straight through
 * the writer's JDBC-aware value serialization; anything else (embeddables, entities)
 * is visited recursively through its mapped type.
 */
private void serializeCollectionPart(
        Object value,
        CollectionPart collectionPart,
        WrapperOptions options,
        JsonDocumentWriter appender) throws IOException {
    if ( collectionPart instanceof BasicValuedCollectionPart basic ) {
        appender.serializeJsonValue( value, basic.getJavaType(), basic.getJdbcMapping().getJdbcType(), options );
    }
    else {
        visit( collectionPart.getMappedType(), value, options, appender );
    }
}
}
| DescriptiveJsonGeneratingVisitor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/sqm/domain/NestedInjectedLookupListItem.java | {
"start": 185,
"end": 464
} | class ____ extends InjectedLookupListItem implements NestedLookupListItem {
    // Test fixture: adds a mutable nested LookupListItem on top of the injected
    // lookup item so queries can navigate one level deeper.
    private LookupListItem nested;

    public void setNested(LookupListItem nested) {
        this.nested = nested;
    }

    @Override
    public LookupListItem getNested() {
        return nested;
    }
}
| NestedInjectedLookupListItem |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java | {
"start": 2967,
"end": 25271
} | class ____ extends Engine {
public static final String FIELD_RANGE_SEARCH_SOURCE = "field_range";
/**
* Reader attributes used for read only engines. These attributes prevent loading term dictionaries on-heap even if the field is an
* ID field.
*/
private final SegmentInfos lastCommittedSegmentInfos;
private final SeqNoStats seqNoStats;
private final ElasticsearchReaderManager readerManager;
private final IndexCommit indexCommit;
private final Lock indexWriterLock;
private final SafeCommitInfo safeCommitInfo;
private final CompletionStatsCache completionStatsCache;
private final boolean requireCompleteHistory;
final boolean lazilyLoadSoftDeletes;
protected volatile TranslogStats translogStats;
private final String commitId;
/**
* Creates a new ReadOnlyEngine. This ctor can also be used to open a read-only engine on top of an already opened
* read-write engine. It allows to optionally obtain the writer locks for the shard which would time-out if another
* engine is still open.
*
* @param config the engine configuration
* @param seqNoStats sequence number statistics for this engine or null if not provided
* @param translogStats translog stats for this engine or null if not provided
* @param obtainLock if <code>true</code> this engine will try to obtain the {@link IndexWriter#WRITE_LOCK_NAME} lock. Otherwise
* the lock won't be obtained
* @param readerWrapperFunction allows to wrap the index-reader for this engine.
* @param requireCompleteHistory indicates whether this engine permits an incomplete history (i.e. LCP < MSN)
* @param lazilyLoadSoftDeletes indicates whether this engine should load the soft-delete based liveDocs eagerly, or on first access
*/
@SuppressWarnings("this-escape")
public ReadOnlyEngine(
        EngineConfig config,
        @Nullable SeqNoStats seqNoStats,
        @Nullable TranslogStats translogStats,
        boolean obtainLock,
        Function<DirectoryReader, DirectoryReader> readerWrapperFunction,
        boolean requireCompleteHistory,
        boolean lazilyLoadSoftDeletes
) {
    super(config);
    this.requireCompleteHistory = requireCompleteHistory;
    try {
        Store store = config.getStore();
        // Reference to the store is held for the engine's lifetime; released in
        // closeNoLock (and below on construction failure).
        store.incRef();
        ElasticsearchDirectoryReader reader = null;
        Directory directory = store.directory();
        Lock indexWriterLock = null;
        boolean success = false;
        try {
            // we obtain the IW lock even though we never modify the index.
            // yet this makes sure nobody else does. including some testing tools that try to be messy
            indexWriterLock = obtainLock ? directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null;
            this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory);
            this.commitId = generateSearcherId(lastCommittedSegmentInfos);
            if (seqNoStats == null) {
                // Derive stats from the last commit and verify max seq no == global
                // checkpoint (when a complete history is required).
                seqNoStats = buildSeqNoStats(config, lastCommittedSegmentInfos);
                ensureMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats);
            }
            this.seqNoStats = seqNoStats;
            this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory);
            this.lazilyLoadSoftDeletes = lazilyLoadSoftDeletes;
            reader = wrapReader(open(indexCommit), readerWrapperFunction, null);
            readerManager = new ElasticsearchReaderManager(reader);
            assert translogStats != null || obtainLock : "mutiple translogs instances should not be opened at the same time";
            this.translogStats = translogStats != null ? translogStats : translogStats(config, lastCommittedSegmentInfos);
            this.indexWriterLock = indexWriterLock;
            this.safeCommitInfo = new SafeCommitInfo(seqNoStats.getLocalCheckpoint(), lastCommittedSegmentInfos.totalMaxDoc());
            completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats"));
            // no need to register a refresh listener to invalidate completionStatsCache since this engine is readonly
            success = true;
        } finally {
            if (success == false) {
                // Roll back partially-acquired resources so a failed construction
                // does not leak the reader, the writer lock, or the store reference.
                IOUtils.close(reader, indexWriterLock, store::decRef);
            }
        }
    } catch (IOException e) {
        // Engine's constructor cannot declare IOException; surface it unchecked.
        throw new UncheckedIOException(e);
    }
}
/**
 * Generate a searcher id using the ids of the underlying segments of an index commit. Here we can't use the commit id
 * directly as the search id because the commit id changes whenever IndexWriter#commit is called although the segment
 * files stay unchanged. Any recovery except the local recovery performs IndexWriter#commit to generate a new translog
 * uuid or history_uuid.
 *
 * @return a hex-encoded SHA-256 over all segment ids, or null if any segment predates
 *         segment ids (old Lucene segments), in which case no stable id can be built
 */
static String generateSearcherId(SegmentInfos sis) {
    final MessageDigest md = MessageDigests.sha256();
    for (SegmentCommitInfo si : sis) {
        final byte[] segmentId = si.getId();
        if (segmentId != null) {
            md.update(segmentId);
        } else {
            // old segments do not have segment ids
            return null;
        }
    }
    return MessageDigests.toHexString(md.digest());
}
/**
 * Verifies that the last commit's max sequence number matches the global checkpoint,
 * i.e. that the engine's history is complete. A no-op when an incomplete history is
 * permitted ({@code requireCompleteHistory == false}).
 *
 * @throws IllegalStateException if the commit's max seq no and global checkpoint diverge
 */
protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStats) {
    if (requireCompleteHistory == false) {
        return;
    }
    // Before 8.0 the global checkpoint is not known and up to date when the engine is created after
    // peer recovery, so we only check the max seq no / global checkpoint coherency when the global
    // checkpoint is different from the unassigned sequence number value.
    // In addition to that we only execute the check if the index the engine belongs to has been
    // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction
    // that guarantee that all operations have been flushed to Lucene.
    IndexVersion indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated();
    if (indexVersionCreated.onOrAfter(IndexVersions.V_7_2_0)
        || (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO)) {
        assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint());
        if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) {
            throw new IllegalStateException(
                "Maximum sequence number ["
                    + seqNoStats.getMaxSeqNo()
                    + "] from last commit does not match global checkpoint ["
                    + seqNoStats.getGlobalCheckpoint()
                    + "]"
            );
        }
    }
}
protected boolean assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) {
assert maxSeqNo == globalCheckpoint : "max seq. no. [" + maxSeqNo + "] does not match [" + globalCheckpoint + "]";
return true;
}
@Override
public void verifyEngineBeforeIndexClosing() throws IllegalStateException {
// the value of the global checkpoint is verified when the read-only engine is opened,
// and it is not expected to change during the lifecycle of the engine. We could also
// check this value before closing the read-only engine but if something went wrong
// and the global checkpoint is not in-sync with the max. sequence number anymore,
// checking the value here again would prevent the read-only engine to be closed and
// reopened as an internal engine, which would be the path to fix the issue.
}
protected final ElasticsearchDirectoryReader wrapReader(
DirectoryReader reader,
Function<DirectoryReader, DirectoryReader> readerWrapperFunction,
@Nullable ESCacheHelper esCacheHelper
) throws IOException {
reader = readerWrapperFunction.apply(reader);
return ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId(), esCacheHelper);
}
protected DirectoryReader open(IndexCommit commit) throws IOException {
assert Transports.assertNotTransportThread("opening index commit of a read-only engine");
DirectoryReader directoryReader = DirectoryReader.open(
commit,
IndexVersions.MINIMUM_READONLY_COMPATIBLE.luceneVersion().major,
engineConfig.getLeafSorter()
);
if (lazilyLoadSoftDeletes) {
return new LazySoftDeletesDirectoryReaderWrapper(directoryReader, Lucene.SOFT_DELETES_FIELD);
} else {
return new SoftDeletesDirectoryReaderWrapper(directoryReader, Lucene.SOFT_DELETES_FIELD);
}
}
@Override
protected void closeNoLock(String reason, CountDownLatch closedLatch) {
if (isClosed.compareAndSet(false, true)) {
try {
IOUtils.close(readerManager, indexWriterLock, store::decRef);
} catch (Exception ex) {
logger.warn("failed to close reader", ex);
} finally {
closedLatch.countDown();
}
}
}
/**
 * Computes translog stats for the given commit by briefly opening (and closing) the
 * translog identified by the commit's translog UUID, with its deletion policy pinned
 * to the commit's local checkpoint.
 *
 * @throws IllegalStateException if the commit's user data carries no translog UUID
 */
private static TranslogStats translogStats(final EngineConfig config, final SegmentInfos infos) throws IOException {
    assert config.getTranslogConfig().hasTranslog();
    final String translogUuid = infos.getUserData().get(Translog.TRANSLOG_UUID_KEY);
    if (translogUuid == null) {
        throw new IllegalStateException("commit doesn't contain translog unique id");
    }
    final TranslogConfig translogConfig = config.getTranslogConfig();
    final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy();
    final long localCheckpoint = Long.parseLong(infos.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
    translogDeletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint);
    try (
        Translog translog = new Translog(
            translogConfig,
            translogUuid,
            translogDeletionPolicy,
            config.getGlobalCheckpointSupplier(),
            config.getPrimaryTermSupplier(),
            seqNo -> {},
            TranslogOperationAsserter.DEFAULT
        )
    ) {
        return translog.stats();
    }
}
@Override
public GetResult get(
Get get,
MappingLookup mappingLookup,
DocumentParser documentParser,
Function<Searcher, Searcher> searcherWrapper
) {
return getFromSearcher(get, acquireSearcher("get", SearcherScope.EXTERNAL, searcherWrapper), false);
}
@Override
protected ReferenceManager<ElasticsearchDirectoryReader> getReferenceManager(SearcherScope scope) {
return readerManager;
}
@Override
public SegmentInfos getLastCommittedSegmentInfos() {
return lastCommittedSegmentInfos;
}
@Override
public String getHistoryUUID() {
return lastCommittedSegmentInfos.userData.get(Engine.HISTORY_UUID_KEY);
}
@Override
public long getWritingBytes() {
return 0;
}
@Override
public long getIndexThrottleTimeInMillis() {
return 0;
}
@Override
public boolean isThrottled() {
return false;
}
@Override
public IndexResult index(Index index) {
assert false : "this should not be called";
throw new UnsupportedOperationException("indexing is not supported on a read-only engine");
}
@Override
public DeleteResult delete(Delete delete) {
assert false : "this should not be called";
throw new UnsupportedOperationException("deletes are not supported on a read-only engine");
}
@Override
public NoOpResult noOp(NoOp noOp) {
assert false : "this should not be called";
throw new UnsupportedOperationException("no-ops are not supported on a read-only engine");
}
@Override
public boolean isTranslogSyncNeeded() {
return false;
}
@Override
public void asyncEnsureTranslogSynced(Translog.Location location, Consumer<Exception> listener) {
listener.accept(null);
}
@Override
public void asyncEnsureGlobalCheckpointSynced(long globalCheckpoint, Consumer<Exception> listener) {
listener.accept(null);
}
@Override
public void syncTranslog() {}
@Override
public Closeable acquireHistoryRetentionLock() {
return () -> {};
}
@Override
public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException {
// newChangesSnapshot in this engine always yields the empty snapshot, so this
// reports that snapshot's totalOperations — no changes are observable here.
try (Translog.Snapshot snapshot = newChangesSnapshot(source, fromSeqNo, toSeqNo, false, true, true, -1)) {
return snapshot.totalOperations();
}
}
/** A read-only engine exposes no operation history; every changes snapshot is empty. */
@Override
public Translog.Snapshot newChangesSnapshot(
String source,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean singleConsumer,
boolean accessStats,
long maxChunkSize
) {
return Translog.Snapshot.EMPTY;
}
@Override
public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) {
// we can do operation-based recovery if we don't have to replay any operation.
return startingSeqNo > seqNoStats.getMaxSeqNo();
}
@Override
public long getMinRetainedSeqNo() {
// Not meaningful without retained operation history.
throw new UnsupportedOperationException();
}
// The getters below serve values captured when the engine was opened:
// translogStats and seqNoStats are snapshots taken from the last commit and never change.
@Override
public TranslogStats getTranslogStats() {
return translogStats;
}
@Override
public Translog.Location getTranslogLastWriteLocation() {
// No writes ever happen; report the origin location.
return new Translog.Location(0, 0, 0);
}
@Override
public long getMaxSeqNo() {
return seqNoStats.getMaxSeqNo();
}
@Override
public long getProcessedLocalCheckpoint() {
// Processed and persisted checkpoints coincide in a read-only engine.
return seqNoStats.getLocalCheckpoint();
}
@Override
public long getPersistedLocalCheckpoint() {
return seqNoStats.getLocalCheckpoint();
}
@Override
public SeqNoStats getSeqNoStats(long globalCheckpoint) {
// Max seq no and local checkpoint are fixed; the global checkpoint is caller-supplied.
return new SeqNoStats(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), globalCheckpoint);
}
@Override
public long getLastSyncedGlobalCheckpoint() {
return seqNoStats.getGlobalCheckpoint();
}
@Override
public long getIndexBufferRAMBytesUsed() {
// No indexing buffer exists.
return 0;
}
/** Segment listings derived from the last committed segment infos captured at open time. */
@Override
public List<Segment> segments() {
return Arrays.asList(getSegmentInfo(lastCommittedSegmentInfos));
}
@Override
public List<Segment> segments(boolean includeVectorFormatsInfo) {
return Arrays.asList(getSegmentInfo(lastCommittedSegmentInfos, includeVectorFormatsInfo));
}
@Override
public RefreshResult refresh(String source) {
// we could allow refreshes if we want down the road the reader manager will then reflect changes to a rw-engine
// opened side-by-side
return RefreshResult.NO_REFRESH;
}
@Override
public void maybeRefresh(String source, ActionListener<RefreshResult> listener) throws EngineException {
// Delegates to refresh(source) and completes the listener with its (no-refresh) result.
ActionListener.completeWith(listener, () -> refresh(source));
}
@Override
public void writeIndexingBuffer() {}
@Override
public boolean shouldPeriodicallyFlush() {
return false;
}
@Override
protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException {
// Nothing to flush; report "not flushed" along with the current commit generation.
listener.onResponse(new FlushResult(false, lastCommittedSegmentInfos.getGeneration()));
}
/**
 * Force-merge is impossible on a read-only engine. A request for the default
 * ("as few segments as possible") target is silently ignored; a target that would
 * require reducing the current segment count is rejected; anything else is already
 * satisfied and merely logged.
 */
@Override
public void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, String forceMergeUUID) {
    if (maxNumSegments == ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS) {
        // Default target: treat as a no-op rather than failing the request.
        return;
    }
    final int currentSegments = lastCommittedSegmentInfos.size();
    if (maxNumSegments < currentSegments) {
        throw new UnsupportedOperationException(
            "force merge is not supported on a read-only engine, "
                + "target max number of segments["
                + maxNumSegments
                + "], "
                + "current number of segments["
                + currentSegments
                + "]."
        );
    }
    logger.debug(
        "current number of segments[{}] is not greater than target max number of segments[{}].",
        currentSegments,
        maxNumSegments
    );
}
@Override
public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
// Hold a store reference for as long as the commit ref is open; released on close.
store.incRef();
return new IndexCommitRef(indexCommit, store::decRef);
}
@Override
public IndexCommitRef acquireSafeIndexCommit() {
// The last commit is the only commit, hence also the safe one; no flush needed.
return acquireLastIndexCommit(false);
}
@Override
public SafeCommitInfo getSafeCommitInfo() {
return safeCommitInfo;
}
// Throttling and translog maintenance are irrelevant without writes; the methods
// below are deliberate no-ops or fixed answers.
@Override
public void activateThrottling() {}
@Override
public void deactivateThrottling() {}
@Override
public void suspendThrottling() {}
@Override
public void resumeThrottling() {}
@Override
public void trimUnreferencedTranslogFiles() {}
@Override
public boolean shouldRollTranslogGeneration() {
return false;
}
@Override
public void rollTranslogGeneration() {}
@Override
public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecoveryRunner) {
// No local history to restore.
return 0;
}
@Override
public int fillSeqNoGaps(long primaryTerm) {
// No gaps can exist since no operations are processed.
return 0;
}
/**
 * "Recovers" from the translog by running the recovery runner over the empty snapshot,
 * holding an ensure-open reference for the duration. A failure from the runner is
 * wrapped in an {@link EngineException} and propagated to the listener; otherwise the
 * listener is completed with {@code null}. The {@code recoverUpToSeqNo} bound is
 * irrelevant since there are no operations to replay.
 */
@Override
public void recoverFromTranslog(
final TranslogRecoveryRunner translogRecoveryRunner,
final long recoverUpToSeqNo,
ActionListener<Void> listener
) {
ActionListener.runWithResource(listener, this::acquireEnsureOpenRef, (l, ignoredRef) -> {
try {
translogRecoveryRunner.run(this, Translog.Snapshot.EMPTY);
} catch (final Exception e) {
throw new EngineException(shardId, "failed to recover from empty translog snapshot", e);
}
l.onResponse(null);
});
}
@Override
public void skipTranslogRecovery() {}
@Override
public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) {}
@Override
public void maybePruneDeletes() {}
@Override
public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) {
// Intentionally a no-op for a read-only engine.
}
@Override
public boolean refreshNeeded() {
return false;
}
@Override
public long getMaxSeqNoOfUpdatesOrDeletes() {
// Upper-bounded by the max seq no of the frozen commit.
return seqNoStats.getMaxSeqNo();
}
@Override
public void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary) {
// Nothing to advance; just assert the primary never reports a value beyond ours.
assert maxSeqNoOfUpdatesOnPrimary <= getMaxSeqNoOfUpdatesOrDeletes()
: maxSeqNoOfUpdatesOnPrimary + ">" + getMaxSeqNoOfUpdatesOrDeletes();
}
/**
 * Opens a {@link DirectoryReader} over the directory's current commit, wrapped so that
 * soft deletes are honored. Depending on {@code lazilyLoadSoftDeletes}, the soft-deletes
 * live-docs are either computed lazily or eagerly at wrap time.
 */
protected DirectoryReader openDirectory(Directory directory) throws IOException {
assert Transports.assertNotTransportThread("opening directory reader of a read-only engine");
var commit = Lucene.getIndexCommit(Lucene.readSegmentInfos(directory), directory);
// Open at the minimum read-only compatible Lucene major version so older indices remain readable.
final DirectoryReader reader = DirectoryReader.open(commit, IndexVersions.MINIMUM_READONLY_COMPATIBLE.luceneVersion().major, null);
if (lazilyLoadSoftDeletes) {
return new LazySoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD);
} else {
return new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD);
}
}
@Override
public CompletionStats completionStats(String... fieldNamePatterns) {
// Served from a cache; safe because the underlying data never changes.
return completionStatsCache.get(fieldNamePatterns);
}
/**
 * @return a {@link ShardLongFieldRange} containing the min and max raw values of the given field for this shard, or {@link
 * ShardLongFieldRange#EMPTY} if this field is not found or empty.
 */
@Override
public ShardLongFieldRange getRawFieldRange(String field) throws IOException {
try (Searcher searcher = acquireSearcher(FIELD_RANGE_SEARCH_SOURCE)) {
final DirectoryReader directoryReader = searcher.getDirectoryReader();
// First try points metadata, which records a packed min/max per field.
final byte[] minPackedValue = PointValues.getMinPackedValue(directoryReader, field);
final byte[] maxPackedValue = PointValues.getMaxPackedValue(directoryReader, field);
if (minPackedValue != null && maxPackedValue != null) {
return ShardLongFieldRange.of(LongPoint.decodeDimension(minPackedValue, 0), LongPoint.decodeDimension(maxPackedValue, 0));
}
// No points: fall back to the doc-values skipper's global min/max. The
// MAX_VALUE/MIN_VALUE pair is the sentinel for "no skipper present".
long minValue = DocValuesSkipper.globalMinValue(searcher, field);
long maxValue = DocValuesSkipper.globalMaxValue(searcher, field);
if (minValue == Long.MAX_VALUE && maxValue == Long.MIN_VALUE) {
// no skipper
return ShardLongFieldRange.EMPTY;
}
return ShardLongFieldRange.of(minValue, maxValue);
}
}
/**
 * Wraps the superclass searcher supplier so that {@code getSearcherId()} reports this
 * engine's commit id, letting callers recognize searchers that originate from the
 * same immutable commit. All other behavior is delegated unchanged.
 */
@Override
public SearcherSupplier acquireSearcherSupplier(
Function<Searcher, Searcher> wrapper,
SearcherScope scope,
SplitShardCountSummary splitShardCountSummary
) throws EngineException {
final SearcherSupplier delegate = super.acquireSearcherSupplier(wrapper, scope, splitShardCountSummary);
return new SearcherSupplier(wrapper) {
@Override
protected void doClose() {
// Release the underlying supplier when this wrapper is closed.
delegate.close();
}
@Override
protected Searcher acquireSearcherInternal(String source) {
return delegate.acquireSearcherInternal(source);
}
@Override
public String getSearcherId() {
return commitId;
}
};
}
/** Returns the id of the commit this engine was opened on. */
public final String getCommitId() {
return commitId;
}
}
| ReadOnlyEngine |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/conditional/basic/ErroneousSourceParameterConditionalWithMappingTargetMapper.java | {
"start": 385,
"end": 724
} | interface ____ {
BasicEmployee map(BasicEmployeeDto employee);
@Condition(appliesTo = ConditionStrategy.SOURCE_PARAMETERS)
default boolean isNotBlank(String value, @MappingTarget BasicEmployee employee) {
return value != null && !value.trim().isEmpty();
}
}
| ErroneousSourceParameterConditionalWithMappingTargetMapper |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/intTest/java/org/springframework/boot/maven/JarIntegrationTests.java | {
"start": 1495,
"end": 24387
} | class ____ extends AbstractArchiveIntegrationTests {
@Override
protected String getLayersIndexLocation() {
return "BOOT-INF/layers.idx";
}
@TestTemplate
void whenJarIsRepackagedInPlaceOnlyRepackagedJarIsInstalled(MavenBuild mavenBuild) {
mavenBuild.project("jar").goals("install").execute((project) -> {
File original = new File(project, "target/jar-0.0.1.BUILD-SNAPSHOT.jar.original");
assertThat(original).isFile();
File repackaged = new File(project, "target/jar-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).manifest((manifest) -> {
manifest.hasMainClass("org.springframework.boot.loader.launch.JarLauncher");
manifest.hasStartClass("some.random.Main");
manifest.hasAttribute("Not-Used", "Foo");
})
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.hasEntryWithNameStartingWith("BOOT-INF/lib/commons-logging")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jakarta.servlet-api-6")
.hasEntryWithName("BOOT-INF/classes/org/test/SampleApplication.class")
.hasEntryWithName("org/springframework/boot/loader/launch/JarLauncher.class");
assertThat(buildLog(project))
.contains("Replacing main artifact " + repackaged + " with repackaged archive,")
.contains("The original artifact has been renamed to " + original)
.contains("Installing " + repackaged + " to")
.doesNotContain("Installing " + original + " to");
});
}
@TestTemplate
void whenAttachIsDisabledOnlyTheOriginalJarIsInstalled(MavenBuild mavenBuild) {
mavenBuild.project("jar-attach-disabled").goals("install").execute((project) -> {
File original = new File(project, "target/jar-attach-disabled-0.0.1.BUILD-SNAPSHOT.jar.original");
assertThat(original).isFile();
File main = new File(project, "target/jar-attach-disabled-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(main).isFile();
assertThat(buildLog(project)).contains("Updating main artifact " + main + " to " + original)
.contains("Installing " + original + " to")
.doesNotContain("Installing " + main + " to");
});
}
@TestTemplate
void whenAClassifierIsConfiguredTheRepackagedJarHasAClassifierAndBothItAndTheOriginalAreInstalled(
MavenBuild mavenBuild) {
mavenBuild.project("jar-classifier-main").goals("install").execute((project) -> {
assertThat(new File(project, "target/jar-classifier-main-0.0.1.BUILD-SNAPSHOT.jar.original"))
.doesNotExist();
File main = new File(project, "target/jar-classifier-main-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(main).isFile();
File repackaged = new File(project, "target/jar-classifier-main-0.0.1.BUILD-SNAPSHOT-test.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/");
assertThat(buildLog(project))
.contains("Attaching repackaged archive " + repackaged + " with classifier test")
.doesNotContain("Creating repackaged archive " + repackaged + " with classifier test")
.contains("Installing " + main + " to")
.contains("Installing " + repackaged + " to");
});
}
@TestTemplate
void whenBothJarsHaveTheSameClassifierRepackagingIsDoneInPlaceAndOnlyRepackagedJarIsInstalled(
MavenBuild mavenBuild) {
mavenBuild.project("jar-classifier-source").goals("install").execute((project) -> {
File original = new File(project, "target/jar-classifier-source-0.0.1.BUILD-SNAPSHOT-test.jar.original");
assertThat(original).isFile();
File repackaged = new File(project, "target/jar-classifier-source-0.0.1.BUILD-SNAPSHOT-test.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/");
assertThat(buildLog(project))
.contains("Replacing artifact with classifier test " + repackaged + " with repackaged archive,")
.contains("The original artifact has been renamed to " + original)
.doesNotContain("Installing " + original + " to")
.contains("Installing " + repackaged + " to");
});
}
@TestTemplate
void whenBothJarsHaveTheSameClassifierAndAttachIsDisabledOnlyTheOriginalJarIsInstalled(MavenBuild mavenBuild) {
mavenBuild.project("jar-classifier-source-attach-disabled").goals("install").execute((project) -> {
File original = new File(project,
"target/jar-classifier-source-attach-disabled-0.0.1.BUILD-SNAPSHOT-test.jar.original");
assertThat(original).isFile();
File repackaged = new File(project,
"target/jar-classifier-source-attach-disabled-0.0.1.BUILD-SNAPSHOT-test.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/");
assertThat(buildLog(project))
.doesNotContain("Attaching repackaged archive " + repackaged + " with classifier test")
.contains("Updating artifact with classifier test " + repackaged + " to " + original)
.contains("Installing " + original + " to")
.doesNotContain("Installing " + repackaged + " to");
});
}
@TestTemplate
void whenAClassifierAndAnOutputDirectoryAreConfiguredTheRepackagedJarHasAClassifierAndIsWrittenToTheOutputDirectory(
MavenBuild mavenBuild) {
mavenBuild.project("jar-create-dir").goals("install").execute((project) -> {
File repackaged = new File(project, "target/foo/jar-create-dir-0.0.1.BUILD-SNAPSHOT-foo.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/");
assertThat(buildLog(project)).contains("Installing " + repackaged + " to");
});
}
@TestTemplate
void whenAnOutputDirectoryIsConfiguredTheRepackagedJarIsWrittenToIt(MavenBuild mavenBuild) {
mavenBuild.project("jar-custom-dir").goals("install").execute((project) -> {
File repackaged = new File(project, "target/foo/jar-custom-dir-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/");
assertThat(buildLog(project)).contains("Installing " + repackaged + " to");
});
}
@TestTemplate
void whenAnEntryIsExcludedItDoesNotAppearInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-exclude-entry").goals("install").execute((project) -> {
File repackaged = new File(project, "target/jar-exclude-entry-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.hasEntryWithNameStartingWith("BOOT-INF/lib/commons-logging")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/servlet-api-");
});
}
@TestTemplate
void whenAnEntryIsOptionalByDefaultDoesNotAppearInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-optional-default").goals("install").execute((project) -> {
File repackaged = new File(project, "target/jar-optional-default-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/log4j-api-");
});
}
@TestTemplate
void whenAnEntryIsOptionalAndOptionalsIncludedAppearsInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-optional-include").goals("install").execute((project) -> {
File repackaged = new File(project, "target/jar-optional-include-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.hasEntryWithNameStartingWith("BOOT-INF/lib/log4j-api-");
});
}
@TestTemplate
void whenAnEntryIsOptionalAndOptionalsExcludedDoesNotAppearInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-optional-exclude").goals("install").execute((project) -> {
File repackaged = new File(project, "target/jar-optional-exclude-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/log4j-api-");
});
}
@TestTemplate
void whenAnEntryIsExcludedWithPropertyItDoesNotAppearInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar")
.systemProperty("spring-boot.excludes", "jakarta.servlet:jakarta.servlet-api")
.goals("install")
.execute((project) -> {
File repackaged = new File(project, "target/jar-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.hasEntryWithNameStartingWith("BOOT-INF/lib/commons-logging")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/jakarta.servlet-api-");
});
}
@TestTemplate
void whenAnEntryIsIncludedOnlyIncludedEntriesAppearInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-include-entry").goals("install").execute((project) -> {
File repackaged = new File(project, "target/jar-include-entry-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jakarta.servlet-api-")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/commons-logging");
});
}
@TestTemplate
void whenAnIncludeIsSpecifiedAsAPropertyOnlyIncludedEntriesAppearInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar")
.systemProperty("spring-boot.includes", "jakarta.servlet:jakarta.servlet-api")
.goals("install")
.execute((project) -> {
File repackaged = new File(project, "target/jar-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jakarta.servlet-api-")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/spring-core")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/commons-logging");
});
}
@TestTemplate
void whenAGroupIsExcludedNoEntriesInThatGroupAppearInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-exclude-group").goals("install").execute((project) -> {
File repackaged = new File(project, "target/jar-exclude-group-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context")
.hasEntryWithNameStartingWith("BOOT-INF/lib/commons-logging")
.doesNotHaveEntryWithName("BOOT-INF/lib/log4j-api-");
});
}
@TestTemplate
void whenAJarIsBuiltWithLibrariesWithConflictingNamesTheyAreMadeUniqueUsingTheirGroupIds(MavenBuild mavenBuild) {
mavenBuild.project("jar-lib-name-conflict").execute((project) -> {
File repackaged = new File(project, "test-project/target/test-project-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithName("BOOT-INF/lib/org.springframework.boot.maven.it-acme-lib-0.0.1.BUILD-SNAPSHOT.jar")
.hasEntryWithName(
"BOOT-INF/lib/org.springframework.boot.maven.it.another-acme-lib-0.0.1.BUILD-SNAPSHOT.jar");
});
}
@TestTemplate
void whenAProjectUsesPomPackagingRepackagingIsSkipped(MavenBuild mavenBuild) {
mavenBuild.project("jar-pom").execute((project) -> {
File target = new File(project, "target");
assertThat(target.listFiles()).containsExactly(new File(target, "build.log"));
});
}
@TestTemplate
void whenRepackagingIsSkippedTheJarIsNotRepackaged(MavenBuild mavenBuild) {
mavenBuild.project("jar-skip").execute((project) -> {
File main = new File(project, "target/jar-skip-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).doesNotHaveEntryWithNameStartingWith("org/springframework/boot");
assertThat(new File(project, "target/jar-skip-0.0.1.BUILD-SNAPSHOT.jar.original")).doesNotExist();
});
}
@TestTemplate
void whenADependencyHasSystemScopeAndInclusionOfSystemScopeDependenciesIsEnabledItIsIncludedInTheRepackagedJar(
MavenBuild mavenBuild) {
mavenBuild.project("jar-system-scope").execute((project) -> {
File main = new File(project, "target/jar-system-scope-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).hasEntryWithName("BOOT-INF/lib/sample-1.0.0.jar");
});
}
@TestTemplate
void whenADependencyHasSystemScopeItIsNotIncludedInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-system-scope-default").execute((project) -> {
File main = new File(project, "target/jar-system-scope-default-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).doesNotHaveEntryWithName("BOOT-INF/lib/sample-1.0.0.jar");
});
}
@TestTemplate
void whenADependencyHasTestScopeItIsNotIncludedInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-test-scope").execute((project) -> {
File main = new File(project, "target/jar-test-scope-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/log4j")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-");
});
}
@TestTemplate
void whenAProjectUsesKotlinItsModuleMetadataIsRepackagedIntoBootInfClasses(MavenBuild mavenBuild) {
mavenBuild.project("jar-with-kotlin-module").execute((project) -> {
File main = new File(project, "target/jar-with-kotlin-module-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).hasEntryWithName("BOOT-INF/classes/META-INF/jar-with-kotlin-module.kotlin_module");
});
}
@TestTemplate
void whenAProjectIsBuiltWithALayoutPropertyTheSpecifiedLayoutIsUsed(MavenBuild mavenBuild) {
mavenBuild.project("jar-with-layout-property")
.goals("package", "-Dspring-boot.repackage.layout=ZIP")
.execute((project) -> {
File main = new File(project, "target/jar-with-layout-property-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).manifest(
(manifest) -> manifest.hasMainClass("org.springframework.boot.loader.launch.PropertiesLauncher")
.hasStartClass("org.test.SampleApplication"));
assertThat(buildLog(project)).contains("Layout: ZIP");
});
}
@TestTemplate
void whenALayoutIsConfiguredTheSpecifiedLayoutIsUsed(MavenBuild mavenBuild) {
mavenBuild.project("jar-with-zip-layout").execute((project) -> {
File main = new File(project, "target/jar-with-zip-layout-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).manifest(
(manifest) -> manifest.hasMainClass("org.springframework.boot.loader.launch.PropertiesLauncher")
.hasStartClass("org.test.SampleApplication"));
assertThat(buildLog(project)).contains("Layout: ZIP");
});
}
@TestTemplate
void whenRequiresUnpackConfigurationIsProvidedItIsReflectedInTheRepackagedJar(MavenBuild mavenBuild) {
mavenBuild.project("jar-with-unpack").execute((project) -> {
File main = new File(project, "target/jar-with-unpack-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(main)).hasUnpackEntryWithNameStartingWith("BOOT-INF/lib/spring-core-")
.hasEntryWithNameStartingWith("BOOT-INF/lib/spring-context-");
});
}
@TestTemplate
void whenJarIsRepackagedWithACustomLayoutTheJarUsesTheLayout(MavenBuild mavenBuild) {
mavenBuild.project("jar-custom-layout").execute((project) -> {
assertThat(jar(new File(project, "custom/target/custom-0.0.1.BUILD-SNAPSHOT.jar")))
.hasEntryWithName("custom");
assertThat(jar(new File(project, "default/target/default-0.0.1.BUILD-SNAPSHOT.jar")))
.hasEntryWithName("sample");
});
}
@TestTemplate
void repackagedJarContainsTheLayersIndexByDefault(MavenBuild mavenBuild) {
mavenBuild.project("jar-layered").execute((project) -> {
File repackaged = new File(project, "jar/target/jar-layered-0.0.1.BUILD-SNAPSHOT.jar");
LibraryCoordinates coordinates = JarModeLibrary.TOOLS.getCoordinates();
assertThat(coordinates).isNotNull();
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-release")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-snapshot")
.hasEntryWithNameStartingWith("BOOT-INF/lib/" + coordinates.getArtifactId());
try (JarFile jarFile = new JarFile(repackaged)) {
Map<String, List<String>> layerIndex = readLayerIndex(jarFile);
assertThat(layerIndex.keySet()).containsExactly("dependencies", "spring-boot-loader",
"snapshot-dependencies", "application");
assertThat(layerIndex.get("application")).contains("BOOT-INF/lib/jar-release-0.0.1.RELEASE.jar",
"BOOT-INF/lib/jar-snapshot-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(layerIndex.get("dependencies"))
.anyMatch((dependency) -> dependency.startsWith("BOOT-INF/lib/log4j-api-"));
}
catch (IOException ex) {
// Ignore
}
});
}
@TestTemplate
void whenJarIsRepackagedWithTheLayersDisabledDoesNotContainLayersIndex(MavenBuild mavenBuild) {
mavenBuild.project("jar-layered-disabled").execute((project) -> {
File repackaged = new File(project, "jar/target/jar-layered-0.0.1.BUILD-SNAPSHOT.jar");
LibraryCoordinates coordinates = JarModeLibrary.TOOLS.getCoordinates();
assertThat(coordinates).isNotNull();
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-release")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-snapshot")
.hasEntryWithNameStartingWith("BOOT-INF/lib/" + coordinates.getArtifactId())
.doesNotHaveEntryWithName("BOOT-INF/layers.idx");
});
}
@TestTemplate
void whenJarIsRepackagedWithToolsExclude(MavenBuild mavenBuild) {
mavenBuild.project("jar-no-tools").execute((project) -> {
File repackaged = new File(project, "jar/target/jar-no-tools-0.0.1.BUILD-SNAPSHOT.jar");
LibraryCoordinates coordinates = JarModeLibrary.TOOLS.getCoordinates();
assertThat(coordinates).isNotNull();
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-release")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-snapshot")
.doesNotHaveEntryWithNameStartingWith("BOOT-INF/lib/" + coordinates.getArtifactId());
});
}
@TestTemplate
void whenJarIsRepackagedWithTheCustomLayers(MavenBuild mavenBuild) {
mavenBuild.project("jar-layered-custom").execute((project) -> {
File repackaged = new File(project, "jar/target/jar-layered-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithNameStartingWith("BOOT-INF/classes/")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-release")
.hasEntryWithNameStartingWith("BOOT-INF/lib/jar-snapshot");
try (JarFile jarFile = new JarFile(repackaged)) {
Map<String, List<String>> layerIndex = readLayerIndex(jarFile);
assertThat(layerIndex.keySet()).containsExactly("my-dependencies-name", "snapshot-dependencies",
"configuration", "application");
assertThat(layerIndex.get("application"))
.contains("BOOT-INF/lib/jar-release-0.0.1.RELEASE.jar",
"BOOT-INF/lib/jar-snapshot-0.0.1.BUILD-SNAPSHOT.jar",
"BOOT-INF/lib/jar-classifier-0.0.1-bravo.jar")
.doesNotContain("BOOT-INF/lib/jar-classifier-0.0.1-alpha.jar");
}
});
}
@TestTemplate
void repackagedJarContainsClasspathIndex(MavenBuild mavenBuild) {
mavenBuild.project("jar").execute((project) -> {
File repackaged = new File(project, "target/jar-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged))
.manifest((manifest) -> manifest.hasAttribute("Spring-Boot-Classpath-Index", "BOOT-INF/classpath.idx"));
assertThat(jar(repackaged)).hasEntryWithName("BOOT-INF/classpath.idx");
try (JarFile jarFile = new JarFile(repackaged)) {
List<String> index = readClasspathIndex(jarFile, "BOOT-INF/classpath.idx");
assertThat(index).allMatch((entry) -> entry.startsWith("BOOT-INF/lib/"));
}
});
}
@TestTemplate
void whenJarIsRepackagedWithOutputTimestampConfiguredThenJarIsReproducible(MavenBuild mavenBuild)
throws InterruptedException {
String firstHash = buildJarWithOutputTimestamp(mavenBuild);
Thread.sleep(1500);
String secondHash = buildJarWithOutputTimestamp(mavenBuild);
assertThat(firstHash).isEqualTo(secondHash);
}
private String buildJarWithOutputTimestamp(MavenBuild mavenBuild) {
AtomicReference<String> jarHash = new AtomicReference<>();
mavenBuild.project("jar-output-timestamp").execute((project) -> {
File repackaged = new File(project, "target/jar-output-timestamp-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(repackaged).isFile();
long expectedModified = 1584352800000L;
long offsetExpectedModified = expectedModified - TimeZone.getDefault().getOffset(expectedModified);
assertThat(repackaged.lastModified()).isEqualTo(expectedModified);
try (JarFile jar = new JarFile(repackaged)) {
List<String> unreproducibleEntries = jar.stream()
.filter((entry) -> entry.getLastModifiedTime().toMillis() != offsetExpectedModified)
.map((entry) -> entry.getName() + ": " + entry.getLastModifiedTime())
.toList();
assertThat(unreproducibleEntries).isEmpty();
jarHash.set(FileUtils.sha1Hash(repackaged));
FileSystemUtils.deleteRecursively(project);
}
catch (IOException ex) {
throw new RuntimeException(ex);
}
});
String hash = jarHash.get();
assertThat(hash).isNotNull();
return hash;
}
@TestTemplate
void whenJarIsRepackagedWithOutputTimestampConfiguredThenLibrariesAreSorted(MavenBuild mavenBuild) {
mavenBuild.project("jar-output-timestamp").execute((project) -> {
File repackaged = new File(project, "target/jar-output-timestamp-0.0.1.BUILD-SNAPSHOT.jar");
LibraryCoordinates coordinates = JarModeLibrary.TOOLS.getCoordinates();
assertThat(coordinates).isNotNull();
List<String> sortedLibs = Arrays.asList("BOOT-INF/lib/commons-logging", "BOOT-INF/lib/jakarta.servlet-api",
"BOOT-INF/lib/jspecify", "BOOT-INF/lib/micrometer-commons", "BOOT-INF/lib/micrometer-observation",
"BOOT-INF/lib/spring-aop", "BOOT-INF/lib/spring-beans",
"BOOT-INF/lib/" + coordinates.getArtifactId(), "BOOT-INF/lib/spring-context",
"BOOT-INF/lib/spring-core", "BOOT-INF/lib/spring-expression");
assertThat(jar(repackaged)).entryNamesInPath("BOOT-INF/lib/")
.zipSatisfy(sortedLibs,
(String jarLib, String expectedLib) -> assertThat(jarLib).startsWith(expectedLib));
});
}
@TestTemplate
void whenSigned(MavenBuild mavenBuild) {
mavenBuild.project("jar-signed").execute((project) -> {
File repackaged = new File(project, "target/jar-signed-0.0.1.BUILD-SNAPSHOT.jar");
assertThat(jar(repackaged)).hasEntryWithName("META-INF/BOOT.SF");
});
}
}
| JarIntegrationTests |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java | {
"start": 1280,
"end": 5263
} | class ____ extends AbstractResponse {
private final WriteShareGroupStateResponseData data;
public WriteShareGroupStateResponse(WriteShareGroupStateResponseData data) {
super(ApiKeys.WRITE_SHARE_GROUP_STATE);
this.data = data;
}
@Override
public WriteShareGroupStateResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> counts = new HashMap<>();
data.results().forEach(
result -> result.partitions().forEach(
partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode()))
)
);
return counts;
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// No op
}
public static WriteShareGroupStateResponse parse(Readable readable, short version) {
return new WriteShareGroupStateResponse(
new WriteShareGroupStateResponseData(readable, version)
);
}
public static WriteShareGroupStateResponseData toResponseData(Uuid topicId, int partitionId) {
return new WriteShareGroupStateResponseData()
.setResults(List.of(
new WriteShareGroupStateResponseData.WriteStateResult()
.setTopicId(topicId)
.setPartitions(List.of(
new WriteShareGroupStateResponseData.PartitionResult()
.setPartition(partitionId)))));
}
public static WriteShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) {
WriteShareGroupStateResponseData responseData = new WriteShareGroupStateResponseData();
responseData.setResults(List.of(new WriteShareGroupStateResponseData.WriteStateResult()
.setTopicId(topicId)
.setPartitions(List.of(new WriteShareGroupStateResponseData.PartitionResult()
.setPartition(partitionId)
.setErrorCode(error.code())
.setErrorMessage(errorMessage)))));
return responseData;
}
public static WriteShareGroupStateResponseData.PartitionResult toErrorResponsePartitionResult(int partitionId, Errors error, String errorMessage) {
return new WriteShareGroupStateResponseData.PartitionResult()
.setPartition(partitionId)
.setErrorCode(error.code())
.setErrorMessage(errorMessage);
}
public static WriteShareGroupStateResponseData.WriteStateResult toResponseWriteStateResult(Uuid topicId, List<WriteShareGroupStateResponseData.PartitionResult> partitionResults) {
return new WriteShareGroupStateResponseData.WriteStateResult()
.setTopicId(topicId)
.setPartitions(partitionResults);
}
public static WriteShareGroupStateResponseData.PartitionResult toResponsePartitionResult(int partitionId) {
return new WriteShareGroupStateResponseData.PartitionResult()
.setPartition(partitionId);
}
public static WriteShareGroupStateResponseData toGlobalErrorResponse(WriteShareGroupStateRequestData request, Errors error) {
List<WriteShareGroupStateResponseData.WriteStateResult> writeStateResults = new ArrayList<>();
request.topics().forEach(topicData -> {
List<WriteShareGroupStateResponseData.PartitionResult> partitionResults = new ArrayList<>();
topicData.partitions().forEach(partitionData -> partitionResults.add(
toErrorResponsePartitionResult(partitionData.partition(), error, error.message()))
);
writeStateResults.add(toResponseWriteStateResult(topicData.topicId(), partitionResults));
});
return new WriteShareGroupStateResponseData().setResults(writeStateResults);
}
}
| WriteShareGroupStateResponse |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/codec/vectors/diskbbq/next/ESNextDiskBBQVectorsReader.java | {
"start": 2325,
"end": 9672
} | class ____ extends IVFVectorsReader {
public ESNextDiskBBQVectorsReader(SegmentReadState state, GenericFlatVectorReaders.LoadFlatVectorsReader getFormatReader)
throws IOException {
super(state, getFormatReader);
}
CentroidIterator getPostingListPrefetchIterator(CentroidIterator centroidIterator, IndexInput postingListSlice) throws IOException {
return new CentroidIterator() {
CentroidOffsetAndLength nextOffsetAndLength = centroidIterator.hasNext()
? centroidIterator.nextPostingListOffsetAndLength()
: null;
{
// prefetch the first one
if (nextOffsetAndLength != null) {
prefetch(nextOffsetAndLength);
}
}
void prefetch(CentroidOffsetAndLength offsetAndLength) throws IOException {
postingListSlice.prefetch(offsetAndLength.offset(), offsetAndLength.length());
}
@Override
public boolean hasNext() {
return nextOffsetAndLength != null;
}
@Override
public CentroidOffsetAndLength nextPostingListOffsetAndLength() throws IOException {
CentroidOffsetAndLength offsetAndLength = nextOffsetAndLength;
if (centroidIterator.hasNext()) {
nextOffsetAndLength = centroidIterator.nextPostingListOffsetAndLength();
prefetch(nextOffsetAndLength);
} else {
nextOffsetAndLength = null; // indicate we reached the end
}
return offsetAndLength;
}
};
}
static long directWriterSizeOnDisk(long numValues, int bitsPerValue) {
// TODO: use method in https://github.com/apache/lucene/pull/15422 when/if merged.
long bytes = (numValues * bitsPerValue + Byte.SIZE - 1) / 8;
int paddingBitsNeeded;
if (bitsPerValue > Integer.SIZE) {
paddingBitsNeeded = Long.SIZE - bitsPerValue;
} else if (bitsPerValue > Short.SIZE) {
paddingBitsNeeded = Integer.SIZE - bitsPerValue;
} else if (bitsPerValue > Byte.SIZE) {
paddingBitsNeeded = Short.SIZE - bitsPerValue;
} else {
paddingBitsNeeded = 0;
}
final int paddingBytesNeeded = (paddingBitsNeeded + Byte.SIZE - 1) / Byte.SIZE;
return bytes + paddingBytesNeeded;
}
@Override
public CentroidIterator getCentroidIterator(
FieldInfo fieldInfo,
int numCentroids,
IndexInput centroids,
float[] targetQuery,
IndexInput postingListSlice,
AcceptDocs acceptDocs,
float approximateCost,
FloatVectorValues values,
float visitRatio
) throws IOException {
final FieldEntry fieldEntry = fields.get(fieldInfo.number);
float approximateDocsPerCentroid = approximateCost / numCentroids;
if (approximateDocsPerCentroid <= 1.25) {
// TODO: we need to make this call to build the iterator, otherwise accept docs breaks all together
approximateDocsPerCentroid = (float) acceptDocs.cost() / numCentroids;
}
final int bitsRequired = DirectWriter.bitsRequired(numCentroids);
final long sizeLookup = directWriterSizeOnDisk(values.size(), bitsRequired);
final long fp = centroids.getFilePointer();
final FixedBitSet acceptCentroids;
if (approximateDocsPerCentroid > 1.25 || numCentroids == 1) {
// only apply centroid filtering when we expect some / many centroids will not have
// any matching document.
acceptCentroids = null;
} else {
acceptCentroids = new FixedBitSet(numCentroids);
final KnnVectorValues.DocIndexIterator docIndexIterator = values.iterator();
final DocIdSetIterator iterator = ConjunctionUtils.intersectIterators(List.of(acceptDocs.iterator(), docIndexIterator));
final LongValues longValues = DirectReader.getInstance(centroids.randomAccessSlice(fp, sizeLookup), bitsRequired);
int doc = iterator.nextDoc();
for (; doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
acceptCentroids.set((int) longValues.get(docIndexIterator.index()));
}
}
final OptimizedScalarQuantizer scalarQuantizer = new OptimizedScalarQuantizer(fieldInfo.getVectorSimilarityFunction());
final int[] scratch = new int[targetQuery.length];
final OptimizedScalarQuantizer.QuantizationResult queryParams = scalarQuantizer.scalarQuantize(
targetQuery,
new float[targetQuery.length],
scratch,
(byte) 7,
fieldEntry.globalCentroid()
);
final byte[] quantized = new byte[targetQuery.length];
for (int i = 0; i < quantized.length; i++) {
quantized[i] = (byte) scratch[i];
}
final ES92Int7VectorsScorer scorer = ESVectorUtil.getES92Int7VectorsScorer(centroids, fieldInfo.getVectorDimension());
centroids.seek(fp + sizeLookup);
int numParents = centroids.readVInt();
CentroidIterator centroidIterator;
if (numParents > 0) {
// equivalent to (float) centroidsPerParentCluster / 2
float centroidOversampling = (float) fieldEntry.numCentroids() / (2 * numParents);
centroidIterator = getCentroidIteratorWithParents(
fieldInfo,
centroids,
numParents,
numCentroids,
scorer,
quantized,
queryParams,
fieldEntry.globalCentroidDp(),
visitRatio * centroidOversampling,
acceptCentroids
);
} else {
centroidIterator = getCentroidIteratorNoParent(
fieldInfo,
centroids,
numCentroids,
scorer,
quantized,
queryParams,
fieldEntry.globalCentroidDp(),
acceptCentroids
);
}
return getPostingListPrefetchIterator(centroidIterator, postingListSlice);
}
@Override
protected FieldEntry doReadField(
IndexInput input,
String rawVectorFormat,
boolean useDirectIOReads,
VectorSimilarityFunction similarityFunction,
VectorEncoding vectorEncoding,
int numCentroids,
long centroidOffset,
long centroidLength,
long postingListOffset,
long postingListLength,
float[] globalCentroid,
float globalCentroidDp
) throws IOException {
ESNextDiskBBQVectorsFormat.QuantEncoding quantEncoding = ESNextDiskBBQVectorsFormat.QuantEncoding.fromId(input.readInt());
return new NextFieldEntry(
rawVectorFormat,
useDirectIOReads,
similarityFunction,
vectorEncoding,
numCentroids,
centroidOffset,
centroidLength,
postingListOffset,
postingListLength,
globalCentroid,
globalCentroidDp,
quantEncoding
);
}
static | ESNextDiskBBQVectorsReader |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryLongToIntConversionTest.java | {
"start": 12835,
"end": 13287
} | class ____ {
void acceptsLong(long value) {}
void foo() {
Long x = Long.valueOf(1);
acceptsLong(x);
}
}
""")
.setFixChooser(FIRST)
.doTest(TEXT_MATCH);
}
@Test
public void suggestReplacingInstanceMethodWithConstrainToRange() {
refactoringHelper
.addInputLines(
"in/A.java",
"""
public | A |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3667/Source.java | {
"start": 844,
"end": 1069
} | class ____ {
private final String value;
public NestedNested(String value) {
this.value = value;
}
public String getValue() {
return value;
}
}
}
| NestedNested |
java | google__dagger | javatests/dagger/internal/codegen/ScopingValidationTest.java | {
"start": 7150,
"end": 8068
} | class ____ {",
" @Provides @PerTest String string() { return \"a string\"; }", // incompatible scope
" @Provides long integer() { return 0L; }", // unscoped - valid
" @Provides @Singleton float floatingPoint() { return 0.0f; }", // same scope - valid
" @Provides @Per(MyComponent.class) boolean bool() { return false; }", // incompatible
"}");
CompilerTests.daggerCompiler(componentFile, scopeFile, scopeWithAttribute, typeFile, moduleFile)
.compile(
subject -> {
subject.hasErrorCount(1);
subject
.hasErrorContaining(
String.join(
"\n",
"MyComponent scoped with @Singleton may not reference bindings with "
+ "different scopes:",
" @PerTest | ScopedModule |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResourceRequestException.java | {
"start": 1282,
"end": 1731
} | class ____ extends YarnRuntimeException {
private static final long serialVersionUID = 10081123982L;
public SchedulerInvalidResourceRequestException(String message) {
super(message);
}
public SchedulerInvalidResourceRequestException(Throwable cause) {
super(cause);
}
public SchedulerInvalidResourceRequestException(String message,
Throwable cause) {
super(message, cause);
}
}
| SchedulerInvalidResourceRequestException |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/PagerUtilsTest_Limit_hive_0.java | {
"start": 151,
"end": 767
} | class ____ extends TestCase {
public void test_hive_0() throws Exception {
String result = PagerUtils.limit("SELECT * FROM test", DbType.hive, 0, 10);
System.out.println(result);
assertEquals("SELECT *\n" +
"FROM test\n" +
"LIMIT 10", result);
}
public void test_odps_0() throws Exception {
String result = PagerUtils.limit("SELECT * FROM test", DbType.odps, 0, 10);
System.out.println(result);
assertEquals("SELECT *\n" +
"FROM test\n" +
"LIMIT 10", result);
}
}
| PagerUtilsTest_Limit_hive_0 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java | {
"start": 1570,
"end": 9015
} | class ____ extends ApplicationAttemptReport {
ApplicationAttemptReportProto proto = ApplicationAttemptReportProto
.getDefaultInstance();
ApplicationAttemptReportProto.Builder builder = null;
boolean viaProto = false;
private ApplicationAttemptId ApplicationAttemptId;
private ContainerId amContainerId;
public ApplicationAttemptReportPBImpl() {
builder = ApplicationAttemptReportProto.newBuilder();
}
public ApplicationAttemptReportPBImpl(ApplicationAttemptReportProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ApplicationAttemptId getApplicationAttemptId() {
if (this.ApplicationAttemptId != null) {
return this.ApplicationAttemptId;
}
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationAttemptId()) {
return null;
}
this.ApplicationAttemptId =
convertFromProtoFormat(p.getApplicationAttemptId());
return this.ApplicationAttemptId;
}
@Override
public String getHost() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasHost()) {
return null;
}
return p.getHost();
}
@Override
public int getRpcPort() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getRpcPort();
}
@Override
public String getTrackingUrl() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasTrackingUrl()) {
return null;
}
return p.getTrackingUrl();
}
@Override
public String getOriginalTrackingUrl() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasOriginalTrackingUrl()) {
return null;
}
return p.getOriginalTrackingUrl();
}
@Override
public String getDiagnostics() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnostics()) {
return null;
}
return p.getDiagnostics();
}
@Override
public YarnApplicationAttemptState getYarnApplicationAttemptState() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasYarnApplicationAttemptState()) {
return null;
}
return convertFromProtoFormat(p.getYarnApplicationAttemptState());
}
@Override
public void setYarnApplicationAttemptState(YarnApplicationAttemptState state) {
maybeInitBuilder();
if (state == null) {
builder.clearYarnApplicationAttemptState();
return;
}
builder.setYarnApplicationAttemptState(convertToProtoFormat(state));
}
private YarnApplicationAttemptStateProto convertToProtoFormat(
YarnApplicationAttemptState state) {
return ProtoUtils.convertToProtoFormat(state);
}
private YarnApplicationAttemptState convertFromProtoFormat(
YarnApplicationAttemptStateProto yarnApplicationAttemptState) {
return ProtoUtils.convertFromProtoFormat(yarnApplicationAttemptState);
}
@Override
public void
setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
maybeInitBuilder();
if (applicationAttemptId == null)
builder.clearApplicationAttemptId();
this.ApplicationAttemptId = applicationAttemptId;
}
@Override
public void setHost(String host) {
maybeInitBuilder();
if (host == null) {
builder.clearHost();
return;
}
builder.setHost(host);
}
@Override
public void setRpcPort(int rpcPort) {
maybeInitBuilder();
builder.setRpcPort(rpcPort);
}
@Override
public void setTrackingUrl(String url) {
maybeInitBuilder();
if (url == null) {
builder.clearTrackingUrl();
return;
}
builder.setTrackingUrl(url);
}
@Override
public void setOriginalTrackingUrl(String oUrl) {
maybeInitBuilder();
if (oUrl == null) {
builder.clearOriginalTrackingUrl();
return;
}
builder.setOriginalTrackingUrl(oUrl);
}
@Override
public void setDiagnostics(String diagnostics) {
maybeInitBuilder();
if (diagnostics == null) {
builder.clearDiagnostics();
return;
}
builder.setDiagnostics(diagnostics);
}
public ApplicationAttemptReportProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationAttemptReportProto.newBuilder(proto);
}
viaProto = false;
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void mergeLocalToBuilder() {
if (this.ApplicationAttemptId != null
&& !((ApplicationAttemptIdPBImpl) this.ApplicationAttemptId).getProto()
.equals(builder.getApplicationAttemptId())) {
builder
.setApplicationAttemptId(convertToProtoFormat(this.ApplicationAttemptId));
}
if (this.amContainerId != null
&& !((ContainerIdPBImpl) this.amContainerId).getProto().equals(
builder.getAmContainerId())) {
builder.setAmContainerId(convertToProtoFormat(this.amContainerId));
}
}
private ContainerIdProto convertToProtoFormat(ContainerId amContainerId) {
return ((ContainerIdPBImpl) amContainerId).getProto();
}
private ContainerIdPBImpl convertFromProtoFormat(
ContainerIdProto amContainerId) {
return new ContainerIdPBImpl(amContainerId);
}
private ApplicationAttemptIdProto
convertToProtoFormat(ApplicationAttemptId t) {
return ((ApplicationAttemptIdPBImpl) t).getProto();
}
private ApplicationAttemptIdPBImpl convertFromProtoFormat(
ApplicationAttemptIdProto applicationAttemptId) {
return new ApplicationAttemptIdPBImpl(applicationAttemptId);
}
@Override
public ContainerId getAMContainerId() {
if (this.amContainerId != null) {
return this.amContainerId;
}
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasAmContainerId()) {
return null;
}
this.amContainerId = convertFromProtoFormat(p.getAmContainerId());
return this.amContainerId;
}
@Override
public void setAMContainerId(ContainerId amContainerId) {
maybeInitBuilder();
if (amContainerId == null)
builder.clearAmContainerId();
this.amContainerId = amContainerId;
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime(finishTime);
}
@Override
public long getStartTime() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getStartTime();
}
@Override
public long getFinishTime() {
ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getFinishTime();
}
}
| ApplicationAttemptReportPBImpl |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringOnCompletionAndInterceptTest.java | {
"start": 1049,
"end": 1343
} | class ____ extends OnCompletionAndInterceptTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/SpringOnCompletionAndInterceptTest.xml");
}
}
| SpringOnCompletionAndInterceptTest |
java | google__guava | android/guava/src/com/google/common/primitives/Chars.java | {
"start": 15220,
"end": 21379
} | enum ____ implements Comparator<char[]> {
INSTANCE;
@Override
public int compare(char[] left, char[] right) {
int minLength = Math.min(left.length, right.length);
for (int i = 0; i < minLength; i++) {
int result = Character.compare(left[i], right[i]);
if (result != 0) {
return result;
}
}
return left.length - right.length;
}
@Override
public String toString() {
return "Chars.lexicographicalComparator()";
}
}
/**
* Copies a collection of {@code Character} instances into a new array of primitive {@code char}
* values.
*
* <p>Elements are copied from the argument collection as if by {@code collection.toArray()}.
* Calling this method is as thread-safe as calling that method.
*
* @param collection a collection of {@code Character} objects
* @return an array containing the same values as {@code collection}, in the same order, converted
* to primitives
* @throws NullPointerException if {@code collection} or any of its elements is null
*/
public static char[] toArray(Collection<Character> collection) {
if (collection instanceof CharArrayAsList) {
return ((CharArrayAsList) collection).toCharArray();
}
Object[] boxedArray = collection.toArray();
int len = boxedArray.length;
char[] array = new char[len];
for (int i = 0; i < len; i++) {
// checkNotNull for GWT (do not optimize)
array[i] = (Character) checkNotNull(boxedArray[i]);
}
return array;
}
/**
* Sorts the elements of {@code array} in descending order.
*
* @since 23.1
*/
public static void sortDescending(char[] array) {
checkNotNull(array);
sortDescending(array, 0, array.length);
}
/**
* Sorts the elements of {@code array} between {@code fromIndex} inclusive and {@code toIndex}
* exclusive in descending order.
*
* @since 23.1
*/
public static void sortDescending(char[] array, int fromIndex, int toIndex) {
checkNotNull(array);
checkPositionIndexes(fromIndex, toIndex, array.length);
Arrays.sort(array, fromIndex, toIndex);
reverse(array, fromIndex, toIndex);
}
/**
* Reverses the elements of {@code array}. This is equivalent to {@code
* Collections.reverse(Chars.asList(array))}, but is likely to be more efficient.
*
* @since 23.1
*/
public static void reverse(char[] array) {
checkNotNull(array);
reverse(array, 0, array.length);
}
/**
* Reverses the elements of {@code array} between {@code fromIndex} inclusive and {@code toIndex}
* exclusive. This is equivalent to {@code
* Collections.reverse(Chars.asList(array).subList(fromIndex, toIndex))}, but is likely to be more
* efficient.
*
* @throws IndexOutOfBoundsException if {@code fromIndex < 0}, {@code toIndex > array.length}, or
* {@code toIndex > fromIndex}
* @since 23.1
*/
public static void reverse(char[] array, int fromIndex, int toIndex) {
checkNotNull(array);
checkPositionIndexes(fromIndex, toIndex, array.length);
for (int i = fromIndex, j = toIndex - 1; i < j; i++, j--) {
char tmp = array[i];
array[i] = array[j];
array[j] = tmp;
}
}
/**
* Performs a right rotation of {@code array} of "distance" places, so that the first element is
* moved to index "distance", and the element at index {@code i} ends up at index {@code (distance
* + i) mod array.length}. This is equivalent to {@code Collections.rotate(Chars.asList(array),
* distance)}, but is considerably faster and avoids allocation and garbage collection.
*
* <p>The provided "distance" may be negative, which will rotate left.
*
* @since 32.0.0
*/
public static void rotate(char[] array, int distance) {
rotate(array, distance, 0, array.length);
}
/**
* Performs a right rotation of {@code array} between {@code fromIndex} inclusive and {@code
* toIndex} exclusive. This is equivalent to {@code
* Collections.rotate(Chars.asList(array).subList(fromIndex, toIndex), distance)}, but is
* considerably faster and avoids allocations and garbage collection.
*
* <p>The provided "distance" may be negative, which will rotate left.
*
* @throws IndexOutOfBoundsException if {@code fromIndex < 0}, {@code toIndex > array.length}, or
* {@code toIndex > fromIndex}
* @since 32.0.0
*/
public static void rotate(char[] array, int distance, int fromIndex, int toIndex) {
// See Ints.rotate for more details about possible algorithms here.
checkNotNull(array);
checkPositionIndexes(fromIndex, toIndex, array.length);
if (array.length <= 1) {
return;
}
int length = toIndex - fromIndex;
// Obtain m = (-distance mod length), a non-negative value less than "length". This is how many
// places left to rotate.
int m = -distance % length;
m = (m < 0) ? m + length : m;
// The current index of what will become the first element of the rotated section.
int newFirstIndex = m + fromIndex;
if (newFirstIndex == fromIndex) {
return;
}
reverse(array, fromIndex, newFirstIndex);
reverse(array, newFirstIndex, toIndex);
reverse(array, fromIndex, toIndex);
}
/**
* Returns a fixed-size list backed by the specified array, similar to {@link
* Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)}, but any attempt to
* set a value to {@code null} will result in a {@link NullPointerException}.
*
* <p>The returned list maintains the values, but not the identities, of {@code Character} objects
* written to or read from it. For example, whether {@code list.get(0) == list.get(0)} is true for
* the returned list is unspecified.
*
* <p>The returned list is serializable.
*
* @param backingArray the array to back the list
* @return a list view of the array
*/
public static List<Character> asList(char... backingArray) {
if (backingArray.length == 0) {
return Collections.emptyList();
}
return new CharArrayAsList(backingArray);
}
private static final | LexicographicalComparator |
java | quarkusio__quarkus | extensions/funqy/funqy-server-common/runtime/src/main/java/io/quarkus/funqy/runtime/ParameterInjector.java | {
"start": 148,
"end": 574
} | class ____ {
public static ValueInjector createInjector(Type type, Class clz, Annotation[] annotations) {
if (annotations != null) {
for (Annotation ann : annotations) {
if (ann.annotationType().equals(Context.class)) {
return new ContextValueInjector(clz);
}
}
}
return new InputValueInjector(clz);
}
}
| ParameterInjector |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/util/LazyMemorySegmentPool.java | {
"start": 1277,
"end": 4247
} | class ____ implements MemorySegmentPool, Closeable {
private static final long PER_REQUEST_MEMORY_SIZE = 16 * 1024 * 1024;
private final Object owner;
private final MemoryManager memoryManager;
private final ArrayList<MemorySegment> cachePages;
private final int maxPages;
private final int perRequestPages;
private int pageUsage;
public LazyMemorySegmentPool(Object owner, MemoryManager memoryManager, int maxPages) {
this.owner = owner;
this.memoryManager = memoryManager;
this.cachePages = new ArrayList<>();
this.maxPages = maxPages;
this.pageUsage = 0;
this.perRequestPages =
Math.max(1, (int) (PER_REQUEST_MEMORY_SIZE / memoryManager.getPageSize()));
}
@Override
public int pageSize() {
return this.memoryManager.getPageSize();
}
@Override
public void returnAll(List<MemorySegment> memory) {
this.pageUsage -= memory.size();
if (this.pageUsage < 0) {
throw new RuntimeException("Return too more memories.");
}
this.cachePages.addAll(memory);
}
public void returnPage(MemorySegment segment) {
returnAll(Collections.singletonList(segment));
}
@Override
public MemorySegment nextSegment() {
int freePages = freePages();
if (freePages == 0) {
return null;
}
if (this.cachePages.isEmpty()) {
int numPages = Math.min(freePages, this.perRequestPages);
try {
this.memoryManager.allocatePages(owner, this.cachePages, numPages);
} catch (MemoryAllocationException e) {
throw new RuntimeException(e);
}
}
this.pageUsage++;
return this.cachePages.remove(this.cachePages.size() - 1);
}
public List<MemorySegment> allocateSegments(int required) {
int freePages = freePages();
if (freePages < required) {
return null;
}
List<MemorySegment> ret = new ArrayList<>(required);
for (int i = 0; i < required; i++) {
MemorySegment segment;
try {
segment = nextSegment();
Preconditions.checkNotNull(segment);
} catch (Throwable t) {
// unexpected, we should first return all temporary segments
returnAll(ret);
throw t;
}
ret.add(segment);
}
return ret;
}
@Override
public int freePages() {
return this.maxPages - this.pageUsage;
}
@Override
public void close() {
if (this.pageUsage != 0) {
throw new RuntimeException(
"Should return all used memory before clean, page used: " + pageUsage);
}
cleanCache();
}
public void cleanCache() {
this.memoryManager.release(this.cachePages);
}
}
| LazyMemorySegmentPool |
java | quarkusio__quarkus | extensions/smallrye-metrics/deployment/src/main/java/io/quarkus/smallrye/metrics/deployment/SmallRyeMetricsDotNames.java | {
"start": 939,
"end": 3194
} | class ____ {
// metric interfaces
public static final DotName METRIC_INTERFACE = DotName
.createSimple(org.eclipse.microprofile.metrics.Metric.class.getName());
public static final DotName GAUGE_INTERFACE = DotName
.createSimple(org.eclipse.microprofile.metrics.Gauge.class.getName());
public static final DotName COUNTER_INTERFACE = DotName
.createSimple(Counter.class.getName());
public static final DotName CONCURRENT_GAUGE_INTERFACE = DotName
.createSimple(org.eclipse.microprofile.metrics.ConcurrentGauge.class.getName());
public static final DotName METER_INTERFACE = DotName
.createSimple(Meter.class.getName());
public static final DotName SIMPLE_TIMER_INTERFACE = DotName
.createSimple(SimpleTimer.class.getName());
public static final DotName TIMER_INTERFACE = DotName
.createSimple(Timer.class.getName());
public static final DotName HISTOGRAM_INTERFACE = DotName
.createSimple(Histogram.class.getName());
// annotations
public static final DotName GAUGE = DotName.createSimple(Gauge.class.getName());
public static final DotName TIMED = DotName.createSimple(Timed.class.getName());
public static final DotName SIMPLY_TIMED = DotName.createSimple(SimplyTimed.class.getName());
public static final DotName METRIC = DotName.createSimple(Metric.class.getName());
public static final DotName COUNTED = DotName.createSimple(Counted.class.getName());
public static final DotName METERED = DotName.createSimple(Metered.class.getName());
public static final DotName METRICS_BINDING = DotName.createSimple(MetricsBinding.class.getName());
public static final DotName CONCURRENT_GAUGE = DotName.createSimple(ConcurrentGauge.class.getName());
public static final Set<DotName> METRICS_ANNOTATIONS = new HashSet<>(Arrays.asList(
GAUGE,
TIMED,
SIMPLY_TIMED,
COUNTED,
METERED,
CONCURRENT_GAUGE));
public static boolean isMetricAnnotation(AnnotationInstance instance) {
return METRICS_ANNOTATIONS.contains(instance.name());
}
// these are needed for determining whether a | SmallRyeMetricsDotNames |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java | {
"start": 2636,
"end": 7848
} | class ____ {
private static final Logger LOG = DataNode.LOG;
private StripedReader stripedReader;
private final DataNode datanode;
private final Configuration conf;
private final short index; // internal block index
private final ExtendedBlock block;
private final DatanodeInfo source;
private BlockReader blockReader;
private ByteBuffer buffer;
private boolean isLocal;
StripedBlockReader(StripedReader stripedReader, DataNode datanode,
Configuration conf, short index, ExtendedBlock block,
DatanodeInfo source, long offsetInBlock) {
this.stripedReader = stripedReader;
this.datanode = datanode;
this.conf = conf;
this.index = index;
this.source = source;
this.block = block;
this.isLocal = false;
BlockReader tmpBlockReader = createBlockReader(offsetInBlock);
if (tmpBlockReader != null) {
this.blockReader = tmpBlockReader;
}
}
ByteBuffer getReadBuffer() {
if (buffer == null) {
this.buffer = stripedReader.allocateReadBuffer();
}
return buffer;
}
void freeReadBuffer() {
DataNodeFaultInjector.get().interceptFreeBlockReaderBuffer();
buffer = null;
}
void resetBlockReader(long offsetInBlock) {
this.blockReader = createBlockReader(offsetInBlock);
}
private BlockReader createBlockReader(long offsetInBlock) {
if (offsetInBlock >= block.getNumBytes()) {
return null;
}
Peer peer = null;
try {
InetSocketAddress dnAddr =
stripedReader.getSocketAddress4Transfer(source);
Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(
block, EnumSet.of(BlockTokenIdentifier.AccessMode.READ),
StorageType.EMPTY_ARRAY, new String[0]);
/*
* This can be further improved if the replica is local, then we can
* read directly from DN and need to check the replica is FINALIZED
* state, notice we should not use short-circuit local read which
* requires config for domain-socket in UNIX or legacy config in
* Windows. The network distance value isn't used for this scenario.
*
* TODO: add proper tracer
*/
peer = newConnectedPeer(block, dnAddr, blockToken, source);
if (peer.isLocal()) {
this.isLocal = true;
}
return BlockReaderRemote.newBlockReader(
"dummy", block, blockToken, offsetInBlock,
block.getNumBytes() - offsetInBlock, true, "", peer, source,
null, stripedReader.getCachingStrategy(), -1, conf);
} catch (IOException e) {
LOG.info("Exception while creating remote block reader for {}, datanode {}",
block, source, e);
IOUtils.closeStream(peer);
return null;
}
}
private Peer newConnectedPeer(ExtendedBlock b, InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken,
DatanodeID datanodeId)
throws IOException {
Peer peer = null;
boolean success = false;
Socket sock = null;
final int socketTimeout = datanode.getDnConf().getSocketTimeout();
try {
sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
NetUtils.connect(sock, addr, socketTimeout);
peer = DFSUtilClient.peerFromSocketAndKey(datanode.getSaslClient(),
sock, datanode.getDataEncryptionKeyFactoryForBlock(b),
blockToken, datanodeId, socketTimeout);
success = true;
return peer;
} finally {
if (!success) {
IOUtils.cleanupWithLogger(null, peer);
IOUtils.closeSocket(sock);
}
}
}
Callable<BlockReadStats> readFromBlock(final int length,
final CorruptedBlocks corruptedBlocks) {
return new Callable<BlockReadStats>() {
@Override
public BlockReadStats call() throws Exception {
try {
getReadBuffer().limit(length);
return actualReadFromBlock();
} catch (ChecksumException e) {
LOG.warn("Found Checksum error for {} from {} at {}", block,
source, e.getPos());
corruptedBlocks.addCorruptedBlock(block, source);
throw e;
} catch (IOException e) {
LOG.info(e.getMessage());
throw e;
} finally {
DataNodeFaultInjector.get().interceptBlockReader();
}
}
};
}
/**
* Perform actual reading of bytes from block.
*/
private BlockReadStats actualReadFromBlock() throws IOException {
DataNodeFaultInjector.get().delayBlockReader();
int len = buffer.remaining();
int n = 0;
while (n < len) {
int nread = blockReader.read(buffer);
if (nread <= 0) {
break;
}
n += nread;
stripedReader.getReconstructor().incrBytesRead(isLocal, nread);
}
return new BlockReadStats(n, blockReader.isShortCircuit(),
blockReader.getNetworkDistance());
}
// close block reader
void closeBlockReader() {
IOUtils.closeStream(blockReader);
blockReader = null;
}
short getIndex() {
return index;
}
BlockReader getBlockReader() {
return blockReader;
}
}
| StripedBlockReader |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBeanFactoryInitializationAotProcessor.java | {
"start": 1678,
"end": 2687
} | class ____ implements BeanFactoryInitializationAotProcessor {
@Override
public @Nullable ConfigurationPropertiesReflectionHintsContribution processAheadOfTime(
ConfigurableListableBeanFactory beanFactory) {
String[] beanNames = beanFactory.getBeanNamesForAnnotation(ConfigurationProperties.class);
List<Bindable<?>> bindables = new ArrayList<>();
for (String beanName : beanNames) {
Class<?> beanType = beanFactory.getType(beanName, false);
if (beanType != null) {
BindMethod bindMethod = beanFactory.containsBeanDefinition(beanName)
? (BindMethod) beanFactory.getBeanDefinition(beanName).getAttribute(BindMethod.class.getName())
: null;
bindables.add(Bindable.of(ClassUtils.getUserClass(beanType))
.withBindMethod((bindMethod != null) ? bindMethod : BindMethod.JAVA_BEAN));
}
}
return (!bindables.isEmpty()) ? new ConfigurationPropertiesReflectionHintsContribution(bindables) : null;
}
static final | ConfigurationPropertiesBeanFactoryInitializationAotProcessor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.