language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/ProcessTableFunctionTestUtils.java
|
{
"start": 21733,
"end": 22148
}
|
class ____ extends AppendProcessTableFunctionBase {
public void eval(
ColumnList columnList1,
@ArgumentHint(isOptional = true) ColumnList columnList2,
@DataTypeHint("DESCRIPTOR NOT NULL") ColumnList columnList3) {
collectObjects(columnList1, columnList2, columnList3);
}
}
/** Testing function. */
public static
|
DescriptorFunction
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/MockSecurityContextHolderStrategy.java
|
{
"start": 896,
"end": 1426
}
|
class ____ implements SecurityContextHolderStrategy {
private SecurityContext context;
@Override
public void clearContext() {
this.context = null;
}
@Override
public SecurityContext getContext() {
if (this.context == null) {
this.context = createEmptyContext();
}
return this.context;
}
@Override
public void setContext(SecurityContext context) {
this.context = context;
}
@Override
public SecurityContext createEmptyContext() {
return new SecurityContextImpl();
}
}
|
MockSecurityContextHolderStrategy
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
|
{
"start": 2240,
"end": 3833
}
|
class ____ implements JobContext {
protected final org.apache.hadoop.mapred.JobConf conf;
private JobID jobId;
/**
* The UserGroupInformation object that has a reference to the current user
*/
protected UserGroupInformation ugi;
protected final Credentials credentials;
public JobContextImpl(Configuration conf, JobID jobId) {
if (conf instanceof JobConf) {
this.conf = (JobConf)conf;
} else {
this.conf = new JobConf(conf);
}
this.jobId = jobId;
this.credentials = this.conf.getCredentials();
try {
this.ugi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Return the configuration for the job.
* @return the shared configuration object
*/
public Configuration getConfiguration() {
return conf;
}
/**
* Get the unique ID for the job.
* @return the object with the job id
*/
public JobID getJobID() {
return jobId;
}
/**
* Set the JobID.
*/
public void setJobID(JobID jobId) {
this.jobId = jobId;
}
/**
* Get configured the number of reduce tasks for this job. Defaults to
* <code>1</code>.
* @return the number of reduce tasks for this job.
*/
public int getNumReduceTasks() {
return conf.getNumReduceTasks();
}
/**
* Get the current working directory for the default file system.
*
* @return the directory name.
*/
public Path getWorkingDirectory() throws IOException {
return conf.getWorkingDirectory();
}
/**
* Get the key
|
JobContextImpl
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/cache/interceptor/LoggingCacheErrorHandlerTests.java
|
{
"start": 1230,
"end": 2974
}
|
class ____ {
private static final Cache CACHE = new NoOpCache("NOOP");
private static final String KEY = "enigma";
private final Log logger = mock();
private LoggingCacheErrorHandler handler = new LoggingCacheErrorHandler(this.logger, false);
@BeforeEach
void setUp() {
given(this.logger.isWarnEnabled()).willReturn(true);
}
@Test
void handleGetCacheErrorLogsAppropriateMessage() {
this.handler.handleCacheGetError(new RuntimeException(), CACHE, KEY);
verify(this.logger).warn("Cache 'NOOP' failed to get entry with key 'enigma'");
}
@Test
void handlePutCacheErrorLogsAppropriateMessage() {
this.handler.handleCachePutError(new RuntimeException(), CACHE, KEY, null);
verify(this.logger).warn("Cache 'NOOP' failed to put entry with key 'enigma'");
}
@Test
void handleEvictCacheErrorLogsAppropriateMessage() {
this.handler.handleCacheEvictError(new RuntimeException(), CACHE, KEY);
verify(this.logger).warn("Cache 'NOOP' failed to evict entry with key 'enigma'");
}
@Test
void handleClearErrorLogsAppropriateMessage() {
this.handler.handleCacheClearError(new RuntimeException(), CACHE);
verify(this.logger).warn("Cache 'NOOP' failed to clear entries");
}
@Test
void handleGetCacheErrorWithStackTraceLoggingEnabled() {
this.handler = new LoggingCacheErrorHandler(this.logger, true);
RuntimeException exception = new RuntimeException();
this.handler.handleCacheGetError(exception, CACHE, KEY);
verify(this.logger).warn("Cache 'NOOP' failed to get entry with key 'enigma'", exception);
}
@Test
void constructorWithLoggerName() {
assertThatCode(() -> new LoggingCacheErrorHandler("org.apache.commons.logging.Log", true))
.doesNotThrowAnyException();
}
}
|
LoggingCacheErrorHandlerTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/dialect/resolver/DialectResolverTest.java
|
{
"start": 751,
"end": 5128
}
|
class ____ {
@Test
public void testDialects() {
DialectResolverSet resolvers = new DialectResolverSet();
resolvers.addResolverAtFirst( new TestingDialects.MyDialectResolver1() );
resolvers.addResolverAtFirst( new TestingDialects.MyDialectResolver2() );
testDetermination( resolvers, "MyDatabase1", 1, TestingDialects.MyDialect1.class );
testDetermination( resolvers, "MyDatabase1", 2, TestingDialects.MyDialect1.class );
testDetermination( resolvers, "MyDatabase2", 0, null );
testDetermination( resolvers, "MyDatabase2", 1, TestingDialects.MyDialect21.class );
testDetermination( resolvers, "MyDatabase2", 2, TestingDialects.MyDialect22.class );
testDetermination( resolvers, "MyDatabase2", 3, TestingDialects.MyDialect22.class );
testDetermination( resolvers, "MyDatabase3", 1, null );
testDetermination( resolvers, "MyTrickyDatabase1", 1, TestingDialects.MyDialect1.class );
}
@Test
public void testErrorAndOrder() {
DialectResolverSet resolvers = new DialectResolverSet();
resolvers.addResolverAtFirst( new TestingDialects.MyDialectResolver1() );
resolvers.addResolver( new TestingDialects.MyDialectResolver2() );
// Non-connection errors are suppressed.
testDetermination( resolvers, "MyDatabase1", 1, TestingDialects.MyDialect1.class );
testDetermination( resolvers, "MyTrickyDatabase1", 1, TestingDialects.MyDialect1.class );
testDetermination( resolvers, "NoSuchDatabase", 1, null );
}
@Test
public void testBasicDialectResolver() {
DialectResolverSet resolvers = new DialectResolverSet();
// Simulating MyDialectResolver1 by BasicDialectResolvers
resolvers.addResolver( new BasicDialectResolver( "MyDatabase1", TestingDialects.MyDialect1.class ) );
resolvers.addResolver( new BasicDialectResolver( "MyDatabase2", 1, TestingDialects.MyDialect21.class ) );
resolvers.addResolver( new BasicDialectResolver( "MyDatabase2", 2, TestingDialects.MyDialect22.class ) );
resolvers.addResolver( new BasicDialectResolver( "ErrorDatabase1", Dialect.class ) );
testDetermination( resolvers, "MyDatabase1", 1, TestingDialects.MyDialect1.class );
testDetermination( resolvers, "MyDatabase1", 2, TestingDialects.MyDialect1.class );
testDetermination( resolvers, "MyDatabase2", 0, null );
testDetermination( resolvers, "MyDatabase2", 1, TestingDialects.MyDialect21.class );
testDetermination( resolvers, "MyDatabase2", 2, TestingDialects.MyDialect22.class );
testDetermination( resolvers, "ErrorDatabase1", 0, null );
}
@Test
@JiraKey(value = "HHH-13225")
public void testMinorVersion() {
DialectResolverSet resolvers = new DialectResolverSet();
resolvers.addResolver( new BasicDialectResolver( "MyDatabase1", TestingDialects.MyDialect1.class ) );
resolvers.addResolver( new BasicDialectResolver( "MyDatabase2", 1, TestingDialects.MyDialect21.class ) );
resolvers.addResolver( new BasicDialectResolver( "MyDatabase2", 2, TestingDialects.MyDialect22.class ) );
resolvers.addResolver( new BasicDialectResolver( "MyDatabase3", 1, 1, TestingDialects.MyDialect311.class ) );
resolvers.addResolver( new BasicDialectResolver( "MyDatabase3", 1, 2, TestingDialects.MyDialect312.class ) );
resolvers.addResolver( new BasicDialectResolver( "ErrorDatabase1", Dialect.class ) );
testDetermination( resolvers, "MyDatabase1", 1, 1, TestingDialects.MyDialect1.class );
testDetermination( resolvers, "MyDatabase3", 1, null );
testDetermination( resolvers, "MyDatabase3", 1, 1, TestingDialects.MyDialect311.class );
testDetermination( resolvers, "MyDatabase3", 1, 2, TestingDialects.MyDialect312.class );
testDetermination( resolvers, "MyDatabase3", 1, 3, null );
}
private void testDetermination(
DialectResolver resolver,
String databaseName,
int majorVersion,
Class<? extends Dialect> dialectClass) {
testDetermination( resolver, databaseName, majorVersion, DialectResolutionInfo.NO_VERSION, dialectClass );
}
private void testDetermination(
DialectResolver resolver,
String databaseName,
int majorVersion,
int minorVersion,
Class<? extends Dialect> dialectClass) {
Dialect dialect = resolver.resolveDialect(
TestingDialectResolutionInfo.forDatabaseInfo( databaseName, majorVersion, minorVersion )
);
if ( dialectClass == null ) {
assertNull( dialect );
}
else {
assertEquals( dialectClass, dialect.getClass() );
}
}
}
|
DialectResolverTest
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterNamenodeProtocolTranslatorPB.java
|
{
"start": 2179,
"end": 10601
}
|
class ____ extends NamenodeProtocolTranslatorPB {
/*
* Protobuf requests with no parameters instantiated only once
*/
private static final GetBlockKeysRequestProto VOID_GET_BLOCKKEYS_REQUEST =
GetBlockKeysRequestProto.newBuilder().build();
private static final GetTransactionIdRequestProto VOID_GET_TRANSACTIONID_REQUEST =
GetTransactionIdRequestProto.newBuilder().build();
private static final RollEditLogRequestProto VOID_ROLL_EDITLOG_REQUEST =
RollEditLogRequestProto.newBuilder().build();
private static final VersionRequestProto VOID_VERSION_REQUEST =
VersionRequestProto.newBuilder().build();
private final NamenodeProtocolPB rpcProxy;
public RouterNamenodeProtocolTranslatorPB(NamenodeProtocolPB rpcProxy) {
super(rpcProxy);
this.rpcProxy = rpcProxy;
}
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long
minBlockSize, long timeInterval, StorageType storageType)
throws IOException {
if (!Client.isAsynchronousMode()) {
return super.getBlocks(datanode, size, minBlockSize, timeInterval, storageType);
}
NamenodeProtocolProtos.GetBlocksRequestProto.Builder builder =
NamenodeProtocolProtos.GetBlocksRequestProto.newBuilder()
.setDatanode(PBHelperClient.convert((DatanodeID)datanode)).setSize(size)
.setMinBlockSize(minBlockSize).setTimeInterval(timeInterval);
if (storageType != null) {
builder.setStorageType(PBHelperClient.convertStorageType(storageType));
}
NamenodeProtocolProtos.GetBlocksRequestProto req = builder.build();
return asyncIpcClient(() -> rpcProxy.getBlocks(null, req),
res -> PBHelper.convert(res.getBlocks()),
BlocksWithLocations.class);
}
@Override
public ExportedBlockKeys getBlockKeys() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.getBlockKeys();
}
return asyncIpcClient(() -> rpcProxy.getBlockKeys(null,
VOID_GET_BLOCKKEYS_REQUEST),
res -> res.hasKeys() ? PBHelper.convert(res.getKeys()) : null,
ExportedBlockKeys.class);
}
@Override
public long getTransactionID() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.getTransactionID();
}
return asyncIpcClient(() -> rpcProxy.getTransactionId(null,
VOID_GET_TRANSACTIONID_REQUEST),
res -> res.getTxId(), Long.class);
}
@Override
public long getMostRecentCheckpointTxId() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.getMostRecentCheckpointTxId();
}
return asyncIpcClient(() -> rpcProxy.getMostRecentCheckpointTxId(null,
NamenodeProtocolProtos
.GetMostRecentCheckpointTxIdRequestProto
.getDefaultInstance()),
res -> res.getTxId(), Long.class);
}
@Override
public long getMostRecentNameNodeFileTxId(NNStorage.NameNodeFile nnf) throws IOException {
if (!Client.isAsynchronousMode()) {
return super.getMostRecentNameNodeFileTxId(nnf);
}
return asyncIpcClient(() -> rpcProxy.getMostRecentNameNodeFileTxId(null,
NamenodeProtocolProtos
.GetMostRecentNameNodeFileTxIdRequestProto
.newBuilder()
.setNameNodeFile(nnf.toString())
.build()),
res -> res.getTxId(), Long.class);
}
@Override
public CheckpointSignature rollEditLog() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.rollEditLog();
}
return asyncIpcClient(() -> rpcProxy.rollEditLog(null,
VOID_ROLL_EDITLOG_REQUEST),
res -> PBHelper.convert(res.getSignature()), CheckpointSignature.class);
}
@Override
public NamespaceInfo versionRequest() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.versionRequest();
}
return asyncIpcClient(() -> rpcProxy.versionRequest(null,
VOID_VERSION_REQUEST),
res -> PBHelper.convert(res.getInfo()),
NamespaceInfo.class);
}
@Override
public void errorReport(NamenodeRegistration registration, int errorCode,
String msg) throws IOException {
if (!Client.isAsynchronousMode()) {
super.errorReport(registration, errorCode, msg);
return;
}
NamenodeProtocolProtos.ErrorReportRequestProto req =
NamenodeProtocolProtos.ErrorReportRequestProto.newBuilder()
.setErrorCode(errorCode).setMsg(msg)
.setRegistration(PBHelper.convert(registration)).build();
asyncIpcClient(() -> rpcProxy.errorReport(null, req),
res -> null, Void.class);
}
@Override
public NamenodeRegistration registerSubordinateNamenode(
NamenodeRegistration registration) throws IOException {
if (!Client.isAsynchronousMode()) {
return super.registerSubordinateNamenode(registration);
}
NamenodeProtocolProtos.RegisterRequestProto req =
NamenodeProtocolProtos.RegisterRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration)).build();
return asyncIpcClient(() -> rpcProxy.registerSubordinateNamenode(null, req),
res -> PBHelper.convert(res.getRegistration()),
NamenodeRegistration.class);
}
@Override
public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
throws IOException {
if (!Client.isAsynchronousMode()) {
return super.startCheckpoint(registration);
}
NamenodeProtocolProtos.StartCheckpointRequestProto req =
NamenodeProtocolProtos.StartCheckpointRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration)).build();
return asyncIpcClient(() -> rpcProxy.startCheckpoint(null, req),
res -> {
HdfsServerProtos.NamenodeCommandProto cmd = res.getCommand();
return PBHelper.convert(cmd);
}, NamenodeCommand.class);
}
@Override
public void endCheckpoint(NamenodeRegistration registration,
CheckpointSignature sig) throws IOException {
if (!Client.isAsynchronousMode()) {
super.endCheckpoint(registration, sig);
return;
}
NamenodeProtocolProtos.EndCheckpointRequestProto req =
NamenodeProtocolProtos.EndCheckpointRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration))
.setSignature(PBHelper.convert(sig)).build();
asyncIpcClient(() -> rpcProxy.endCheckpoint(null, req),
res -> null, Void.class);
}
@Override
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
throws IOException {
if (!Client.isAsynchronousMode()) {
return super.getEditLogManifest(sinceTxId);
}
NamenodeProtocolProtos.GetEditLogManifestRequestProto req =
NamenodeProtocolProtos.GetEditLogManifestRequestProto
.newBuilder().setSinceTxId(sinceTxId).build();
return asyncIpcClient(() -> rpcProxy.getEditLogManifest(null, req),
res -> PBHelper.convert(res.getManifest()), RemoteEditLogManifest.class);
}
@Override
public boolean isUpgradeFinalized() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.isUpgradeFinalized();
}
NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto req =
NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto
.newBuilder().build();
return asyncIpcClient(() -> rpcProxy.isUpgradeFinalized(null, req),
res -> res.getIsUpgradeFinalized(), Boolean.class);
}
@Override
public boolean isRollingUpgrade() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.isRollingUpgrade();
}
NamenodeProtocolProtos.IsRollingUpgradeRequestProto req =
NamenodeProtocolProtos.IsRollingUpgradeRequestProto
.newBuilder().build();
return asyncIpcClient(() -> rpcProxy.isRollingUpgrade(null, req),
res -> res.getIsRollingUpgrade(), Boolean.class);
}
@Override
public Long getNextSPSPath() throws IOException {
if (!Client.isAsynchronousMode()) {
return super.getNextSPSPath();
}
NamenodeProtocolProtos.GetNextSPSPathRequestProto req =
NamenodeProtocolProtos.GetNextSPSPathRequestProto.newBuilder().build();
return asyncIpcClient(() -> rpcProxy.getNextSPSPath(null, req),
res -> res.hasSpsPath() ? res.getSpsPath() : null, Long.class);
}
}
|
RouterNamenodeProtocolTranslatorPB
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/DependencyCycleValidationTest.java
|
{
"start": 10284,
"end": 11852
}
|
class ____ {",
" @Provides @IntoMap",
" @StringKey(\"C\")",
" static C c(C c) {",
" return c;",
" }",
" }",
"}");
CompilerTests.daggerCompiler(component)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
String.join(
"\n",
"Found a dependency cycle:",
" Outer.C is injected at",
" [Outer.CComponent] Outer.CModule.c(c)",
" Map<String,Outer.C> is injected at",
" [Outer.CComponent] Outer.A(cMap)",
" Outer.A is injected at",
" [Outer.CComponent] Outer.B(aParam)",
" Outer.B is injected at",
" [Outer.CComponent] Outer.C(bParam)",
" Outer.C is injected at",
" [Outer.CComponent] Outer.CModule.c(c)",
" ...",
"",
"The cycle is requested via:",
" Outer.C is requested at",
" [Outer.CComponent] Outer.CComponent.getC()"))
.onSource(component)
.onLineContaining("
|
CModule
|
java
|
apache__camel
|
components/camel-test/camel-test-spring-junit5/src/main/java/org/apache/camel/test/spring/junit5/CamelTestContextBootstrapper.java
|
{
"start": 1059,
"end": 1305
}
|
class ____ extends DefaultTestContextBootstrapper {
@Override
protected Class<? extends ContextLoader> getDefaultContextLoaderClass(Class<?> testClass) {
return CamelSpringTestContextLoader.class;
}
}
|
CamelTestContextBootstrapper
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/Handler.java
|
{
"start": 684,
"end": 833
}
|
interface ____<E> {
/**
* Something has happened, so handle it.
*
* @param event the event to handle
*/
void handle(E event);
}
|
Handler
|
java
|
google__dagger
|
dagger-compiler/main/java/dagger/internal/codegen/writing/SubcomponentCreatorRequestRepresentation.java
|
{
"start": 2475,
"end": 2590
}
|
interface ____ {
SubcomponentCreatorRequestRepresentation create(SubcomponentCreatorBinding binding);
}
}
|
Factory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/ReloadWithPreviousRowEntityTest3.java
|
{
"start": 949,
"end": 2822
}
|
class ____ {
@BeforeEach
public void prepareTestData(EntityManagerFactoryScope scope) {
scope.inTransaction(
(session) -> {
Student mathStudent = new Student(16);
Student frenchStudent = new Student(17);
Student scienceStudent = new Student(18);
Teacher teacherWithNoStudents = new Teacher(16);
Teacher teacherWithOneStudent = new Teacher(17);
Teacher teacherWithTwoStudents = new Teacher(18);
session.persist( teacherWithNoStudents );
session.persist( teacherWithOneStudent );
session.persist( teacherWithTwoStudents );
mathStudent.setTeacher( teacherWithOneStudent );
teacherWithOneStudent.addStudent( mathStudent );
frenchStudent.setTeacher( teacherWithTwoStudents );
teacherWithTwoStudents.addStudent( frenchStudent );
scienceStudent.setTeacher( teacherWithTwoStudents );
teacherWithTwoStudents.addStudent( scienceStudent );
session.persist( mathStudent );
session.persist( frenchStudent );
session.persist( scienceStudent );
}
);
}
@AfterEach
public void dropTestData(EntityManagerFactoryScope scope) {
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Test
public void testReloadWithPreviousRow(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
// First load a fully initialized graph i.e. maybeLazySet empty
em.createQuery( "select s from Student s join fetch s.teacher t left join fetch t.students left join fetch t.skills order by s.id desc", Student.class ).getResultList();
// Then load a partially initialized graph and see if previous row optimization works properly
em.createQuery( "select s from Student s join fetch s.teacher t left join fetch t.students order by s.id desc", Student.class ).getResultList();
} );
}
@Entity(name = "Student")
public static
|
ReloadWithPreviousRowEntityTest3
|
java
|
apache__camel
|
components/camel-twilio/src/generated/java/org/apache/camel/component/twilio/internal/AddressDependentPhoneNumberApiMethod.java
|
{
"start": 692,
"end": 1894
}
|
enum ____ implements ApiMethod {
READER(
com.twilio.rest.api.v2010.account.address.DependentPhoneNumberReader.class,
"reader",
arg("pathAddressSid", String.class)),
READER_1(
com.twilio.rest.api.v2010.account.address.DependentPhoneNumberReader.class,
"reader",
arg("pathAccountSid", String.class),
arg("pathAddressSid", String.class));
private final ApiMethod apiMethod;
AddressDependentPhoneNumberApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(DependentPhoneNumber.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
|
AddressDependentPhoneNumberApiMethod
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/time/JodaWithDurationAddedLongTest.java
|
{
"start": 7673,
"end": 7836
}
|
class ____ {
private static final DateTime A = DateTime.now().withDurationAdded(42, 1);
}
""")
.doTest();
}
}
|
TestClass
|
java
|
apache__camel
|
components/camel-drill/src/main/java/org/apache/camel/component/drill/DrillEndpoint.java
|
{
"start": 1817,
"end": 5247
}
|
class ____ extends DefaultPollingEndpoint implements EndpointServiceLocation {
@UriPath(description = "Host name or IP address")
@Metadata(required = true)
private String host;
@UriParam(description = "Port number")
@Metadata(required = false, defaultValue = "2181")
private Integer port = 2181;
@UriParam(description = "Drill directory", defaultValue = "")
private String directory = "";
@UriParam(defaultValue = "")
private String clusterId = "";
@UriParam(defaultValue = "ZK", enums = "ZK,DRILLBIT")
private DrillConnectionMode mode = DrillConnectionMode.ZK;
/**
* creates a drill endpoint
*
* @param uri the endpoint uri
* @param component the component
*/
public DrillEndpoint(String uri, DrillComponent component) {
super(uri, component);
}
@Override
public String getServiceUrl() {
return host + ":" + port;
}
@Override
public String getServiceProtocol() {
return "jdbc";
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new UnsupportedOperationException("DrillConsumer is not supported!");
}
@Override
public Producer createProducer() throws Exception {
return new DrillProducer(this);
}
public String toJDBCUri() {
String url = "jdbc:drill:" + mode.name().toLowerCase() + "=" + host + ":" + port;
if (mode.equals(DrillConnectionMode.ZK)) {
if (StringUtils.isNotBlank(directory)) {
url += "/" + directory;
}
if (StringUtils.isNotBlank(clusterId)) {
url += "/" + clusterId;
}
}
return url;
}
public List<?> queryForList(ResultSet rs) throws SQLException {
ColumnMapRowMapper rowMapper = new ColumnMapRowMapper();
RowMapperResultSetExtractor<Map<String, Object>> mapper = new RowMapperResultSetExtractor<>(rowMapper);
return mapper.extractData(rs);
}
public String getHost() {
return host;
}
/**
* ZooKeeper host name or IP address. Use local instead of a host name or IP address to connect to the local
* Drillbit
*
* @param host
*/
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
/**
* ZooKeeper port number
*
* @param port
*/
public void setPort(Integer port) {
this.port = port;
}
public String getDirectory() {
return directory;
}
/**
* Drill directory in ZooKeeper
*
* @param directory
*/
public void setDirectory(String directory) {
this.directory = directory;
}
public String getClusterId() {
return clusterId;
}
/**
* Cluster ID https://drill.apache.org/docs/using-the-jdbc-driver/#determining-the-cluster-id
*
* @param clusterId
*/
public void setClusterId(String clusterId) {
this.clusterId = clusterId;
}
/**
* Connection mode: zk: Zookeeper drillbit: Drillbit direct connection
* https://drill.apache.org/docs/using-the-jdbc-driver/
*
* @return
*/
public DrillConnectionMode getMode() {
return mode;
}
public void setMode(DrillConnectionMode mode) {
this.mode = mode;
}
}
|
DrillEndpoint
|
java
|
apache__kafka
|
connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java
|
{
"start": 29261,
"end": 41445
}
|
class ____ extends DefaultReplicationPolicy {
@Override
public String upstreamTopic(String topic) {
return null;
}
}
MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"),
new CustomReplicationPolicy(), new DefaultTopicFilter(), new DefaultConfigPropertyFilter());
assertDoesNotThrow(() -> connector.isCycle(".b"));
}
@Test
public void testExactlyOnceSupport() {
String readCommitted = "read_committed";
String readUncommitted = "read_uncommitted";
String readGarbage = "read_garbage";
// Connector is configured correctly, but exactly-once can't be supported
assertExactlyOnceSupport(null, null, false);
assertExactlyOnceSupport(readUncommitted, null, false);
assertExactlyOnceSupport(null, readUncommitted, false);
assertExactlyOnceSupport(readUncommitted, readUncommitted, false);
// Connector is configured correctly, and exactly-once can be supported
assertExactlyOnceSupport(readCommitted, null, true);
assertExactlyOnceSupport(null, readCommitted, true);
assertExactlyOnceSupport(readUncommitted, readCommitted, true);
assertExactlyOnceSupport(readCommitted, readCommitted, true);
// Connector is configured incorrectly, but is able to react gracefully
assertExactlyOnceSupport(readGarbage, null, false);
assertExactlyOnceSupport(null, readGarbage, false);
assertExactlyOnceSupport(readGarbage, readGarbage, false);
assertExactlyOnceSupport(readCommitted, readGarbage, false);
assertExactlyOnceSupport(readUncommitted, readGarbage, false);
assertExactlyOnceSupport(readGarbage, readUncommitted, false);
assertExactlyOnceSupport(readGarbage, readCommitted, true);
}
private void assertExactlyOnceSupport(String defaultIsolationLevel, String sourceIsolationLevel, boolean expected) {
Map<String, String> props = makeProps();
if (defaultIsolationLevel != null) {
props.put(CONSUMER_CLIENT_PREFIX + ISOLATION_LEVEL_CONFIG, defaultIsolationLevel);
}
if (sourceIsolationLevel != null) {
props.put(SOURCE_PREFIX + CONSUMER_CLIENT_PREFIX + ISOLATION_LEVEL_CONFIG, sourceIsolationLevel);
}
ExactlyOnceSupport expectedSupport = expected ? ExactlyOnceSupport.SUPPORTED : ExactlyOnceSupport.UNSUPPORTED;
ExactlyOnceSupport actualSupport = new MirrorSourceConnector().exactlyOnceSupport(props);
assertEquals(expectedSupport, actualSupport);
}
@Test
public void testExactlyOnceSupportValidation() {
String exactlyOnceSupport = "exactly.once.support";
Map<String, String> props = makeProps();
Optional<ConfigValue> configValue = validateProperty(exactlyOnceSupport, props);
assertEquals(Optional.empty(), configValue);
props.put(exactlyOnceSupport, "requested");
configValue = validateProperty(exactlyOnceSupport, props);
assertEquals(Optional.empty(), configValue);
props.put(exactlyOnceSupport, "garbage");
configValue = validateProperty(exactlyOnceSupport, props);
assertEquals(Optional.empty(), configValue);
props.put(exactlyOnceSupport, "required");
configValue = validateProperty(exactlyOnceSupport, props);
assertTrue(configValue.isPresent());
List<String> errorMessages = configValue.get().errorMessages();
assertEquals(1, errorMessages.size());
String errorMessage = errorMessages.get(0);
assertTrue(
errorMessages.get(0).contains(ISOLATION_LEVEL_CONFIG),
"Error message \"" + errorMessage + "\" should have mentioned the 'isolation.level' consumer property"
);
props.put(CONSUMER_CLIENT_PREFIX + ISOLATION_LEVEL_CONFIG, "read_committed");
configValue = validateProperty(exactlyOnceSupport, props);
assertEquals(Optional.empty(), configValue);
// Make sure that an unrelated invalid property doesn't cause an exception to be thrown and is instead handled and reported gracefully
props.put(OFFSET_LAG_MAX, "bad");
// Ensure that the issue with the invalid property is reported...
configValue = validateProperty(OFFSET_LAG_MAX, props);
assertTrue(configValue.isPresent());
errorMessages = configValue.get().errorMessages();
assertEquals(1, errorMessages.size());
errorMessage = errorMessages.get(0);
assertTrue(
errorMessages.get(0).contains(OFFSET_LAG_MAX),
"Error message \"" + errorMessage + "\" should have mentioned the 'offset.lag.max' property"
);
// ... and that it does not cause any issues with validation for exactly-once support...
configValue = validateProperty(exactlyOnceSupport, props);
assertEquals(Optional.empty(), configValue);
// ... regardless of whether validation for exactly-once support does or does not find an error
props.remove(CONSUMER_CLIENT_PREFIX + ISOLATION_LEVEL_CONFIG);
configValue = validateProperty(exactlyOnceSupport, props);
assertTrue(configValue.isPresent());
errorMessages = configValue.get().errorMessages();
assertEquals(1, errorMessages.size());
errorMessage = errorMessages.get(0);
assertTrue(
errorMessages.get(0).contains(ISOLATION_LEVEL_CONFIG),
"Error message \"" + errorMessage + "\" should have mentioned the 'isolation.level' consumer property"
);
}
private Optional<ConfigValue> validateProperty(String name, Map<String, String> props) {
List<ConfigValue> results = new MirrorSourceConnector().validate(props)
.configValues().stream()
.filter(cv -> name.equals(cv.name()))
.toList();
assertTrue(results.size() <= 1, "Connector produced multiple config values for '" + name + "' property");
if (results.isEmpty())
return Optional.empty();
ConfigValue result = results.get(0);
assertNotNull(result, "Connector should not have record null config value for '" + name + "' property");
return Optional.of(result);
}
@Test
public void testAlterOffsetsIncorrectPartitionKey() {
MirrorSourceConnector connector = new MirrorSourceConnector();
assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of(
Map.of("unused_partition_key", "unused_partition_value"),
MirrorUtils.wrapOffset(10)
)));
// null partitions are invalid
assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap(
null,
MirrorUtils.wrapOffset(10)
)));
}
@Test
public void testAlterOffsetsMissingPartitionKey() {
MirrorSourceConnector connector = new MirrorSourceConnector();
Function<Map<String, ?>, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Map.of(
partition,
MirrorUtils.wrapOffset(64)
));
Map<String, ?> validPartition = sourcePartition("t", 3, "us-east-2");
// Sanity check to make sure our valid partition is actually valid
assertTrue(alterOffsets.apply(validPartition));
for (String key : List.of(SOURCE_CLUSTER_KEY, TOPIC_KEY, PARTITION_KEY)) {
Map<String, ?> invalidPartition = new HashMap<>(validPartition);
invalidPartition.remove(key);
assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition));
}
}
@Test
public void testAlterOffsetsInvalidPartitionPartition() {
MirrorSourceConnector connector = new MirrorSourceConnector();
Map<String, Object> partition = sourcePartition("t", 3, "us-west-2");
partition.put(PARTITION_KEY, "a string");
assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of(
partition,
MirrorUtils.wrapOffset(49)
)));
}
@Test
public void testAlterOffsetsMultiplePartitions() {
MirrorSourceConnector connector = new MirrorSourceConnector();
Map<String, ?> partition1 = sourcePartition("t1", 0, "primary");
Map<String, ?> partition2 = sourcePartition("t1", 1, "primary");
Map<Map<String, ?>, Map<String, ?>> offsets = new HashMap<>();
offsets.put(partition1, MirrorUtils.wrapOffset(50));
offsets.put(partition2, MirrorUtils.wrapOffset(100));
assertTrue(connector.alterOffsets(null, offsets));
}
@Test
public void testAlterOffsetsIncorrectOffsetKey() {
MirrorSourceConnector connector = new MirrorSourceConnector();
Map<Map<String, ?>, Map<String, ?>> offsets = Map.of(
sourcePartition("t1", 2, "backup"),
Map.of("unused_offset_key", 0)
);
assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets));
}
@Test
public void testAlterOffsetsOffsetValues() {
MirrorSourceConnector connector = new MirrorSourceConnector();
Function<Object, Boolean> alterOffsets = offset -> connector.alterOffsets(null, Map.of(
sourcePartition("t", 5, "backup"),
Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset)
));
assertThrows(ConnectException.class, () -> alterOffsets.apply("nan"));
assertThrows(ConnectException.class, () -> alterOffsets.apply(null));
assertThrows(ConnectException.class, () -> alterOffsets.apply(new Object()));
assertThrows(ConnectException.class, () -> alterOffsets.apply(3.14));
assertThrows(ConnectException.class, () -> alterOffsets.apply(-420));
assertThrows(ConnectException.class, () -> alterOffsets.apply("-420"));
assertThrows(ConnectException.class, () -> alterOffsets.apply("10"));
assertTrue(() -> alterOffsets.apply(0));
assertTrue(() -> alterOffsets.apply(10));
assertTrue(() -> alterOffsets.apply(((long) Integer.MAX_VALUE) + 1));
}
@Test
public void testSuccessfulAlterOffsets() {
MirrorSourceConnector connector = new MirrorSourceConnector();
Map<Map<String, ?>, Map<String, ?>> offsets = Map.of(
sourcePartition("t2", 0, "backup"),
MirrorUtils.wrapOffset(5)
);
// Expect no exception to be thrown when a valid offsets map is passed. An empty offsets map is treated as valid
// since it could indicate that the offsets were reset previously or that no offsets have been committed yet
// (for a reset operation)
assertTrue(connector.alterOffsets(null, offsets));
assertTrue(connector.alterOffsets(null, Map.of()));
}
@Test
public void testAlterOffsetsTombstones() {
MirrorCheckpointConnector connector = new MirrorCheckpointConnector();
Function<Map<String, ?>, Boolean> alterOffsets = partition -> connector.alterOffsets(
null,
Collections.singletonMap(partition, null)
);
Map<String, Object> partition = sourcePartition("kips", 875, "apache.kafka");
assertTrue(() -> alterOffsets.apply(partition));
partition.put(PARTITION_KEY, "a string");
assertTrue(() -> alterOffsets.apply(partition));
partition.remove(PARTITION_KEY);
assertTrue(() -> alterOffsets.apply(partition));
assertTrue(() -> alterOffsets.apply(null));
assertTrue(() -> alterOffsets.apply(Map.of()));
assertTrue(() -> alterOffsets.apply(Map.of("unused_partition_key", "unused_partition_value")));
}
private static Map<String, Object> sourcePartition(String topic, int partition, String sourceClusterAlias) {
return MirrorUtils.wrapPartition(
new TopicPartition(topic, partition),
sourceClusterAlias
);
}
}
|
CustomReplicationPolicy
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/path/JSONPath_17.java
|
{
"start": 392,
"end": 901
}
|
class ____ extends TestCase {
public void test_for_jsonpath() throws Exception {
String input = "{\"b\":[{\"c\":{\"d\":{\"e\":\"978\"}},\"f\":{\"c\":{\"d\":{\"$ref\":\"$.b[0].c.d\"}}}}]}";
Object obj = JSON.parse(input);
String oupput = JSON.parse(input).toString();
assertEquals(obj, JSON.parse(oupput));
}
public void test_for_jsonpath_1() throws Exception {
assertEquals("[5]", JSONPath.extract("[1, 2, 3, 4, 5]", "$[last]").toString());
}
}
|
JSONPath_17
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/ImmutableMap.java
|
{
"start": 25880,
"end": 28366
}
|
class ____ {
private final Object key;
private final Object value1;
private final Object value2;
DuplicateKey(Object key, Object value1, Object value2) {
this.key = key;
this.value1 = value1;
this.value2 = value2;
}
IllegalArgumentException exception() {
return new IllegalArgumentException(
"Multiple entries with same key: " + key + "=" + value1 + " and " + key + "=" + value2);
}
}
}
/**
* Returns an immutable map containing the same entries as {@code map}. The returned map iterates
* over entries in the same order as the {@code entrySet} of the original map. If {@code map}
* somehow contains entries with duplicate keys (for example, if it is a {@code SortedMap} whose
* comparator is not <i>consistent with equals</i>), the results of this method are undefined.
*
* <p>Despite the method name, this method attempts to avoid actually copying the data when it is
* safe to do so. The exact circumstances under which a copy will or will not be performed are
* undocumented and subject to change.
*
* @throws NullPointerException if any key or value in {@code map} is null
*/
public static <K, V> ImmutableMap<K, V> copyOf(Map<? extends K, ? extends V> map) {
if ((map instanceof ImmutableMap) && !(map instanceof SortedMap)) {
@SuppressWarnings("unchecked") // safe since map is not writable
ImmutableMap<K, V> kvMap = (ImmutableMap<K, V>) map;
if (!kvMap.isPartialView()) {
return kvMap;
}
}
return copyOf(map.entrySet());
}
/**
* Returns an immutable map containing the specified entries. The returned map iterates over
* entries in the same order as the original iterable.
*
* @throws NullPointerException if any key, value, or entry is null
* @throws IllegalArgumentException if two entries have the same key
* @since 19.0
*/
public static <K, V> ImmutableMap<K, V> copyOf(
Iterable<? extends Entry<? extends K, ? extends V>> entries) {
int initialCapacity =
(entries instanceof Collection)
? ((Collection<?>) entries).size()
: ImmutableCollection.Builder.DEFAULT_INITIAL_CAPACITY;
ImmutableMap.Builder<K, V> builder = new ImmutableMap.Builder<K, V>(initialCapacity);
builder.putAll(entries);
return builder.build();
}
static final Entry<?, ?>[] EMPTY_ENTRY_ARRAY = new Entry<?, ?>[0];
abstract static
|
DuplicateKey
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
|
{
"start": 34524,
"end": 58478
}
|
class ____ not be null");
this.customEditors.put(requiredType, propertyEditorClass);
}
@Override
public void copyRegisteredEditorsTo(PropertyEditorRegistry registry) {
registerCustomEditors(registry);
}
/**
* Return the map of custom editors, with Classes as keys and PropertyEditor classes as values.
*/
public Map<Class<?>, Class<? extends PropertyEditor>> getCustomEditors() {
return this.customEditors;
}
@Override
public void setTypeConverter(TypeConverter typeConverter) {
this.typeConverter = typeConverter;
}
/**
* Return the custom TypeConverter to use, if any.
* @return the custom TypeConverter, or {@code null} if none specified
*/
protected @Nullable TypeConverter getCustomTypeConverter() {
return this.typeConverter;
}
@Override
public TypeConverter getTypeConverter() {
TypeConverter customConverter = getCustomTypeConverter();
if (customConverter != null) {
return customConverter;
}
else {
// Build default TypeConverter, registering custom editors.
SimpleTypeConverter typeConverter = new SimpleTypeConverter();
typeConverter.setConversionService(getConversionService());
registerCustomEditors(typeConverter);
return typeConverter;
}
}
@Override
public void addEmbeddedValueResolver(StringValueResolver valueResolver) {
Assert.notNull(valueResolver, "StringValueResolver must not be null");
this.embeddedValueResolvers.add(valueResolver);
}
@Override
public boolean hasEmbeddedValueResolver() {
return !this.embeddedValueResolvers.isEmpty();
}
@Override
public @Nullable String resolveEmbeddedValue(@Nullable String value) {
if (value == null) {
return null;
}
String result = value;
for (StringValueResolver resolver : this.embeddedValueResolvers) {
result = resolver.resolveStringValue(result);
if (result == null) {
return null;
}
}
return result;
}
@Override
public void addBeanPostProcessor(BeanPostProcessor beanPostProcessor) {
Assert.notNull(beanPostProcessor, "BeanPostProcessor must not be null");
synchronized (this.beanPostProcessors) {
// Remove from old position, if any
this.beanPostProcessors.remove(beanPostProcessor);
// Add to end of list
this.beanPostProcessors.add(beanPostProcessor);
}
}
/**
* Add new BeanPostProcessors that will get applied to beans created
* by this factory. To be invoked during factory configuration.
* @since 5.3
* @see #addBeanPostProcessor
*/
public void addBeanPostProcessors(Collection<? extends BeanPostProcessor> beanPostProcessors) {
synchronized (this.beanPostProcessors) {
// Remove from old position, if any
this.beanPostProcessors.removeAll(beanPostProcessors);
// Add to end of list
this.beanPostProcessors.addAll(beanPostProcessors);
}
}
@Override
public int getBeanPostProcessorCount() {
return this.beanPostProcessors.size();
}
/**
* Return the list of BeanPostProcessors that will get applied
* to beans created with this factory.
*/
public List<BeanPostProcessor> getBeanPostProcessors() {
return this.beanPostProcessors;
}
/**
* Return the internal cache of pre-filtered post-processors,
* freshly (re-)building it if necessary.
* @since 5.3
*/
BeanPostProcessorCache getBeanPostProcessorCache() {
synchronized (this.beanPostProcessors) {
BeanPostProcessorCache bppCache = this.beanPostProcessorCache;
if (bppCache == null) {
bppCache = new BeanPostProcessorCache();
for (BeanPostProcessor bpp : this.beanPostProcessors) {
if (bpp instanceof InstantiationAwareBeanPostProcessor instantiationAwareBpp) {
bppCache.instantiationAware.add(instantiationAwareBpp);
if (bpp instanceof SmartInstantiationAwareBeanPostProcessor smartInstantiationAwareBpp) {
bppCache.smartInstantiationAware.add(smartInstantiationAwareBpp);
}
}
if (bpp instanceof DestructionAwareBeanPostProcessor destructionAwareBpp) {
bppCache.destructionAware.add(destructionAwareBpp);
}
if (bpp instanceof MergedBeanDefinitionPostProcessor mergedBeanDefBpp) {
bppCache.mergedDefinition.add(mergedBeanDefBpp);
}
}
this.beanPostProcessorCache = bppCache;
}
return bppCache;
}
}
private void resetBeanPostProcessorCache() {
synchronized (this.beanPostProcessors) {
this.beanPostProcessorCache = null;
}
}
/**
* Return whether this factory holds a InstantiationAwareBeanPostProcessor
* that will get applied to singleton beans on creation.
* @see #addBeanPostProcessor
* @see org.springframework.beans.factory.config.InstantiationAwareBeanPostProcessor
*/
protected boolean hasInstantiationAwareBeanPostProcessors() {
return !getBeanPostProcessorCache().instantiationAware.isEmpty();
}
/**
* Return whether this factory holds a DestructionAwareBeanPostProcessor
* that will get applied to singleton beans on shutdown.
* @see #addBeanPostProcessor
* @see org.springframework.beans.factory.config.DestructionAwareBeanPostProcessor
*/
protected boolean hasDestructionAwareBeanPostProcessors() {
return !getBeanPostProcessorCache().destructionAware.isEmpty();
}
@Override
public void registerScope(String scopeName, Scope scope) {
Assert.notNull(scopeName, "Scope identifier must not be null");
Assert.notNull(scope, "Scope must not be null");
if (SCOPE_SINGLETON.equals(scopeName) || SCOPE_PROTOTYPE.equals(scopeName)) {
throw new IllegalArgumentException("Cannot replace existing scopes 'singleton' and 'prototype'");
}
Scope previous = this.scopes.put(scopeName, scope);
if (previous != null && previous != scope) {
if (logger.isDebugEnabled()) {
logger.debug("Replacing scope '" + scopeName + "' from [" + previous + "] to [" + scope + "]");
}
}
else {
if (logger.isTraceEnabled()) {
logger.trace("Registering scope '" + scopeName + "' with implementation [" + scope + "]");
}
}
}
@Override
public String[] getRegisteredScopeNames() {
return StringUtils.toStringArray(this.scopes.keySet());
}
@Override
public @Nullable Scope getRegisteredScope(String scopeName) {
Assert.notNull(scopeName, "Scope identifier must not be null");
return this.scopes.get(scopeName);
}
@Override
public void setApplicationStartup(ApplicationStartup applicationStartup) {
Assert.notNull(applicationStartup, "ApplicationStartup must not be null");
this.applicationStartup = applicationStartup;
}
@Override
public ApplicationStartup getApplicationStartup() {
return this.applicationStartup;
}
@Override
public void copyConfigurationFrom(ConfigurableBeanFactory otherFactory) {
Assert.notNull(otherFactory, "BeanFactory must not be null");
setBeanClassLoader(otherFactory.getBeanClassLoader());
setCacheBeanMetadata(otherFactory.isCacheBeanMetadata());
setBeanExpressionResolver(otherFactory.getBeanExpressionResolver());
setConversionService(otherFactory.getConversionService());
if (otherFactory instanceof AbstractBeanFactory otherAbstractFactory) {
this.defaultEditorRegistrars.addAll(otherAbstractFactory.defaultEditorRegistrars);
this.propertyEditorRegistrars.addAll(otherAbstractFactory.propertyEditorRegistrars);
this.customEditors.putAll(otherAbstractFactory.customEditors);
this.typeConverter = otherAbstractFactory.typeConverter;
this.beanPostProcessors.addAll(otherAbstractFactory.beanPostProcessors);
this.scopes.putAll(otherAbstractFactory.scopes);
}
else {
setTypeConverter(otherFactory.getTypeConverter());
String[] otherScopeNames = otherFactory.getRegisteredScopeNames();
for (String scopeName : otherScopeNames) {
this.scopes.put(scopeName, otherFactory.getRegisteredScope(scopeName));
}
}
}
/**
* Return a 'merged' BeanDefinition for the given bean name,
* merging a child bean definition with its parent if necessary.
* <p>This {@code getMergedBeanDefinition} considers bean definition
* in ancestors as well.
* @param name the name of the bean to retrieve the merged definition for
* (may be an alias)
* @return a (potentially merged) RootBeanDefinition for the given bean
* @throws NoSuchBeanDefinitionException if there is no bean with the given name
* @throws BeanDefinitionStoreException in case of an invalid bean definition
*/
@Override
public BeanDefinition getMergedBeanDefinition(String name) throws BeansException {
String beanName = transformedBeanName(name);
// Efficiently check whether bean definition exists in this factory.
if (getParentBeanFactory() instanceof ConfigurableBeanFactory parent && !containsBeanDefinition(beanName)) {
return parent.getMergedBeanDefinition(beanName);
}
// Resolve merged bean definition locally.
return getMergedLocalBeanDefinition(beanName);
}
@Override
public boolean isFactoryBean(String name) throws NoSuchBeanDefinitionException {
String beanName = transformedBeanName(name);
Object beanInstance = getSingleton(beanName, false);
if (beanInstance != null) {
return (beanInstance instanceof FactoryBean);
}
// No singleton instance found -> check bean definition.
if (!containsBeanDefinition(beanName) && getParentBeanFactory() instanceof ConfigurableBeanFactory cbf) {
// No bean definition found in this factory -> delegate to parent.
return cbf.isFactoryBean(name);
}
return isFactoryBean(beanName, getMergedLocalBeanDefinition(beanName));
}
@Override
public boolean isActuallyInCreation(String beanName) {
return (isSingletonCurrentlyInCreation(beanName) || isPrototypeCurrentlyInCreation(beanName));
}
/**
* Return whether the specified prototype bean is currently in creation
* (within the current thread).
* @param beanName the name of the bean
*/
protected boolean isPrototypeCurrentlyInCreation(String beanName) {
Object curVal = this.prototypesCurrentlyInCreation.get();
return (curVal != null &&
(curVal.equals(beanName) || (curVal instanceof Set<?> set && set.contains(beanName))));
}
/**
* Callback before prototype creation.
* <p>The default implementation registers the prototype as currently in creation.
* @param beanName the name of the prototype about to be created
* @see #isPrototypeCurrentlyInCreation
*/
@SuppressWarnings("unchecked")
protected void beforePrototypeCreation(String beanName) {
Object curVal = this.prototypesCurrentlyInCreation.get();
if (curVal == null) {
this.prototypesCurrentlyInCreation.set(beanName);
}
else if (curVal instanceof String strValue) {
Set<String> beanNameSet = CollectionUtils.newHashSet(2);
beanNameSet.add(strValue);
beanNameSet.add(beanName);
this.prototypesCurrentlyInCreation.set(beanNameSet);
}
else {
Set<String> beanNameSet = (Set<String>) curVal;
beanNameSet.add(beanName);
}
}
/**
* Callback after prototype creation.
* <p>The default implementation marks the prototype as not in creation anymore.
* @param beanName the name of the prototype that has been created
* @see #isPrototypeCurrentlyInCreation
*/
@SuppressWarnings("unchecked")
protected void afterPrototypeCreation(String beanName) {
Object curVal = this.prototypesCurrentlyInCreation.get();
if (curVal instanceof String) {
this.prototypesCurrentlyInCreation.remove();
}
else if (curVal instanceof Set<?> beanNameSet) {
beanNameSet.remove(beanName);
if (beanNameSet.isEmpty()) {
this.prototypesCurrentlyInCreation.remove();
}
}
}
@Override
public void destroyBean(String beanName, Object beanInstance) {
destroyBean(beanName, beanInstance, getMergedLocalBeanDefinition(beanName));
}
/**
* Destroy the given bean instance (usually a prototype instance
* obtained from this factory) according to the given bean definition.
* @param beanName the name of the bean definition
* @param bean the bean instance to destroy
* @param mbd the merged bean definition
*/
protected void destroyBean(String beanName, Object bean, RootBeanDefinition mbd) {
new DisposableBeanAdapter(
bean, beanName, mbd, getBeanPostProcessorCache().destructionAware).destroy();
}
@Override
public void destroyScopedBean(String beanName) {
RootBeanDefinition mbd = getMergedLocalBeanDefinition(beanName);
if (mbd.isSingleton() || mbd.isPrototype()) {
throw new IllegalArgumentException(
"Bean name '" + beanName + "' does not correspond to an object in a mutable scope");
}
String scopeName = mbd.getScope();
Scope scope = this.scopes.get(scopeName);
if (scope == null) {
throw new IllegalStateException("No Scope SPI registered for scope name '" + scopeName + "'");
}
Object bean = scope.remove(beanName);
if (bean != null) {
destroyBean(beanName, bean, mbd);
}
}
//---------------------------------------------------------------------
// Implementation methods
//---------------------------------------------------------------------
/**
* Return the bean name, stripping out the factory dereference prefix if necessary,
* and resolving aliases to canonical names.
* @param name the user-specified name
* @return the transformed bean name
*/
protected String transformedBeanName(String name) {
return canonicalName(BeanFactoryUtils.transformedBeanName(name));
}
/**
* Determine the original bean name, resolving locally defined aliases to canonical names.
* @param name the user-specified name
* @return the original bean name
*/
protected String originalBeanName(String name) {
String beanName = transformedBeanName(name);
if (!name.isEmpty() && name.charAt(0) == BeanFactory.FACTORY_BEAN_PREFIX_CHAR) {
beanName = FACTORY_BEAN_PREFIX + beanName;
}
return beanName;
}
/**
* Initialize the given BeanWrapper with the custom editors registered
* with this factory. To be called for BeanWrappers that will create
* and populate bean instances.
* <p>The default implementation delegates to {@link #registerCustomEditors}.
* Can be overridden in subclasses.
* @param bw the BeanWrapper to initialize
*/
protected void initBeanWrapper(BeanWrapper bw) {
bw.setConversionService(getConversionService());
registerCustomEditors(bw);
}
/**
* Initialize the given PropertyEditorRegistry with the custom editors
* that have been registered with this BeanFactory.
* <p>To be called for BeanWrappers that will create and populate bean
* instances, and for SimpleTypeConverter used for constructor argument
* and factory method type conversion.
* @param registry the PropertyEditorRegistry to initialize
*/
protected void registerCustomEditors(PropertyEditorRegistry registry) {
if (registry instanceof PropertyEditorRegistrySupport registrySupport) {
registrySupport.useConfigValueEditors();
if (!this.defaultEditorRegistrars.isEmpty()) {
// Optimization: lazy overriding of default editors only when needed
registrySupport.setDefaultEditorRegistrar(new BeanFactoryDefaultEditorRegistrar());
}
}
else if (!this.defaultEditorRegistrars.isEmpty()) {
// Fallback: proactive overriding of default editors
applyEditorRegistrars(registry, this.defaultEditorRegistrars);
}
if (!this.propertyEditorRegistrars.isEmpty()) {
applyEditorRegistrars(registry, this.propertyEditorRegistrars);
}
if (!this.customEditors.isEmpty()) {
this.customEditors.forEach((requiredType, editorClass) ->
registry.registerCustomEditor(requiredType, BeanUtils.instantiateClass(editorClass)));
}
}
private void applyEditorRegistrars(PropertyEditorRegistry registry, Set<PropertyEditorRegistrar> registrars) {
for (PropertyEditorRegistrar registrar : registrars) {
try {
registrar.registerCustomEditors(registry);
}
catch (BeanCreationException ex) {
Throwable rootCause = ex.getMostSpecificCause();
if (rootCause instanceof BeanCurrentlyInCreationException bce) {
String bceBeanName = bce.getBeanName();
if (bceBeanName != null && isCurrentlyInCreation(bceBeanName)) {
if (logger.isDebugEnabled()) {
logger.debug("PropertyEditorRegistrar [" + registrar.getClass().getName() +
"] failed because it tried to obtain currently created bean '" +
ex.getBeanName() + "': " + ex.getMessage());
}
onSuppressedException(ex);
return;
}
}
throw ex;
}
}
}
/**
* Return a merged RootBeanDefinition, traversing the parent bean definition
* if the specified bean corresponds to a child bean definition.
* @param beanName the name of the bean to retrieve the merged definition for
* @return a (potentially merged) RootBeanDefinition for the given bean
* @throws NoSuchBeanDefinitionException if there is no bean with the given name
* @throws BeanDefinitionStoreException in case of an invalid bean definition
*/
protected RootBeanDefinition getMergedLocalBeanDefinition(String beanName) throws BeansException {
// Quick check on the concurrent map first, with minimal locking.
RootBeanDefinition mbd = this.mergedBeanDefinitions.get(beanName);
if (mbd != null && !mbd.stale) {
return mbd;
}
return getMergedBeanDefinition(beanName, getBeanDefinition(beanName));
}
/**
* Return a RootBeanDefinition for the given top-level bean, by merging with
* the parent if the given bean's definition is a child bean definition.
* @param beanName the name of the bean definition
* @param bd the original bean definition (Root/ChildBeanDefinition)
* @return a (potentially merged) RootBeanDefinition for the given bean
* @throws BeanDefinitionStoreException in case of an invalid bean definition
*/
protected RootBeanDefinition getMergedBeanDefinition(String beanName, BeanDefinition bd)
throws BeanDefinitionStoreException {
return getMergedBeanDefinition(beanName, bd, null);
}
/**
* Return a RootBeanDefinition for the given bean, by merging with the
* parent if the given bean's definition is a child bean definition.
* @param beanName the name of the bean definition
* @param bd the original bean definition (Root/ChildBeanDefinition)
* @param containingBd the containing bean definition in case of inner bean,
* or {@code null} in case of a top-level bean
* @return a (potentially merged) RootBeanDefinition for the given bean
* @throws BeanDefinitionStoreException in case of an invalid bean definition
*/
protected RootBeanDefinition getMergedBeanDefinition(
String beanName, BeanDefinition bd, @Nullable BeanDefinition containingBd)
throws BeanDefinitionStoreException {
synchronized (this.mergedBeanDefinitions) {
RootBeanDefinition mbd = null;
RootBeanDefinition previous = null;
// Check with full lock now in order to enforce the same merged instance.
if (containingBd == null) {
mbd = this.mergedBeanDefinitions.get(beanName);
}
if (mbd == null || mbd.stale) {
previous = mbd;
if (bd.getParentName() == null) {
// Use copy of given root bean definition.
if (bd instanceof RootBeanDefinition rootBeanDef) {
mbd = rootBeanDef.cloneBeanDefinition();
}
else {
mbd = new RootBeanDefinition(bd);
}
}
else {
// Child bean definition: needs to be merged with parent.
BeanDefinition pbd;
try {
String parentBeanName = transformedBeanName(bd.getParentName());
if (!beanName.equals(parentBeanName)) {
pbd = getMergedBeanDefinition(parentBeanName);
}
else {
if (getParentBeanFactory() instanceof ConfigurableBeanFactory parent) {
pbd = parent.getMergedBeanDefinition(parentBeanName);
}
else {
throw new NoSuchBeanDefinitionException(parentBeanName,
"Parent name '" + parentBeanName + "' is equal to bean name '" + beanName +
"': cannot be resolved without a ConfigurableBeanFactory parent");
}
}
}
catch (NoSuchBeanDefinitionException ex) {
throw new BeanDefinitionStoreException(bd.getResourceDescription(), beanName,
"Could not resolve parent bean definition '" + bd.getParentName() + "'", ex);
}
// Deep copy with overridden values.
mbd = new RootBeanDefinition(pbd);
mbd.overrideFrom(bd);
}
// Set default singleton scope, if not configured before.
if (!StringUtils.hasLength(mbd.getScope())) {
mbd.setScope(SCOPE_SINGLETON);
}
// A bean contained in a non-singleton bean cannot be a singleton itself.
// Let's correct this on the fly here, since this might be the result of
// parent-child merging for the outer bean, in which case the original inner bean
// definition will not have inherited the merged outer bean's singleton status.
if (containingBd != null && !containingBd.isSingleton() && mbd.isSingleton()) {
mbd.setScope(containingBd.getScope());
}
// Cache the merged bean definition for the time being
// (it might still get re-merged later on in order to pick up metadata changes)
if (containingBd == null && (isCacheBeanMetadata() || isBeanEligibleForMetadataCaching(beanName))) {
cacheMergedBeanDefinition(mbd, beanName);
}
}
if (previous != null) {
copyRelevantMergedBeanDefinitionCaches(previous, mbd);
}
return mbd;
}
}
private void copyRelevantMergedBeanDefinitionCaches(RootBeanDefinition previous, RootBeanDefinition mbd) {
if (ObjectUtils.nullSafeEquals(mbd.getBeanClassName(), previous.getBeanClassName()) &&
ObjectUtils.nullSafeEquals(mbd.getFactoryBeanName(), previous.getFactoryBeanName()) &&
ObjectUtils.nullSafeEquals(mbd.getFactoryMethodName(), previous.getFactoryMethodName())) {
ResolvableType targetType = mbd.targetType;
ResolvableType previousTargetType = previous.targetType;
if (targetType == null || targetType.equals(previousTargetType)) {
mbd.targetType = previousTargetType;
mbd.isFactoryBean = previous.isFactoryBean;
mbd.resolvedTargetType = previous.resolvedTargetType;
mbd.factoryMethodReturnType = previous.factoryMethodReturnType;
mbd.factoryMethodToIntrospect = previous.factoryMethodToIntrospect;
}
if (previous.hasMethodOverrides()) {
mbd.setMethodOverrides(new MethodOverrides(previous.getMethodOverrides()));
}
}
}
/**
* Cache the given merged bean definition.
* <p>Subclasses can override this to derive additional cached state
* from the final post-processed bean definition.
* @param mbd the merged bean definition to cache
* @param beanName the name of the bean
* @since 6.2.6
*/
protected void cacheMergedBeanDefinition(RootBeanDefinition mbd, String beanName) {
this.mergedBeanDefinitions.put(beanName, mbd);
}
/**
* Check the given merged bean definition,
* potentially throwing validation exceptions.
* @param mbd the merged bean definition to check
* @param beanName the name of the bean
* @param args the arguments for bean creation, if any
*/
protected void checkMergedBeanDefinition(RootBeanDefinition mbd, String beanName, @Nullable Object @Nullable [] args) {
if (mbd.isAbstract()) {
throw new BeanIsAbstractException(beanName);
}
}
/**
* Remove the merged bean definition for the specified bean,
* recreating it on next access.
* @param beanName the bean name to clear the merged definition for
*/
protected void clearMergedBeanDefinition(String beanName) {
RootBeanDefinition bd = this.mergedBeanDefinitions.get(beanName);
if (bd != null) {
bd.stale = true;
}
}
/**
* Clear the merged bean definition cache, removing entries for beans
* which are not considered eligible for full metadata caching yet.
* <p>Typically triggered after changes to the original bean definitions,
* for example, after applying a {@code BeanFactoryPostProcessor}. Note that metadata
* for beans which have already been created at this point will be kept around.
* @since 4.2
*/
public void clearMetadataCache() {
this.mergedBeanDefinitions.forEach((beanName, bd) -> {
if (!isBeanEligibleForMetadataCaching(beanName)) {
bd.stale = true;
}
});
}
/**
* Resolve the bean
|
must
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
|
{
"start": 1224,
"end": 4367
}
|
class ____ {
/**
* Given permissions and extended ACL entries, returns the full logical ACL.
*
* @param perm FsPermission containing permissions
* @param entries List<AclEntry> containing extended ACL entries
* @return List<AclEntry> containing full logical ACL
*/
public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm,
List<AclEntry> entries) {
List<AclEntry> acl = Lists.newArrayListWithCapacity(entries.size() + 3);
// Owner entry implied by owner permission bits.
acl.add(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build());
// All extended access ACL entries.
boolean hasAccessAcl = false;
Iterator<AclEntry> entryIter = entries.iterator();
AclEntry curEntry = null;
while (entryIter.hasNext()) {
curEntry = entryIter.next();
if (curEntry.getScope() == AclEntryScope.DEFAULT) {
break;
}
hasAccessAcl = true;
acl.add(curEntry);
}
// Mask entry implied by group permission bits, or group entry if there is
// no access ACL (only default ACL).
acl.add(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build());
// Other entry implied by other bits.
acl.add(new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
// Default ACL entries.
if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
acl.add(curEntry);
while (entryIter.hasNext()) {
acl.add(entryIter.next());
}
}
return acl;
}
/**
* Translates the given permission bits to the equivalent minimal ACL.
*
* @param perm FsPermission to translate
* @return List<AclEntry> containing exactly 3 entries representing the
* owner, group and other permissions
*/
public static List<AclEntry> getMinimalAcl(FsPermission perm) {
return Lists.newArrayList(
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
}
/**
* Checks if the given entries represent a minimal ACL (contains exactly 3
* entries).
*
* @param entries List<AclEntry> entries to check
* @return boolean true if the entries represent a minimal ACL
*/
public static boolean isMinimalAcl(List<AclEntry> entries) {
return entries.size() == 3;
}
/**
* There is no reason to instantiate this class.
*/
private AclUtil() {
}
}
|
AclUtil
|
java
|
netty__netty
|
common/src/test/java/io/netty/util/internal/JfrEventSafeTest.java
|
{
"start": 2911,
"end": 3005
}
|
class ____ extends Event {
String foo;
}
@Enabled(false)
static final
|
MyEvent
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/rsocket/service/MetadataArgumentResolver.java
|
{
"start": 1251,
"end": 2115
}
|
class ____ implements RSocketServiceArgumentResolver {
@Override
public boolean resolve(
@Nullable Object argument, MethodParameter parameter, RSocketRequestValues.Builder requestValues) {
int index = parameter.getParameterIndex();
Class<?>[] paramTypes = parameter.getExecutable().getParameterTypes();
if (parameter.getParameterType().equals(MimeType.class)) {
Assert.notNull(argument, "MimeType parameter is required");
Assert.state(index > 0, "MimeType parameter should have preceding metadata object parameter");
requestValues.addMimeType((MimeType) argument);
return true;
}
if (paramTypes.length > (index + 1) && MimeType.class.equals(paramTypes[index + 1])) {
Assert.notNull(argument, "MimeType parameter is required");
requestValues.addMetadata(argument);
return true;
}
return false;
}
}
|
MetadataArgumentResolver
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/AbstractNestablePropertyAccessor.java
|
{
"start": 39589,
"end": 39826
}
|
class ____ {
public PropertyTokenHolder(String name) {
this.actualName = name;
this.canonicalName = name;
}
public String actualName;
public String canonicalName;
public String @Nullable [] keys;
}
}
|
PropertyTokenHolder
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/jaxb/mapping/spi/JaxbAttributesContainer.java
|
{
"start": 459,
"end": 865
}
|
interface ____ extends JaxbBaseAttributesContainer {
List<JaxbOneToOneImpl> getOneToOneAttributes();
List<JaxbElementCollectionImpl> getElementCollectionAttributes();
List<JaxbOneToManyImpl> getOneToManyAttributes();
List<JaxbManyToManyImpl> getManyToManyAttributes();
List<JaxbPluralAnyMappingImpl> getPluralAnyMappingAttributes();
List<JaxbTransientImpl> getTransients();
}
|
JaxbAttributesContainer
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/factories/TestValuesRuntimeFunctions.java
|
{
"start": 43742,
"end": 45434
}
|
class ____
extends AsyncTestValueLookupFunction {
private static final long serialVersionUID = 1L;
private static Collection<RowData> emptyResult = Collections.emptyList();
/** The threshold that a real lookup can happen, otherwise no lookup at all. */
private final int lookupThreshold;
private transient Map<RowData, Integer> accessCounter;
public TestNoLookupUntilNthAccessAsyncLookupFunction(
List<Row> data,
int[] lookupIndices,
RowType producedRowType,
LookupTableSource.DataStructureConverter converter,
Optional<GeneratedProjection> generatedProjection,
int lookupThreshold) {
super(data, lookupIndices, producedRowType, converter, generatedProjection);
this.lookupThreshold = lookupThreshold;
}
@Override
public void open(FunctionContext context) throws Exception {
super.open(context);
accessCounter = new HashMap<>();
}
protected int counter(RowData key) {
int currentCnt = accessCounter.computeIfAbsent(key, cnt -> 0) + 1;
accessCounter.put(key, currentCnt);
return currentCnt;
}
@Override
public CompletableFuture<Collection<RowData>> asyncLookup(RowData keyRow) {
int currentCnt = counter(keyRow);
if (currentCnt <= lookupThreshold) {
return CompletableFuture.supplyAsync(() -> emptyResult);
}
return super.asyncLookup(keyRow);
}
}
public static
|
TestNoLookupUntilNthAccessAsyncLookupFunction
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/ScenariosForSpringSecurityExpressionTests.java
|
{
"start": 6915,
"end": 6987
}
|
class ____ implements PropertyAccessor {
static
|
SecurityPrincipalAccessor
|
java
|
apache__camel
|
core/camel-support/src/main/java/org/apache/camel/support/ScheduledPollConsumerHealthCheck.java
|
{
"start": 1189,
"end": 5492
}
|
class ____ implements HealthCheck {
private final HealthCheckRegistry registry;
private HealthCheck.State initialState;
private final ScheduledPollConsumer consumer;
private final String id;
private final String sanitizedBaseUri;
private final String sanitizedUri;
private boolean enabled = true;
public ScheduledPollConsumerHealthCheck(ScheduledPollConsumer consumer, String id) {
this.registry = HealthCheckRegistry.get(consumer.getEndpoint().getCamelContext());
this.initialState = registry != null ? registry.getInitialState() : State.DOWN;
this.consumer = consumer;
this.id = id;
this.sanitizedBaseUri = URISupport.sanitizeUri(consumer.getEndpoint().getEndpointBaseUri());
this.sanitizedUri = URISupport.sanitizeUri(consumer.getEndpoint().getEndpointUri());
}
@Override
public boolean isEnabled() {
return enabled;
}
@Override
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
@Override
public Result call(Map<String, Object> options) {
final HealthCheckResultBuilder builder = HealthCheckResultBuilder.on(this);
// set initial state
builder.state(initialState);
// ensure to sanitize uri, so we do not show sensitive information such as passwords
builder.detail(ENDPOINT_URI, sanitizedUri);
// what kind of check is this
HealthCheck.Kind kind;
if (isLiveness() && isReadiness()) {
// if we can do both then use kind from what type we were invoked as
kind = (Kind) options.getOrDefault(CHECK_KIND, Kind.ALL);
} else {
// we can only be either live or ready so report that
kind = isLiveness() ? Kind.LIVENESS : Kind.READINESS;
}
builder.detail(CHECK_KIND, kind);
if (!isEnabled()) {
builder.message("Disabled");
builder.detail(CHECK_ENABLED, false);
return builder.unknown().build();
}
long ec = consumer.getErrorCounter();
boolean ready = consumer.isConsumerReady();
Throwable cause = consumer.getLastError();
boolean healthy = ec == 0;
boolean readiness = kind.equals(Kind.READINESS);
if (readiness && !ready) {
// special for readiness check before first poll is done or not yet ready
// if initial state is UP or UNKNOWN then return that
// otherwise we are DOWN
boolean down = builder.state().equals(State.DOWN);
if (!down) {
return builder.build();
} else {
healthy = false;
}
}
if (healthy) {
builder.up();
} else {
builder.down();
builder.detail(FAILURE_ERROR_COUNT, ec);
String rid = consumer.getRouteId();
if (ec > 0) {
String msg = "Consumer failed polling %s times route: %s (%s)";
builder.message(String.format(msg, ec, rid, sanitizedBaseUri));
} else {
String msg = "Consumer has not yet polled route: %s (%s)";
builder.message(String.format(msg, rid, sanitizedBaseUri));
}
builder.error(cause);
// include any additional details
if (consumer.getLastErrorDetails() != null) {
builder.details(consumer.getLastErrorDetails());
}
}
return builder.build();
}
public State getInitialState() {
return initialState;
}
/**
* Used to allow special consumers to override the initial state of the health check (readiness check) during
* startup.
*
* Consumers that are internal only such as camel-scheduler uses UP as initial state because the scheduler may be
* configured to run only very in-frequently and therefore the overall health-check state would be affected and seen
* as DOWN.
*/
public void setInitialState(State initialState) {
this.initialState = initialState;
}
@Override
public String getGroup() {
return "camel";
}
@Override
public String getId() {
return id;
}
}
|
ScheduledPollConsumerHealthCheck
|
java
|
spring-projects__spring-boot
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/ProjectType.java
|
{
"start": 898,
"end": 1643
}
|
class ____ {
private final String id;
private final String name;
private final String action;
private final boolean defaultType;
private final Map<String, String> tags = new HashMap<>();
ProjectType(String id, String name, String action, boolean defaultType, @Nullable Map<String, String> tags) {
this.id = id;
this.name = name;
this.action = action;
this.defaultType = defaultType;
if (tags != null) {
this.tags.putAll(tags);
}
}
String getId() {
return this.id;
}
String getName() {
return this.name;
}
String getAction() {
return this.action;
}
boolean isDefaultType() {
return this.defaultType;
}
Map<String, String> getTags() {
return Collections.unmodifiableMap(this.tags);
}
}
|
ProjectType
|
java
|
quarkusio__quarkus
|
extensions/devui/deployment-spi/src/main/java/io/quarkus/devui/spi/AbstractDevUIBuildItem.java
|
{
"start": 1011,
"end": 3071
}
|
class ____ will be used to auto-detect the name
StackWalker stackWalker = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE);
stackWalker.walk(frames -> frames.collect(Collectors.toList()));
Optional<StackWalker.StackFrame> stackFrame = stackWalker.walk(frames -> frames
.filter(frame -> (!frame.getDeclaringClass().getPackageName().startsWith("io.quarkus.devui.spi")
&& !frame.getDeclaringClass().getPackageName().startsWith("io.quarkus.devui.deployment")
&& !frame.getDeclaringClass().equals(MethodHandle.class)))
.findFirst());
if (stackFrame.isPresent()) {
this.callerClass = stackFrame.get().getDeclaringClass();
if (this.callerClass == null)
this.extensionIdentifier = DEV_UI;
} else {
throw new RuntimeException("Could not detect extension identifier automatically");
}
} else {
this.callerClass = null;
}
}
public ArtifactKey getArtifactKey(CurateOutcomeBuildItem curateOutcomeBuildItem) {
if (this.artifactKey == null) {
if (callerClass != null) {
Map.Entry<String, String> groupIdAndArtifactId = ArtifactInfoUtil.groupIdAndArtifactId(callerClass,
curateOutcomeBuildItem);
this.artifactKey = ArtifactKey.ga(groupIdAndArtifactId.getKey(), groupIdAndArtifactId.getValue());
}
}
return this.artifactKey;
}
public String getExtensionPathName(CurateOutcomeBuildItem curateOutcomeBuildItem) {
if (this.extensionIdentifier == null) {
ArtifactKey ak = getArtifactKey(curateOutcomeBuildItem);
this.extensionIdentifier = ak.getArtifactId();
}
return this.extensionIdentifier;
}
public boolean isInternal() {
return this.isInternal;
}
public static final String DEV_UI = "devui";
}
|
that
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/aop/framework/adapter/AdvisorAdapterRegistrationTests.java
|
{
"start": 2825,
"end": 3216
}
|
class ____ implements AdvisorAdapter, Serializable {
@Override
public boolean supportsAdvice(Advice advice) {
return (advice instanceof SimpleBeforeAdvice);
}
@Override
public MethodInterceptor getInterceptor(Advisor advisor) {
SimpleBeforeAdvice advice = (SimpleBeforeAdvice) advisor.getAdvice();
return new SimpleBeforeAdviceInterceptor(advice) ;
}
}
|
SimpleBeforeAdviceAdapter
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/sqm/tree/domain/SqmCorrelatedDerivedJoin.java
|
{
"start": 696,
"end": 3523
}
|
class ____<T> extends SqmDerivedJoin<T> implements SqmCorrelation<T, T>, SqmCorrelatedSingularValuedJoin<T, T> {
private final SqmCorrelatedRootJoin<T> correlatedRootJoin;
private final SqmDerivedJoin<T> correlationParent;
public SqmCorrelatedDerivedJoin(SqmDerivedJoin<T> correlationParent) {
//noinspection unchecked
super(
correlationParent.getNavigablePath(),
correlationParent.getQueryPart(),
correlationParent.isLateral(),
correlationParent.getReferencedPathSource(),
correlationParent.getExplicitAlias(),
correlationParent.getSqmJoinType(),
(SqmRoot<T>) correlationParent.getRoot()
);
this.correlatedRootJoin = SqmCorrelatedDerivedRootJoin.create( correlationParent, this );
this.correlationParent = correlationParent;
}
private SqmCorrelatedDerivedJoin(
NavigablePath navigablePath,
SqmSubQuery<T> subQuery,
boolean lateral,
SqmPathSource<T> pathSource,
@Nullable String alias,
SqmJoinType joinType,
SqmRoot<T> sqmRoot,
SqmCorrelatedRootJoin<T> correlatedRootJoin,
SqmDerivedJoin<T> correlationParent) {
super( navigablePath, subQuery, lateral, pathSource, alias, joinType, sqmRoot );
this.correlatedRootJoin = correlatedRootJoin;
this.correlationParent = correlationParent;
}
@Override
public SqmCorrelatedDerivedJoin<T> copy(SqmCopyContext context) {
final SqmCorrelatedDerivedJoin<T> existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
final SqmCorrelatedDerivedJoin<T> path = context.registerCopy(
this,
new SqmCorrelatedDerivedJoin<>(
getNavigablePath(),
getQueryPart(),
isLateral(),
getReferencedPathSource(),
getExplicitAlias(),
getSqmJoinType(),
(SqmRoot<T>) findRoot().copy( context ),
correlatedRootJoin.copy( context ),
correlationParent.copy( context )
)
);
copyTo( path, context );
return path;
}
@Override
public SqmDerivedJoin<T> getCorrelationParent() {
return correlationParent;
}
@Override
public SqmPath<T> getWrappedPath() {
return correlationParent;
}
@Override
public boolean isCorrelated() {
return true;
}
@Override
public SqmRoot<T> getCorrelatedRoot() {
return correlatedRootJoin;
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitCorrelatedDerivedJoin( this );
}
@Override
public boolean deepEquals(SqmFrom<?, ?> other) {
return super.deepEquals( other )
&& other instanceof SqmCorrelatedDerivedJoin<?> that
&& correlationParent.equals( that.correlationParent );
}
@Override
public boolean isDeepCompatible(SqmFrom<?, ?> other) {
return super.isDeepCompatible( other )
&& other instanceof SqmCorrelatedDerivedJoin<?> that
&& correlationParent.isCompatible( that.correlationParent );
}
}
|
SqmCorrelatedDerivedJoin
|
java
|
apache__camel
|
components/camel-github/src/test/java/org/apache/camel/component/github/producer/ClosePullRequestProducerTest.java
|
{
"start": 3255,
"end": 3588
}
|
class ____ implements Processor {
@Override
public void process(Exchange exchange) {
Message in = exchange.getIn();
Map<String, Object> headers = in.getHeaders();
headers.put(GitHubConstants.GITHUB_PULLREQUEST, latestPullRequestId);
}
}
}
|
ClosePullRequestProducerProcessor
|
java
|
processing__processing4
|
java/src/processing/mode/java/preproc/PreprocessIssueMessageSimplifier.java
|
{
"start": 13344,
"end": 16018
}
|
class ____
implements PreprocIssueMessageSimplifierStrategy {
private final Pattern pattern;
private final String hintTemplate;
/**
* Create a new instance of this strategy.
*
* @param newRegex The regex that should be matched in order to activate this strategy.
* @param newHintTemplate template string with a "%s" where the "offending snippet of code" can
* be inserted where the resulting rendered template can be used as an error hint for the
* user. For example, "Invalid identifier near %s" may be rendered to the user like "Syntax
* error. Hint: Invalid identifier near ,1a);".
*/
public RegexTemplateMessageSimplifierStrategy(String newRegex, String newHintTemplate) {
pattern = Pattern.compile(newRegex);
hintTemplate = newHintTemplate;
}
@Override
public Optional<PdeIssueEmitter.IssueMessageSimplification> simplify(String message) {
if (pattern.matcher(message).find()) {
String newMessage = String.format(
hintTemplate,
getOffendingArea(message)
);
return Optional.of(
new PdeIssueEmitter.IssueMessageSimplification(newMessage, getAttributeToPrior())
);
} else {
return Optional.empty();
}
}
/**
* Determine if this issue should be attributed to the prior token.
*
* @return True if should be attributed to prior token. False otherwise.
*/
public boolean getAttributeToPrior() {
return false;
}
}
/**
* Shortcut to create a regex matcher with a localized error message.
*
* @param regex The regex to match.
* @param localStr The localized string identifier to use when the regex matches.
* @return Newly created simplifier strategy.
*/
protected PreprocIssueMessageSimplifierStrategy createRegexStrategyUsingLocalStr(String regex,
String localStr) {
return new RegexTemplateMessageSimplifierStrategy(
regex,
getLocalStr(localStr)
);
}
/**
* Strategy for invalid parameter.
*/
protected PreprocIssueMessageSimplifierStrategy createErrorOnParameterStrategy() {
return createRegexStrategyUsingLocalStr(
"([a-zA-Z0-9_]+\\s*,|[a-zA-Z0-9_]+\\)|\\([^\\)]+)",
"editor.status.bad.parameter"
);
}
/**
* Strategy for missing method name.
*/
protected PreprocIssueMessageSimplifierStrategy createMethodMissingNameStrategy() {
return createRegexStrategyUsingLocalStr(
"[a-zA-Z0-9_]+\\s*\\(.*\\)\\s*\\{",
"editor.status.missing.name"
);
}
/**
* Strategy for missing
|
RegexTemplateMessageSimplifierStrategy
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/method/annotation/PathVariableMapMethodArgumentResolver.java
|
{
"start": 1736,
"end": 2769
}
|
class ____ implements HandlerMethodArgumentResolver {
@Override
public boolean supportsParameter(MethodParameter parameter) {
PathVariable ann = parameter.getParameterAnnotation(PathVariable.class);
return (ann != null && Map.class.isAssignableFrom(parameter.getParameterType()) &&
!StringUtils.hasText(ann.value()));
}
/**
* Return a Map with all URI template variables or an empty map.
*/
@Override
public Object resolveArgument(MethodParameter parameter, @Nullable ModelAndViewContainer mavContainer,
NativeWebRequest webRequest, @Nullable WebDataBinderFactory binderFactory) throws Exception {
@SuppressWarnings("unchecked")
Map<String, String> uriTemplateVars =
(Map<String, String>) webRequest.getAttribute(
HandlerMapping.URI_TEMPLATE_VARIABLES_ATTRIBUTE, RequestAttributes.SCOPE_REQUEST);
if (!CollectionUtils.isEmpty(uriTemplateVars)) {
return Collections.unmodifiableMap(uriTemplateVars);
}
else {
return Collections.emptyMap();
}
}
}
|
PathVariableMapMethodArgumentResolver
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/read/ValueSourceReaderTypeConversionTests.java
|
{
"start": 30252,
"end": 30389
}
|
interface ____ {
void check(boolean forcedRowByRow, int pageCount, int segmentCount, Map<?, ?> readersBuilt);
}
|
CheckReaders
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/NameValuePair.java
|
{
"start": 985,
"end": 1147
}
|
class ____ a name and value pair, used for specifying filters in
* {@link TimelineReader}.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public
|
holding
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/jmx/access/MBeanClientInterceptor.java
|
{
"start": 10323,
"end": 11286
}
|
interface ____ the managed resource.
*/
private void retrieveMBeanInfo(MBeanServerConnection server) throws MBeanInfoRetrievalException {
try {
MBeanInfo info = server.getMBeanInfo(this.objectName);
MBeanAttributeInfo[] attributeInfo = info.getAttributes();
this.allowedAttributes = CollectionUtils.newHashMap(attributeInfo.length);
for (MBeanAttributeInfo infoEle : attributeInfo) {
this.allowedAttributes.put(infoEle.getName(), infoEle);
}
MBeanOperationInfo[] operationInfo = info.getOperations();
this.allowedOperations = CollectionUtils.newHashMap(operationInfo.length);
for (MBeanOperationInfo infoEle : operationInfo) {
Class<?>[] paramTypes = JmxUtils.parameterInfoToTypes(infoEle.getSignature(), this.beanClassLoader);
this.allowedOperations.put(new MethodCacheKey(infoEle.getName(), paramTypes), infoEle);
}
}
catch (ClassNotFoundException ex) {
throw new MBeanInfoRetrievalException("Unable to locate
|
of
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/kstream/internals/KTableFilter.java
|
{
"start": 1660,
"end": 4391
}
|
class ____<KIn, VIn> implements KTableProcessorSupplier<KIn, VIn, KIn, VIn> {
private final KTableImpl<KIn, ?, VIn> parent;
private final Predicate<? super KIn, ? super VIn> predicate;
private final boolean filterNot;
private final String queryableName;
private boolean sendOldValues;
private boolean useVersionedSemantics = false;
private final StoreFactory storeFactory;
KTableFilter(final KTableImpl<KIn, ?, VIn> parent,
final Predicate<? super KIn, ? super VIn> predicate,
final boolean filterNot,
final String queryableName,
final StoreFactory storeFactory) {
this.parent = parent;
this.predicate = predicate;
this.filterNot = filterNot;
this.queryableName = queryableName;
// If upstream is already materialized, enable sending old values to avoid sending unnecessary tombstones:
this.sendOldValues = parent.enableSendingOldValues(false);
this.storeFactory = storeFactory;
}
public void setUseVersionedSemantics(final boolean useVersionedSemantics) {
this.useVersionedSemantics = useVersionedSemantics;
}
// VisibleForTesting
boolean isUseVersionedSemantics() {
return useVersionedSemantics;
}
@Override
public Processor<KIn, Change<VIn>, KIn, Change<VIn>> get() {
return new KTableFilterProcessor();
}
@Override
public Set<StoreBuilder<?>> stores() {
if (storeFactory == null) {
return null;
}
return Collections.singleton(new StoreFactory.FactoryWrappingStoreBuilder<>(storeFactory));
}
@Override
public boolean enableSendingOldValues(final boolean forceMaterialization) {
if (queryableName != null) {
sendOldValues = true;
return true;
}
if (parent.enableSendingOldValues(forceMaterialization)) {
sendOldValues = true;
}
return sendOldValues;
}
private VIn computeValue(final KIn key, final VIn value) {
VIn newValue = null;
if (value != null && (filterNot ^ predicate.test(key, value))) {
newValue = value;
}
return newValue;
}
private ValueAndTimestamp<VIn> computeValue(final KIn key, final ValueAndTimestamp<VIn> valueAndTimestamp) {
ValueAndTimestamp<VIn> newValueAndTimestamp = null;
if (valueAndTimestamp != null) {
final VIn value = valueAndTimestamp.value();
if (filterNot ^ predicate.test(key, value)) {
newValueAndTimestamp = valueAndTimestamp;
}
}
return newValueAndTimestamp;
}
private
|
KTableFilter
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
|
{
"start": 4233,
"end": 5601
}
|
class ____ extends BalancingPolicy {
static final Pool INSTANCE = new Pool();
private Pool() {}
@Override
String getName() {
return "blockpool";
}
@Override
void accumulateSpaces(DatanodeStorageReport r) {
for(StorageReport s : r.getStorageReports()) {
final StorageType t = s.getStorage().getStorageType();
// Use s.getRemaining() + s.getBlockPoolUsed() instead of
// s.getCapacity() here to avoid moving blocks towards nodes with
// little actual available space.
// The util is computed as blockPoolUsed/(remaining+blockPoolUsed),
// which means nodes with more remaining space and less blockPoolUsed
// will serve as the recipient during the balancing process.
totalCapacities.add(t, s.getRemaining() + s.getBlockPoolUsed());
totalUsedSpaces.add(t, s.getBlockPoolUsed());
}
}
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
long capacity = 0L;
long blockPoolUsed = 0L;
for(StorageReport s : r.getStorageReports()) {
if (s.getStorage().getStorageType() == t) {
capacity += s.getRemaining() + s.getBlockPoolUsed();
blockPoolUsed += s.getBlockPoolUsed();
}
}
return capacity == 0L ? null : blockPoolUsed * 100.0 / capacity;
}
}
}
|
Pool
|
java
|
grpc__grpc-java
|
util/src/main/java/io/grpc/util/HealthProducerHelper.java
|
{
"start": 1639,
"end": 1845
}
|
class ____ {
* private final LoadBalancer.Helper helper;
* public HealthProducer(Helper helper) {
* this.helper = new MyHelper(HealthCheckUtil.HealthCheckHelper(helper));
* }
*
|
HealthProducerLB
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/metrics/util/InterceptingTaskMetricGroup.java
|
{
"start": 1159,
"end": 1875
}
|
class ____
extends UnregisteredMetricGroups.UnregisteredTaskMetricGroup {
private Map<String, Metric> intercepted;
/**
* Returns the registered metric for the given name, or null if it was never registered.
*
* @param name metric name
* @return registered metric for the given name, or null if it was never registered
*/
public Metric get(String name) {
return intercepted.get(name);
}
@Override
protected void addMetric(String name, Metric metric) {
if (intercepted == null) {
intercepted = new HashMap<>();
}
intercepted.put(name, metric);
super.addMetric(name, metric);
}
}
|
InterceptingTaskMetricGroup
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/api/push/PushListener.java
|
{
"start": 279,
"end": 451
}
|
interface ____ {
/**
* Handle a push message.
*
* @param message message to respond to.
*/
void onPushMessage(PushMessage message);
}
|
PushListener
|
java
|
apache__camel
|
components/camel-file/src/main/java/org/apache/camel/component/file/GenericFileEndpoint.java
|
{
"start": 2163,
"end": 2208
}
|
class ____ file endpoints
*/
public abstract
|
for
|
java
|
apache__kafka
|
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinator.java
|
{
"start": 3939,
"end": 19525
}
|
interface ____ {
/**
* Heartbeat to a Consumer Group.
*
* @param context The request context.
* @param request The ConsumerGroupHeartbeatResponse data.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<ConsumerGroupHeartbeatResponseData> consumerGroupHeartbeat(
AuthorizableRequestContext context,
ConsumerGroupHeartbeatRequestData request
);
/**
* Heartbeat to a Streams Group.
*
* @param context The request context.
* @param request The StreamsGroupHeartbeatResponseData data.
*
* @return A future yielding the response together with internal topics to create.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<StreamsGroupHeartbeatResult> streamsGroupHeartbeat(
AuthorizableRequestContext context,
StreamsGroupHeartbeatRequestData request
);
/**
* Heartbeat to a Share Group.
*
* @param context The request context.
* @param request The ShareGroupHeartbeatResponse data.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<ShareGroupHeartbeatResponseData> shareGroupHeartbeat(
AuthorizableRequestContext context,
ShareGroupHeartbeatRequestData request
);
/**
* Join a Classic Group.
*
* @param context The request context.
* @param request The JoinGroupRequest data.
* @param bufferSupplier The buffer supplier tight to the request thread.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<JoinGroupResponseData> joinGroup(
AuthorizableRequestContext context,
JoinGroupRequestData request,
BufferSupplier bufferSupplier
);
/**
* Sync a Classic Group.
*
* @param context The coordinator request context.
* @param request The SyncGroupRequest data.
* @param bufferSupplier The buffer supplier tight to the request thread.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<SyncGroupResponseData> syncGroup(
AuthorizableRequestContext context,
SyncGroupRequestData request,
BufferSupplier bufferSupplier
);
/**
* Heartbeat to a Classic Group.
*
* @param context The coordinator request context.
* @param request The HeartbeatRequest data.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<HeartbeatResponseData> heartbeat(
AuthorizableRequestContext context,
HeartbeatRequestData request
);
/**
* Leave a Classic Group.
*
* @param context The coordinator request context.
* @param request The LeaveGroupRequest data.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<LeaveGroupResponseData> leaveGroup(
AuthorizableRequestContext context,
LeaveGroupRequestData request
);
/**
* List Groups.
*
* @param context The coordinator request context.
* @param request The ListGroupRequest data.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<ListGroupsResponseData> listGroups(
AuthorizableRequestContext context,
ListGroupsRequestData request
);
/**
* Describe Groups.
*
* @param context The coordinator request context.
* @param groupIds The group ids.
*
* @return A future yielding the results.
* The error codes of the results are set to indicate the errors occurred during the execution.
*/
CompletableFuture<List<DescribeGroupsResponseData.DescribedGroup>> describeGroups(
AuthorizableRequestContext context,
List<String> groupIds
);
/**
* Describe consumer groups.
*
* @param context The coordinator request context.
* @param groupIds The group ids.
*
* @return A future yielding the results or an exception.
*/
CompletableFuture<List<ConsumerGroupDescribeResponseData.DescribedGroup>> consumerGroupDescribe(
AuthorizableRequestContext context,
List<String> groupIds
);
/**
* Describe streams groups.
*
* @param context The coordinator request context.
* @param groupIds The group ids.
*
* @return A future yielding the results or an exception.
*/
CompletableFuture<List<StreamsGroupDescribeResponseData.DescribedGroup>> streamsGroupDescribe(
AuthorizableRequestContext context,
List<String> groupIds
);
/**
* Describe share groups.
*
* @param context The coordinator request context.
* @param groupIds The group ids.
*
* @return A future yielding the results or an exception.
*/
CompletableFuture<List<ShareGroupDescribeResponseData.DescribedGroup>> shareGroupDescribe(
AuthorizableRequestContext context,
List<String> groupIds
);
/**
* Alter Share Group Offsets for a given group.
*
* @param context The request context.
* @param groupId The group id.
* @param requestData The AlterShareGroupOffsetsRequest data.
* @return A future yielding the results or an exception.
*/
CompletableFuture<AlterShareGroupOffsetsResponseData> alterShareGroupOffsets(
AuthorizableRequestContext context,
String groupId,
AlterShareGroupOffsetsRequestData requestData
);
/**
* Delete Groups.
*
* @param context The request context.
* @param groupIds The group ids.
* @param bufferSupplier The buffer supplier tight to the request thread.
*
* @return A future yielding the results.
* The error codes of the results are set to indicate the errors occurred during the execution.
*/
CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection> deleteGroups(
AuthorizableRequestContext context,
List<String> groupIds,
BufferSupplier bufferSupplier
);
/**
* Fetch offsets for a given Group.
*
* @param context The request context.
* @param request The OffsetFetchRequestGroup request.
*
* @return A future yielding the results.
* The error codes of the results are set to indicate the errors occurred during the execution.
*/
CompletableFuture<OffsetFetchResponseData.OffsetFetchResponseGroup> fetchOffsets(
AuthorizableRequestContext context,
OffsetFetchRequestData.OffsetFetchRequestGroup request,
boolean requireStable
);
/**
* Describe the Share Group Offsets for a given group.
*
* @param context The request context
* @param request The DescribeShareGroupOffsetsRequestGroup request.
*
* @return A future yielding the results.
* The error codes of the response are set to indicate the errors occurred during the execution.
*/
CompletableFuture<DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup> describeShareGroupOffsets(
AuthorizableRequestContext context,
DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup request
);
/**
* Describe all Share Group Offsets for a given group.
*
* @param context The request context
* @param request The DescribeShareGroupOffsetsRequestGroup request.
*
* @return A future yielding the results.
* The error codes of the response are set to indicate the errors occurred during the execution.
*/
CompletableFuture<DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup> describeShareGroupAllOffsets(
AuthorizableRequestContext context,
DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup request
);
/**
* Delete the Share Group Offsets for a given group.
*
* @param context The request context
* @param request The DeleteShareGroupOffsetsRequestGroup request.
*
* @return A future yielding the results.
* The error codes of the response are set to indicate the errors occurred during the execution.
*/
CompletableFuture<DeleteShareGroupOffsetsResponseData> deleteShareGroupOffsets(
AuthorizableRequestContext context,
DeleteShareGroupOffsetsRequestData request
);
/**
* Commit offsets for a given Group.
*
* @param context The request context.
* @param request The OffsetCommitRequest data.
* @param bufferSupplier The buffer supplier tight to the request thread.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<OffsetCommitResponseData> commitOffsets(
AuthorizableRequestContext context,
OffsetCommitRequestData request,
BufferSupplier bufferSupplier
);
/**
* Commit transactional offsets for a given Group.
*
* @param context The request context.
* @param request The TnxOffsetCommitRequest data.
* @param bufferSupplier The buffer supplier tight to the request thread.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<TxnOffsetCommitResponseData> commitTransactionalOffsets(
AuthorizableRequestContext context,
TxnOffsetCommitRequestData request,
BufferSupplier bufferSupplier
);
/**
* Delete offsets for a given Group.
*
* @param context The request context.
* @param request The OffsetDeleteRequest data.
* @param bufferSupplier The buffer supplier tight to the request thread.
*
* @return A future yielding the response.
* The error code(s) of the response are set to indicate the error(s) occurred during the execution.
*/
CompletableFuture<OffsetDeleteResponseData> deleteOffsets(
AuthorizableRequestContext context,
OffsetDeleteRequestData request,
BufferSupplier bufferSupplier
);
/**
* Complete a transaction. This is called when the WriteTxnMarkers API is called
* by the Transaction Coordinator in order to write the markers to the
* __consumer_offsets partitions.
*
* @param tp The topic-partition.
* @param producerId The producer id.
* @param producerEpoch The producer epoch.
* @param coordinatorEpoch The epoch of the transaction coordinator.
* @param result The transaction result.
* @param transactionVersion The transaction version (1 = TV1, 2 = TV2, etc.).
* @param timeout The operation timeout.
*
* @return A future yielding the result.
*/
CompletableFuture<Void> completeTransaction(
TopicPartition tp,
long producerId,
short producerEpoch,
int coordinatorEpoch,
TransactionResult result,
short transactionVersion,
Duration timeout
);
/**
* Return the partition index for the given Group.
*
* @param groupId The group id.
*
* @return The partition index.
*/
int partitionFor(String groupId);
/**
 * Remove the provided deleted partitions offsets.
 *
 * @param topicPartitions The deleted partitions.
 * @param bufferSupplier The buffer supplier tied to the request thread.
 */
void onPartitionsDeleted(
    List<TopicPartition> topicPartitions,
    BufferSupplier bufferSupplier
) throws ExecutionException, InterruptedException;
/**
* Group coordinator is now the leader for the given partition at the
* given leader epoch. It should load cached state from the partition
* and begin handling requests for groups mapped to it.
*
* @param groupMetadataPartitionIndex The partition index.
* @param groupMetadataPartitionLeaderEpoch The leader epoch of the partition.
*/
void onElection(
int groupMetadataPartitionIndex,
int groupMetadataPartitionLeaderEpoch
);
/**
* Group coordinator is no longer the leader for the given partition
* at the given leader epoch. It should unload cached state and stop
* handling requests for groups mapped to it.
*
* @param groupMetadataPartitionIndex The partition index.
* @param groupMetadataPartitionLeaderEpoch The leader epoch of the partition as an
* optional value. An empty value means that
* the topic was deleted.
*/
void onResignation(
int groupMetadataPartitionIndex,
OptionalInt groupMetadataPartitionLeaderEpoch
);
/**
* A new metadata image is available.
*
* @param newImage The new metadata image.
* @param delta The metadata delta.
*/
void onNewMetadataImage(
CoordinatorMetadataImage newImage,
CoordinatorMetadataDelta delta
);
/**
* Return the configuration properties of the internal group
* metadata topic.
*
* @return Properties of the internal topic.
*/
Properties groupMetadataTopicConfigs();
/**
* Return the configuration of the provided group.
*
* @param groupId The group id.
* @return The group config.
*/
Optional<GroupConfig> groupConfig(String groupId);
/**
* Update the configuration of the provided group.
*
* @param groupId The group id.
* @param newGroupConfig The new group config
*/
void updateGroupConfig(String groupId, Properties newGroupConfig);
/**
* Startup the group coordinator.
*
* @param groupMetadataTopicPartitionCount A supplier to get the number of partitions
* of the consumer offsets topic.
*/
void startup(IntSupplier groupMetadataTopicPartitionCount);
/**
* Shutdown the group coordinator.
*/
void shutdown();
}
|
GroupCoordinator
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java
|
{
"start": 1070,
"end": 1870
}
|
class ____ extends AppView {
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
String tid = $(TASK_ID);
String activeNav = "3";
if(tid == null || tid.isEmpty()) {
activeNav = "2";
}
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:"+activeNav+"}");
set(DATATABLES_ID, "singleCounter");
set(initID(DATATABLES, "singleCounter"), counterTableInit());
setTableStyles(html, "singleCounter");
}
/**
* @return The end of a javascript map that is the jquery datatable
* configuration for the jobs table. the Jobs table is assumed to be
* rendered by the
|
SingleCounterPage
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/extension/serializable/processor/SerializableAutoValueExtensionTest.java
|
{
"start": 8136,
"end": 9708
}
|
class ____<T extends Serializable, S> {
abstract Builder<T, S> setA(T value);
abstract Builder<T, S> setOptionalB(S value);
abstract HasTypeParameters<T, S> build();
}
}
@Test
public void typeParameterizedFieldsAreSet_noEmpty() {
HasTypeParameters<String, Integer> autoValue =
HasTypeParameters.<String, Integer>builder().setA(A).setOptionalB(B).build();
assertThat(autoValue.a()).isEqualTo(A);
assertThat(autoValue.optionalB()).hasValue(B);
}
@Test
public void typeParameterizedFieldsAreSet_withEmpty() {
HasTypeParameters<String, Integer> autoValue =
HasTypeParameters.<String, Integer>builder().setA(A).build();
assertThat(autoValue.a()).isEqualTo(A);
assertThat(autoValue.optionalB()).isEmpty();
}
@Test
public void typeParameterizedFieldsAreSerializable_noEmpty() {
HasTypeParameters<String, Integer> autoValue =
HasTypeParameters.<String, Integer>builder().setA(A).setOptionalB(B).build();
HasTypeParameters<String, Integer> actualAutoValue = SerializableTester.reserialize(autoValue);
assertThat(actualAutoValue).isEqualTo(autoValue);
}
@Test
public void typeParameterizedFieldsAreSerializable_withEmpty() {
HasTypeParameters<String, Integer> autoValue =
HasTypeParameters.<String, Integer>builder().setA(A).build();
HasTypeParameters<String, Integer> actualAutoValue = SerializableTester.reserialize(autoValue);
assertThat(actualAutoValue).isEqualTo(autoValue);
}
@SerializableAutoValue
@AutoValue
abstract static
|
Builder
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/MapMultibindingValidationTest.java
|
{
"start": 3447,
"end": 7075
}
|
class ____");
subject.hasErrorContaining("provideObjectForAKey()");
subject.hasErrorContaining("provideObjectForAKeyAgain()");
});
// If there's Map<K, V> and Map<K, Provider<V>>, report only Map<K, V>.
CompilerTests.daggerCompiler(
module,
component(
"Map<String, Object> objects();",
"Map<String, Provider<Object>> objectProviders();"))
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"The same map key is bound more than once for Map<String,Object>");
});
// If there's Map<K, V> and Map<K, Producer<V>>, report only Map<K, V>.
CompilerTests.daggerCompiler(
module,
component(
"Map<String, Object> objects();",
"Producer<Map<String, Producer<Object>>> objectProducers();"))
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"The same map key is bound more than once for Map<String,Object>");
});
// If there's Map<K, Provider<V>> and Map<K, Producer<V>>, report only Map<K, Provider<V>>.
CompilerTests.daggerCompiler(
module,
component(
"Map<String, Provider<Object>> objectProviders();",
"Producer<Map<String, Producer<Object>>> objectProducers();"))
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"The same map key is bound more than once for Map<String,Provider<Object>>");
});
CompilerTests.daggerCompiler(
module,
component("Map<String, Object> objects();"))
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"The same map key is bound more than once for Map<String,Object>");
});
CompilerTests.daggerCompiler(
module,
component("Map<String, Provider<Object>> objectProviders();"))
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"The same map key is bound more than once for Map<String,Provider<Object>>");
});
CompilerTests.daggerCompiler(
module,
component("Producer<Map<String, Producer<Object>>> objectProducers();"))
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"The same map key is bound more than once for Map<String,Producer<Object>>");
});
}
@Test
public void duplicateMapKeys_WrappedMapKey() {
Source module =
CompilerTests.javaSource(
"test.MapModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import dagger.multibindings.IntoMap;",
"import dagger.MapKey;",
"",
"@Module",
"abstract
|
MapModule
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/rsocket/annotation/support/RSocketRequesterMethodArgumentResolver.java
|
{
"start": 1219,
"end": 2507
}
|
class ____ implements HandlerMethodArgumentResolver {

    /**
     * Message header name that is expected to have the {@link RSocket} to
     * initiate new interactions to the remote peer with.
     */
    public static final String RSOCKET_REQUESTER_HEADER = "rsocketRequester";

    @Override
    public boolean supportsParameter(MethodParameter parameter) {
        Class<?> paramType = parameter.getParameterType();
        boolean isRequester = RSocketRequester.class.equals(paramType);
        return (isRequester || RSocket.class.isAssignableFrom(paramType));
    }

    @Override
    public Mono<Object> resolveArgument(MethodParameter parameter, Message<?> message) {
        // The requester is expected to have been placed in the headers upstream.
        Object header = message.getHeaders().get(RSOCKET_REQUESTER_HEADER);
        Assert.notNull(header, "Missing '" + RSOCKET_REQUESTER_HEADER + "'");
        Assert.isInstanceOf(RSocketRequester.class, header, "Expected header value of type RSocketRequester");
        RSocketRequester requester = (RSocketRequester) header;
        Class<?> paramType = parameter.getParameterType();
        if (RSocketRequester.class.equals(paramType)) {
            return Mono.just(requester);
        }
        if (RSocket.class.isAssignableFrom(paramType)) {
            // The raw RSocket may be absent, hence justOrEmpty.
            return Mono.justOrEmpty(requester.rsocket());
        }
        return Mono.error(new IllegalArgumentException("Unexpected parameter type: " + parameter));
    }
}
|
RSocketRequesterMethodArgumentResolver
|
java
|
apache__rocketmq
|
store/src/main/java/org/apache/rocketmq/store/hook/PutMessageHook.java
|
{
"start": 956,
"end": 1300
}
|
interface ____ {

    /**
     * Name of the hook.
     *
     * @return name of the hook
     */
    String hookName();

    /**
     * Execute before put message. For example, Message verification or special message transform
     *
     * @param msg the message about to be put into the store
     * @return the result of the hook execution; NOTE(review): presumably a non-null
     *         failure result aborts the put — confirm against the store's invocation site
     */
    PutMessageResult executeBeforePutMessage(MessageExt msg);
}
|
PutMessageHook
|
java
|
apache__camel
|
components/camel-microprofile/camel-microprofile-config/src/main/java/org/apache/camel/component/microprofile/config/CamelMicroProfilePropertiesSource.java
|
{
"start": 1390,
"end": 2482
}
|
class ____ implements LoadablePropertiesSource {

    @Override
    public String getName() {
        return "CamelMicroProfilePropertiesSource";
    }

    /**
     * Looks up a single property from the MicroProfile Config.
     *
     * @param name the property name
     * @return the resolved value, or {@code null} when the property is absent
     */
    @Override
    public String getProperty(String name) {
        return ConfigProvider.getConfig().getOptionalValue(name, String.class).orElse(null);
    }

    @Override
    public Properties loadProperties() {
        return loadProperties(s -> true);
    }

    /**
     * Loads all properties whose names pass the given filter.
     *
     * @param filter predicate applied to each property name
     * @return the matching properties, in the order reported by the Config
     */
    @Override
    public Properties loadProperties(Predicate<String> filter) {
        Properties answer = new OrderedProperties();
        for (String name : ConfigProvider.getConfig().getPropertyNames()) {
            if (filter.test(name)) {
                var value = getProperty(name);
                if (value != null) {
                    // Reuse the already-resolved value instead of a second Config
                    // lookup, which is wasteful and racy if the source changes.
                    answer.put(name, value);
                }
            }
        }
        return answer;
    }

    @Override
    public void reloadProperties(String location) {
        // noop: MicroProfile Config resolves values live, nothing to reload
    }

    @Override
    public String toString() {
        return "camel-microprofile-config";
    }
}
|
CamelMicroProfilePropertiesSource
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/java8stream/wildcard/ExtendsBoundSource.java
|
{
"start": 301,
"end": 816
}
|
class ____ {

    // Fixture exercising mapping of upper-bounded ("? extends") element types
    // for both Stream- and List-typed properties.
    private Stream<? extends Idea> elements;

    private List<? extends Idea> listElements;

    public List<? extends Idea> getListElements() {
        return listElements;
    }

    public void setListElements(List<? extends Idea> listElements) {
        this.listElements = listElements;
    }

    public Stream<? extends Idea> getElements() {
        return elements;
    }

    public void setElements(Stream<? extends Idea> elements) {
        this.elements = elements;
    }
}
|
ExtendsBoundSource
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/calcite/rel/type/RelDataTypeFactoryImpl.java
|
{
"start": 24036,
"end": 27473
}
|
/** {@code RelDataType} implementation backed by a Java {@link Class}. */
class ____ extends RelDataTypeImpl {
    // Backing Java class; drives the field list, type family and SQL type name.
    private final Class clazz;
    private final boolean nullable;
    // Only meaningful for character types (see assertion in the full constructor).
    private @Nullable SqlCollation collation;
    private @Nullable Charset charset;

    /** Creates a JavaType; primitives are non-nullable, reference types nullable. */
    public JavaType(Class clazz) {
        this(clazz, !clazz.isPrimitive());
    }

    public JavaType(Class clazz, boolean nullable) {
        this(clazz, nullable, null, null);
    }

    @SuppressWarnings("argument.type.incompatible")
    public JavaType(
            Class clazz,
            boolean nullable,
            @Nullable Charset charset,
            @Nullable SqlCollation collation) {
        super(fieldsOf(clazz));
        this.clazz = clazz;
        this.nullable = nullable;
        // A charset must be present exactly when this is a character type.
        assert (charset != null) == SqlTypeUtil.inCharFamily(this) : "Need to be a chartype";
        this.charset = charset;
        this.collation = collation;
        computeDigest();
    }

    public Class getJavaClass() {
        return clazz;
    }

    @Override
    public boolean isNullable() {
        return nullable;
    }

    @Override
    public RelDataTypeFamily getFamily() {
        // Fall back to this type acting as its own family when the class has
        // no registered family.
        RelDataTypeFamily family = CLASS_FAMILIES.get(clazz);
        return family != null ? family : this;
    }

    @Override
    protected void generateTypeString(StringBuilder sb, boolean withDetail) {
        sb.append("JavaType(");
        sb.append(clazz);
        sb.append(")");
    }

    @Override
    public @Nullable RelDataType getComponentType() {
        // Only array classes expose a component type.
        final Class componentType = clazz.getComponentType();
        if (componentType == null) {
            return null;
        } else {
            return createJavaType(componentType);
        }
    }

    /**
     * For {@link JavaType} created with {@link Map} class, we cannot get the key type. Use ANY
     * as key type.
     */
    @Override
    public @Nullable RelDataType getKeyType() {
        if (Map.class.isAssignableFrom(clazz)) {
            // Need to return a SQL type because the type inference needs SqlTypeName.
            return createSqlType(SqlTypeName.ANY);
        } else {
            return null;
        }
    }

    /**
     * For {@link JavaType} created with {@link Map} class, we cannot get the value type. Use
     * ANY as value type.
     */
    @Override
    public @Nullable RelDataType getValueType() {
        if (Map.class.isAssignableFrom(clazz)) {
            // Need to return a SQL type because the type inference needs SqlTypeName.
            return createSqlType(SqlTypeName.ANY);
        } else {
            return null;
        }
    }

    @Override
    public @Nullable Charset getCharset() {
        return this.charset;
    }

    @Override
    public @Nullable SqlCollation getCollation() {
        return this.collation;
    }

    @Override
    public SqlTypeName getSqlTypeName() {
        // OTHER is the catch-all for classes without a registered SQL mapping.
        final SqlTypeName typeName = JavaToSqlTypeConversionRules.instance().lookup(clazz);
        if (typeName == null) {
            return SqlTypeName.OTHER;
        }
        return typeName;
    }
}
/** Key to the data type cache. */
private static
|
JavaType
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/TransactionalNestedTests.java
|
{
"start": 3000,
"end": 3210
}
|
class ____ {
@Test
void transactional(@Autowired DataSource dataSource) {
assertThatTransaction().isActive();
assertThat(dataSource).isNotNull();
assertCommit();
}
@Nested
|
InheritedConfigTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/insertordering/InsertOrderingRCATest.java
|
{
"start": 2941,
"end": 3962
}
|
class ____ implements ServiceRegistryProducer {
private final PreparedStatementSpyConnectionProvider connectionProvider = new PreparedStatementSpyConnectionProvider();
// Configures the registry under test: ordered inserts/updates with a JDBC
// statement batch size of 50, and a spying connection provider so the test
// can inspect the prepared statements that were issued.
@Override
public StandardServiceRegistry produceServiceRegistry(StandardServiceRegistryBuilder builder) {
    return builder.applySetting( ORDER_INSERTS, true )
            .applySetting( ORDER_UPDATES, true )
            .applySetting( STATEMENT_BATCH_SIZE, 50 )
            .applySetting( CONNECTION_PROVIDER, connectionProvider )
            .build();
}
// Drops test data and releases the spied connections after each test.
@AfterEach
void tearDown(SessionFactoryScope factoryScope) {
    factoryScope.dropData();
    connectionProvider.stop();
}
// Persists all default RCA templates in a single transaction; with ordered
// inserts enabled this should exercise JDBC insert batching.
@Test
public void testBatching(SessionFactoryScope factoryScope) {
    factoryScope.inTransaction( session -> {
        connectionProvider.clear();
        for (RCATemplate template : DefaultTemplatesVault.getDefaultRCATemplates()) {
            session.persist(template);
        }
    });
}
@SuppressWarnings("unused")
@Entity(name = "WeightedCause")
@Table(name = "rca_weighted_cause")
public static
|
InsertOrderingRCATest
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/authzpolicy/AbstractAuthorizationPolicyTest.java
|
{
"start": 5404,
"end": 6477
}
|
class ____ @RolesAllowed("admin")
// method with @AuthorizationPolicy(policy = "permit-user")
RestAssured.given().auth().preemptive().basic("admin", "admin").get("/roles-allowed-class-authorization-policy-method")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("user", "user").get("/roles-allowed-class-authorization-policy-method")
.then().statusCode(200).body(Matchers.equalTo("user"));
// no @AuthorizationPolicy on method, therefore require admin
RestAssured.given().auth().preemptive().basic("user", "user")
.get("/roles-allowed-class-authorization-policy-method/no-authz-policy")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("admin", "admin")
.get("/roles-allowed-class-authorization-policy-method/no-authz-policy")
.then().statusCode(200).body(Matchers.equalTo("admin"));
}
@Test
public void testAuthorizationPolicyOnClassRolesAllowedOnMethod() {
//
|
with
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/deployment/src/main/java/io/quarkus/vertx/http/deployment/RouteBuildItem.java
|
{
"start": 762,
"end": 3249
}
|
class ____ extends MultiBuildItem {
public static Builder builder() {
return new Builder();
}
private final boolean management;
private final Function<Router, Route> routeFunction;
private final Handler<RoutingContext> handler;
private final HandlerType type;
private final RouteType routeType;
private final RouteType routerType;
private final NotFoundPageDisplayableEndpointBuildItem notFoundPageDisplayableEndpoint;
private final String absolutePath;
private final ConfiguredPathInfo configuredPathInfo;
RouteBuildItem(Builder builder, RouteType routeType, RouteType routerType, boolean management) {
this.routeFunction = builder.routeFunction;
this.handler = builder.handler;
this.management = management;
this.type = builder.type;
this.routeType = routeType;
this.routerType = routerType;
this.notFoundPageDisplayableEndpoint = builder.getNotFoundEndpoint();
this.configuredPathInfo = builder.getRouteConfigInfo();
this.absolutePath = builder.absolutePath;
}
public Handler<RoutingContext> getHandler() {
return handler;
}
public HandlerType getType() {
return type;
}
public Function<Router, Route> getRouteFunction() {
return routeFunction;
}
public RouteType getRouteType() {
return routeType;
}
public RouteType getRouterType() {
return routerType;
}
public boolean isRouterFramework() {
return routerType.equals(RouteType.FRAMEWORK_ROUTE);
}
public boolean isRouterApplication() {
return routerType.equals(APPLICATION_ROUTE);
}
public boolean isRouterAbsolute() {
return routerType.equals(RouteType.ABSOLUTE_ROUTE);
}
public NotFoundPageDisplayableEndpointBuildItem getNotFoundPageDisplayableEndpoint() {
return notFoundPageDisplayableEndpoint;
}
public String getAbsolutePath() {
return absolutePath;
}
public ConfiguredPathInfo getConfiguredPathInfo() {
return configuredPathInfo;
}
/**
* @return {@code true} if the route is exposing a management endpoint.
* It matters when using a different interface/port for the management endpoints, as these routes will only
* be accessible from that different interface/port.
*/
public boolean isManagement() {
return management;
}
public
|
RouteBuildItem
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/upgrade/RequestScopedHttpUpgradeCheckValidationFailureTest.java
|
{
"start": 1368,
"end": 1589
}
|
class ____ implements HttpUpgradeCheck {

    /** Always allows the HTTP-to-WebSocket upgrade, regardless of the context. */
    @Override
    public Uni<CheckResult> perform(HttpUpgradeContext context) {
        Uni<CheckResult> permitted = CheckResult.permitUpgrade();
        return permitted;
    }
}
}
|
RequestScopedHttpUpgradeCheck
|
java
|
apache__dubbo
|
dubbo-rpc/dubbo-rpc-triple/src/test/java/org/apache/dubbo/rpc/protocol/tri/SingleProtobufUtilsTest.java
|
{
"start": 1659,
"end": 3746
}
|
class ____ {

    /**
     * Covers {@code SingleProtobufUtils}: support detection for well-known
     * protobuf types, default-instance and parser lookup, and a
     * serialize/deserialize round trip.
     */
    @Test
    void test() throws IOException {
        // A non-protobuf class must not be reported as supported.
        Assertions.assertFalse(SingleProtobufUtils.isSupported(SingleProtobufUtilsTest.class));
        // All well-known protobuf wrapper/value types are supported.
        Assertions.assertTrue(SingleProtobufUtils.isSupported(Empty.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(BoolValue.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(Int32Value.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(Int64Value.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(FloatValue.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(DoubleValue.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(BytesValue.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(StringValue.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(EnumValue.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(ListValue.class));
        // Generated service messages are supported as well.
        Assertions.assertTrue(SingleProtobufUtils.isSupported(HealthCheckResponse.class));
        Assertions.assertTrue(SingleProtobufUtils.isSupported(HealthCheckRequest.class));
        // Default instance and parser can be resolved for a generated message.
        Message message = SingleProtobufUtils.defaultInst(HealthCheckRequest.class);
        Assertions.assertNotNull(message);
        Parser<HealthCheckRequest> parser = SingleProtobufUtils.getParser(HealthCheckRequest.class);
        Assertions.assertNotNull(parser);
        // Round trip: serialize a wrapper message and read it back unchanged.
        TripleWrapper.TripleRequestWrapper requestWrapper = TripleWrapper.TripleRequestWrapper.newBuilder()
                .setSerializeType("hessian4")
                .build();
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        SingleProtobufUtils.serialize(requestWrapper, bos);
        ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
        TripleWrapper.TripleRequestWrapper tripleRequestWrapper =
                SingleProtobufUtils.deserialize(bis, TripleWrapper.TripleRequestWrapper.class);
        Assertions.assertEquals(tripleRequestWrapper.getSerializeType(), "hessian4");
    }
}
|
SingleProtobufUtilsTest
|
java
|
elastic__elasticsearch
|
modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java
|
{
"start": 1685,
"end": 17850
}
|
class ____ extends ESIntegTestCase {
// Registers the data streams plugin plus a mock transport service for the
// internal test cluster nodes.
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
    return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class);
}
public void testGetLifecycle() throws Exception {
DataStreamLifecycle.Template lifecycle = randomDataLifecycleTemplate();
putComposableIndexTemplate("id1", null, List.of("with-lifecycle*"), null, null, lifecycle);
putComposableIndexTemplate("id2", null, List.of("without-lifecycle*"), null, null, null);
{
String dataStreamName = "with-lifecycle-1";
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
dataStreamName
);
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
}
{
String dataStreamName = "with-lifecycle-2";
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
dataStreamName
);
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
}
{
String dataStreamName = "without-lifecycle";
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
dataStreamName
);
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
}
// Test retrieving all lifecycles
{
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "*" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(3));
assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("with-lifecycle-1"));
assertThat(response.getDataStreamLifecycles().get(0).lifecycle(), equalTo(lifecycle.toDataStreamLifecycle()));
assertThat(response.getDataStreamLifecycles().get(1).dataStreamName(), equalTo("with-lifecycle-2"));
assertThat(response.getDataStreamLifecycles().get(1).lifecycle(), equalTo(lifecycle.toDataStreamLifecycle()));
assertThat(response.getDataStreamLifecycles().get(2).dataStreamName(), equalTo("without-lifecycle"));
assertThat(response.getDataStreamLifecycles().get(2).lifecycle(), is(nullValue()));
assertThat(response.getRolloverConfiguration(), nullValue());
}
// Test retrieving all lifecycles prefixed wildcard
{
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "with-lifecycle*" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(2));
assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("with-lifecycle-1"));
assertThat(response.getDataStreamLifecycles().get(0).lifecycle(), equalTo(lifecycle.toDataStreamLifecycle()));
assertThat(response.getDataStreamLifecycles().get(1).dataStreamName(), equalTo("with-lifecycle-2"));
assertThat(response.getDataStreamLifecycles().get(1).lifecycle(), is(lifecycle.toDataStreamLifecycle()));
assertThat(response.getRolloverConfiguration(), nullValue());
}
// Test retrieving concrete data streams
{
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "with-lifecycle-1", "with-lifecycle-2" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(2));
assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("with-lifecycle-1"));
assertThat(response.getDataStreamLifecycles().get(0).lifecycle(), equalTo(lifecycle.toDataStreamLifecycle()));
assertThat(response.getRolloverConfiguration(), nullValue());
}
// Test include defaults
GetDataStreamLifecycleAction.Request getDataLifecycleRequestWithDefaults = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "*" }
).includeDefaults(true);
GetDataStreamLifecycleAction.Response responseWithRollover = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequestWithDefaults
).get();
assertThat(responseWithRollover.getDataStreamLifecycles().size(), equalTo(3));
assertThat(responseWithRollover.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("with-lifecycle-1"));
assertThat(responseWithRollover.getDataStreamLifecycles().get(0).lifecycle(), equalTo(lifecycle.toDataStreamLifecycle()));
assertThat(responseWithRollover.getDataStreamLifecycles().get(1).dataStreamName(), equalTo("with-lifecycle-2"));
assertThat(responseWithRollover.getDataStreamLifecycles().get(1).lifecycle(), equalTo(lifecycle.toDataStreamLifecycle()));
assertThat(responseWithRollover.getDataStreamLifecycles().get(2).dataStreamName(), equalTo("without-lifecycle"));
assertThat(responseWithRollover.getDataStreamLifecycles().get(2).lifecycle(), is(nullValue()));
assertThat(responseWithRollover.getRolloverConfiguration(), notNullValue());
}
public void testPutLifecycle() throws Exception {
putComposableIndexTemplate("id1", null, List.of("my-data-stream*"), null, null, null);
// Create index without a lifecycle
String dataStreamName = "my-data-stream";
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
dataStreamName
);
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
{
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "my-data-stream" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().isEmpty(), equalTo(false));
GetDataStreamLifecycleAction.Response.DataStreamLifecycle dataStreamLifecycle = response.getDataStreamLifecycles().get(0);
assertThat(dataStreamLifecycle.dataStreamName(), is(dataStreamName));
assertThat(dataStreamLifecycle.lifecycle(), is(nullValue()));
}
// Set lifecycle
{
TimeValue dataRetention = randomBoolean() ? null : TimeValue.timeValueMillis(randomMillisUpToYear9999());
PutDataStreamLifecycleAction.Request putDataLifecycleRequest = new PutDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
new String[] { "*" },
dataRetention
);
assertThat(
client().execute(PutDataStreamLifecycleAction.INSTANCE, putDataLifecycleRequest).get().isAcknowledged(),
equalTo(true)
);
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "my-data-stream" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(1));
assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("my-data-stream"));
assertThat(response.getDataStreamLifecycles().get(0).lifecycle().dataRetention(), equalTo(dataRetention));
assertThat(response.getDataStreamLifecycles().get(0).lifecycle().enabled(), equalTo(true));
}
// Disable the lifecycle
{
TimeValue dataRetention = randomBoolean() ? null : TimeValue.timeValueMillis(randomMillisUpToYear9999());
PutDataStreamLifecycleAction.Request putDataLifecycleRequest = new PutDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
new String[] { "*" },
dataRetention,
false
);
assertThat(
client().execute(PutDataStreamLifecycleAction.INSTANCE, putDataLifecycleRequest).get().isAcknowledged(),
equalTo(true)
);
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "my-data-stream" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(1));
assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("my-data-stream"));
assertThat(response.getDataStreamLifecycles().get(0).lifecycle().dataRetention(), equalTo(dataRetention));
assertThat(response.getDataStreamLifecycles().get(0).lifecycle().enabled(), equalTo(false));
}
}
public void testDeleteLifecycle() throws Exception {
DataStreamLifecycle.Template lifecycle = DataStreamLifecycle.dataLifecycleBuilder()
.dataRetention(randomTimeValueGreaterThan(TimeValue.timeValueSeconds(10)))
.buildTemplate();
putComposableIndexTemplate("id1", null, List.of("with-lifecycle*"), null, null, lifecycle);
putComposableIndexTemplate("id2", null, List.of("without-lifecycle*"), null, null, null);
{
String dataStreamName = "with-lifecycle-1";
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
dataStreamName
);
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
}
{
String dataStreamName = "with-lifecycle-2";
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
dataStreamName
);
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
}
{
String dataStreamName = "with-lifecycle-3";
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
dataStreamName
);
client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
}
// Verify that we have 3 data streams with lifecycles
{
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "with-lifecycle*" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(3));
}
// Remove lifecycle from concrete data stream
{
DeleteDataStreamLifecycleAction.Request deleteDataLifecycleRequest = new DeleteDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
new String[] { "with-lifecycle-1" }
);
assertThat(
client().execute(DeleteDataStreamLifecycleAction.INSTANCE, deleteDataLifecycleRequest).get().isAcknowledged(),
equalTo(true)
);
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "with-lifecycle*" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(3));
GetDataStreamLifecycleAction.Response.DataStreamLifecycle dataStreamLifecycle = response.getDataStreamLifecycles().get(0);
assertThat(dataStreamLifecycle.dataStreamName(), is("with-lifecycle-1"));
assertThat(dataStreamLifecycle.lifecycle(), is(nullValue()));
assertThat(response.getDataStreamLifecycles().get(1).dataStreamName(), equalTo("with-lifecycle-2"));
assertThat(response.getDataStreamLifecycles().get(2).dataStreamName(), equalTo("with-lifecycle-3"));
}
// Remove lifecycle from all data streams
{
DeleteDataStreamLifecycleAction.Request deleteDataLifecycleRequest = new DeleteDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
new String[] { "*" }
);
assertThat(
client().execute(DeleteDataStreamLifecycleAction.INSTANCE, deleteDataLifecycleRequest).get().isAcknowledged(),
equalTo(true)
);
GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(
TEST_REQUEST_TIMEOUT,
new String[] { "with-lifecycle*" }
);
GetDataStreamLifecycleAction.Response response = client().execute(
GetDataStreamLifecycleAction.INSTANCE,
getDataLifecycleRequest
).get();
assertThat(response.getDataStreamLifecycles().size(), equalTo(3));
assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("with-lifecycle-1"));
assertThat(response.getDataStreamLifecycles().get(1).dataStreamName(), equalTo("with-lifecycle-2"));
assertThat(response.getDataStreamLifecycles().get(2).dataStreamName(), equalTo("with-lifecycle-3"));
for (GetDataStreamLifecycleAction.Response.DataStreamLifecycle dataStreamLifecycle : response.getDataStreamLifecycles()) {
assertThat(dataStreamLifecycle.lifecycle(), nullValue());
}
}
}
}
|
CrudDataStreamLifecycleIT
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/compliance/AnnotationConverterAndEmbeddableTest2.java
|
{
"start": 1027,
"end": 2110
}
|
class ____ {
private static final String EXPECTED_ERROR_MESSAGE = "Exception was thrown by IntegerToStringConverter";
@Test
public void testConverterIsCorrectlyApplied(EntityManagerFactoryScope scope) {
SQLStatementInspector sqlStatementInspector = (SQLStatementInspector) scope.getStatementInspector();
sqlStatementInspector.clear();
scope.inTransaction(
entityManager -> {
Person b = new Person(
1,
"and n.",
new Address( "Localita S. Egidio n. 5", "Gradoli" )
);
entityManager.persist( b );
}
);
List<String> sqlQueries = sqlStatementInspector.getSqlQueries();
assertThat( sqlQueries.size() ).isEqualTo( 1 );
sqlStatementInspector.assertIsInsert( 0 );
String query = sqlQueries.get( 0 );
assertThat( query.contains( "Localita S. Egidio # 5" ) );
assertThat( query.contains( "and #" ) );
}
@Entity(name = "Person")
@Converts(
value = {
@Convert(attributeName = "name", converter = AnnotationConverterAndEmbeddableTest2.StreetConverter.class),
}
)
public static
|
AnnotationConverterAndEmbeddableTest2
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/annotion_many_one_add_resultmapid/OneManyResultMapTest.java
|
{
"start": 1203,
"end": 3357
}
|
class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create an SqlSessionFactory
try (Reader reader = Resources
.getResourceAsReader("org/apache/ibatis/submitted/annotion_many_one_add_resultmapid/SqlMapConfig.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/annotion_many_one_add_resultmapid/CreateDB.sql");
}
@Test
void shouldUseResultMapWithMany() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
UserDao mapper = sqlSession.getMapper(UserDao.class);
List<User> users = mapper.findAll();
assertNotNull(users);
assertEquals(4, users.size());
assertEquals(2, users.get(0).getRoles().size());
}
}
@Test
void shouldUseResultMapInXmlWithMany() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
UserDao mapper = sqlSession.getMapper(UserDao.class);
List<User> users = mapper.findAll2();
assertNotNull(users);
assertEquals(4, users.size());
assertEquals(2, users.get(0).getRoles().size());
}
}
@Test
void shouldUseResultMapWithOne() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
UserDao mapper = sqlSession.getMapper(UserDao.class);
List<User> users = mapper.findAll3();
assertNotNull(users);
assertEquals(2, users.size());
assertNotNull(users.get(0).getRole());
assertEquals("teacher", users.get(0).getRole().getRoleName());
}
}
@Test
void shouldResolveResultMapInTheSameNamespace() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
UserDao mapper = sqlSession.getMapper(UserDao.class);
User headmaster = mapper.findHeadmaster();
assertNotNull(headmaster);
assertEquals(3, headmaster.getTeachers().size());
assertEquals("Doug Lea", headmaster.getTeachers().get(0).getUsername());
}
}
}
|
OneManyResultMapTest
|
java
|
apache__camel
|
components/camel-google/camel-google-calendar/src/generated/java/org/apache/camel/component/google/calendar/internal/CalendarColorsApiMethod.java
|
{
"start": 673,
"end": 1560
}
|
enum ____ implements ApiMethod {
GET(
com.google.api.services.calendar.Calendar.Colors.Get.class,
"get");
private final ApiMethod apiMethod;
CalendarColorsApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(Colors.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
|
CalendarColorsApiMethod
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/GroupsPerOperation.java
|
{
"start": 3715,
"end": 4787
}
|
enum ____ {
INSERT( "persist", JPA_GROUP_PREFIX + "pre-persist", JAKARTA_JPA_GROUP_PREFIX + "pre-persist" ),
UPDATE( "update", JPA_GROUP_PREFIX + "pre-update", JAKARTA_JPA_GROUP_PREFIX + "pre-update" ),
DELETE( "remove", JPA_GROUP_PREFIX + "pre-remove", JAKARTA_JPA_GROUP_PREFIX + "pre-remove" ),
UPSERT( "upsert", JPA_GROUP_PREFIX + "pre-upsert", JAKARTA_JPA_GROUP_PREFIX + "pre-upsert" ),
DDL( "ddl", HIBERNATE_GROUP_PREFIX + "ddl", HIBERNATE_GROUP_PREFIX + "ddl" );
private final String exposedName;
private final String groupPropertyName;
private final String jakartaGroupPropertyName;
Operation(String exposedName, String groupProperty, String jakartaGroupPropertyName) {
this.exposedName = exposedName;
this.groupPropertyName = groupProperty;
this.jakartaGroupPropertyName = jakartaGroupPropertyName;
}
public String getName() {
return exposedName;
}
public String getGroupPropertyName() {
return groupPropertyName;
}
public String getJakartaGroupPropertyName() {
return jakartaGroupPropertyName;
}
}
}
|
Operation
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/JpaQueryLookupStrategy.java
|
{
"start": 11301,
"end": 11617
}
|
class ____ implements RepositoryQuery {
@Override
public Object execute(Object[] parameters) {
throw new IllegalStateException("NoQuery should not be executed!");
}
@Override
public QueryMethod getQueryMethod() {
throw new IllegalStateException("NoQuery does not have a QueryMethod!");
}
}
}
|
NoQuery
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/rx/RedissonSetRx.java
|
{
"start": 1066,
"end": 3271
}
|
class ____<V> {
private final RSet<V> instance;
private final RedissonRxClient redisson;
public RedissonSetRx(RSet<V> instance, RedissonRxClient redisson) {
this.instance = instance;
this.redisson = redisson;
}
public Single<Boolean> addAll(Publisher<? extends V> c) {
return new PublisherAdder<Object>() {
@Override
public RFuture<Boolean> add(Object e) {
return instance.addAsync((V) e);
}
}.addAll(c);
}
public Flowable<V> iterator(int count) {
return iterator(null, count);
}
public Flowable<V> iterator(String pattern) {
return iterator(pattern, 10);
}
public Flowable<V> iterator(String pattern, int count) {
return new SetRxIterator<V>() {
@Override
protected RFuture<ScanResult<Object>> scanIterator(RedisClient client, String nextIterPos) {
return ((ScanIterator) instance).scanIteratorAsync(((RedissonObject) instance).getRawName(), client, nextIterPos, pattern, count);
}
}.create();
}
public Publisher<V> iterator() {
return iterator(null, 10);
}
public RPermitExpirableSemaphoreRx getPermitExpirableSemaphore(V value) {
String name = ((RedissonObject) instance).getLockByValue(value, "permitexpirablesemaphore");
return redisson.getPermitExpirableSemaphore(name);
}
public RSemaphoreRx getSemaphore(V value) {
String name = ((RedissonObject) instance).getLockByValue(value, "semaphore");
return redisson.getSemaphore(name);
}
public RLockRx getFairLock(V value) {
String name = ((RedissonObject) instance).getLockByValue(value, "fairlock");
return redisson.getFairLock(name);
}
public RReadWriteLockRx getReadWriteLock(V value) {
String name = ((RedissonObject) instance).getLockByValue(value, "rw_lock");
return redisson.getReadWriteLock(name);
}
public RLockRx getLock(V value) {
String name = ((RedissonObject) instance).getLockByValue(value, "lock");
return redisson.getLock(name);
}
}
|
RedissonSetRx
|
java
|
playframework__playframework
|
core/play/src/main/java/play/libs/crypto/DefaultCookieSigner.java
|
{
"start": 320,
"end": 1210
}
|
class ____ implements CookieSigner {
private final play.api.libs.crypto.CookieSigner signer;
@Inject
public DefaultCookieSigner(play.api.libs.crypto.CookieSigner signer) {
this.signer = signer;
}
/**
* Signs the given String using the application's secret key.
*
* @param message The message to sign.
* @return A hexadecimal encoded signature.
*/
@Override
public String sign(String message) {
return signer.sign(message);
}
/**
* Signs the given String using the given key. <br>
*
* @param message The message to sign.
* @param key The private key to sign with.
* @return A hexadecimal encoded signature.
*/
@Override
public String sign(String message, byte[] key) {
return signer.sign(message, key);
}
@Override
public play.api.libs.crypto.CookieSigner asScala() {
return this.signer;
}
}
|
DefaultCookieSigner
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/http/impl/HttpClientConnection.java
|
{
"start": 935,
"end": 3037
}
|
interface ____ extends HttpConnection {
Logger log = LoggerFactory.getLogger(HttpClientConnection.class);
Handler<Void> DEFAULT_EVICTION_HANDLER = v -> {
log.warn("Connection evicted");
};
Handler<Long> DEFAULT_CONCURRENCY_CHANGE_HANDLER = concurrency -> {};
MultiMap newHttpRequestHeaders();
/**
* @return the number of active request/response (streams)
*/
long activeStreams();
/**
* @return the max number of active streams this connection can handle concurrently
*/
long concurrency();
/**
* @return the connection authority
*/
HostAndPort authority();
/**
* Set a {@code handler} called when the connection should be evicted from a pool.
*
* @param handler the handler
* @return a reference to this, so the API can be used fluently
*/
HttpClientConnection evictionHandler(Handler<Void> handler);
/**
* Set a {@code handler} called when the connection receives invalid messages.
*
* @param handler the handler
* @return a reference to this, so the API can be used fluently
*/
HttpClientConnection invalidMessageHandler(Handler<Object> handler);
/**
* Set a {@code handler} called when the connection concurrency changes.
* The handler is called with the new concurrency.
*
* @param handler the handler
* @return a reference to this, so the API can be used fluently
*/
HttpClientConnection concurrencyChangeHandler(Handler<Long> handler);
/**
* @return the {@link ChannelHandlerContext} of the handler managing the connection
*/
ChannelHandlerContext channelHandlerContext();
/**
* Create an HTTP stream.
*
* @param context the stream context
* @return a future notified with the created stream
*/
Future<HttpClientStream> createStream(ContextInternal context);
/**
* @return the connection context
*/
ContextInternal context();
boolean isValid();
Object metric();
/**
* @return the timestamp of the last received response - this is used for LIFO connection pooling
*/
long lastResponseReceivedTimestamp();
}
|
HttpClientConnection
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/typesafe/Monks.java
|
{
"start": 135,
"end": 200
}
|
class ____ {
@CheckedTemplate(basePath = "foo")
static
|
Monks
|
java
|
apache__flink
|
flink-datastream/src/test/java/org/apache/flink/datastream/impl/operators/TwoInputBroadcastProcessOperatorTest.java
|
{
"start": 1571,
"end": 6503
}
|
class ____ {
@Test
void testProcessRecord() throws Exception {
List<Long> fromNonBroadcastInput = new ArrayList<>();
List<Long> fromBroadcastInput = new ArrayList<>();
TwoInputBroadcastProcessOperator<Integer, Long, Long> processOperator =
new TwoInputBroadcastProcessOperator<>(
new TwoInputBroadcastStreamProcessFunction<Integer, Long, Long>() {
@Override
public void processRecordFromNonBroadcastInput(
Integer record,
Collector<Long> output,
PartitionedContext<Long> ctx) {
fromNonBroadcastInput.add(Long.valueOf(record));
}
@Override
public void processRecordFromBroadcastInput(
Long record, NonPartitionedContext<Long> ctx) {
fromBroadcastInput.add(record);
}
});
try (TwoInputStreamOperatorTestHarness<Integer, Long, Long> testHarness =
new TwoInputStreamOperatorTestHarness<>(processOperator)) {
testHarness.open();
testHarness.processElement1(new StreamRecord<>(1));
testHarness.processElement2(new StreamRecord<>(2L));
testHarness.processElement1(new StreamRecord<>(3));
testHarness.processElement1(new StreamRecord<>(5));
testHarness.processElement2(new StreamRecord<>(4L));
assertThat(fromNonBroadcastInput).containsExactly(1L, 3L, 5L);
assertThat(fromBroadcastInput).containsExactly(2L, 4L);
}
}
@Test
void testEndInput() throws Exception {
AtomicInteger nonBroadcastInputCounter = new AtomicInteger();
AtomicInteger broadcastInputCounter = new AtomicInteger();
TwoInputBroadcastProcessOperator<Integer, Long, Long> processOperator =
new TwoInputBroadcastProcessOperator<>(
new TwoInputBroadcastStreamProcessFunction<Integer, Long, Long>() {
@Override
public void processRecordFromNonBroadcastInput(
Integer record,
Collector<Long> output,
PartitionedContext<Long> ctx) {
// do nothing.
}
@Override
public void processRecordFromBroadcastInput(
Long record, NonPartitionedContext<Long> ctx) {
// do nothing.
}
@Override
public void endNonBroadcastInput(NonPartitionedContext<Long> ctx) {
try {
ctx.applyToAllPartitions(
(out, context) -> {
nonBroadcastInputCounter.incrementAndGet();
out.collect(1L);
});
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void endBroadcastInput(NonPartitionedContext<Long> ctx) {
try {
ctx.applyToAllPartitions(
(out, context) -> {
broadcastInputCounter.incrementAndGet();
out.collect(2L);
});
} catch (Exception e) {
throw new RuntimeException(e);
}
}
});
try (TwoInputStreamOperatorTestHarness<Integer, Long, Long> testHarness =
new TwoInputStreamOperatorTestHarness<>(processOperator)) {
testHarness.open();
testHarness.endInput1();
assertThat(nonBroadcastInputCounter).hasValue(1);
testHarness.endInput2();
assertThat(broadcastInputCounter).hasValue(1);
Collection<StreamRecord<Long>> recordOutput = testHarness.getRecordOutput();
assertThat(recordOutput)
.containsExactly(new StreamRecord<>(1L), new StreamRecord<>(2L));
}
}
}
|
TwoInputBroadcastProcessOperatorTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/support/WriteRequest.java
|
{
"start": 2380,
"end": 4404
}
|
enum ____ implements Writeable {
/**
* Don't refresh after this request. The default.
*/
NONE("false"),
/**
* Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful
* to present a consistent view to for indices with very low traffic. And it is wonderful for tests!
*/
IMMEDIATE("true"),
/**
* Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is
* compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs.
*/
WAIT_UNTIL("wait_for");
private final String value;
RefreshPolicy(String value) {
this.value = value;
}
public String getValue() {
return value;
}
/**
* Parse the string representation of a refresh policy, usually from a request parameter.
*/
public static RefreshPolicy parse(String value) {
for (RefreshPolicy policy : values()) {
if (policy.getValue().equals(value)) {
return policy;
}
}
if ("".equals(value)) {
// Empty string is IMMEDIATE because that makes "POST /test/test/1?refresh" perform
// a refresh which reads well and is what folks are used to.
return IMMEDIATE;
}
throw new IllegalArgumentException("Unknown value for refresh: [" + value + "].");
}
private static final RefreshPolicy[] values = values();
public static RefreshPolicy readFrom(StreamInput in) throws IOException {
return values[in.readByte()];
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte((byte) ordinal());
}
}
}
|
RefreshPolicy
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/collectionincompatibletype/ContainmentMatchers.java
|
{
"start": 5110,
"end": 6583
}
|
class ____ defines the method
0, // index of the owning class's type argument to extract
0, // index of the method argument whose type argument to extract
"java.util.Collection", // type of the method argument
0, // index of the method argument's type argument to extract
"retainAll", // method name
"java.util.Collection")); // method parameter
private static final ImmutableList<BinopMatcher> STATIC_MATCHERS =
ImmutableList.of(
new BinopMatcher("java.util.Collection", "java.util.Collections", "disjoint"),
new BinopMatcher("java.util.Set", "com.google.common.collect.Sets", "difference"));
private static final ImmutableList<AbstractCollectionIncompatibleTypeMatcher> ALL_MATCHERS =
ImmutableList.<AbstractCollectionIncompatibleTypeMatcher>builder()
.addAll(DIRECT_MATCHERS)
.addAll(TYPE_ARG_MATCHERS)
.addAll(STATIC_MATCHERS)
.build();
public static @Nullable MatchResult firstNonNullMatchResult(
ExpressionTree tree, VisitorState state) {
if (!FIRST_ORDER_MATCHER.matches(tree, state)) {
return null;
}
for (AbstractCollectionIncompatibleTypeMatcher matcher : ContainmentMatchers.ALL_MATCHERS) {
MatchResult result = matcher.matches(tree, state);
if (result != null) {
return result;
}
}
return null;
}
private ContainmentMatchers() {}
}
|
that
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/NoStaticsDeserTest.java
|
{
"start": 360,
"end": 997
}
|
class ____
{
int _x;
public static void setX(int value) { throw new Error("Should NOT call static method"); }
@JsonProperty("x") public void assignX(int x) { _x = x; }
}
/*
/**********************************************************
/* Test methods
/**********************************************************
*/
@Test
public void testSimpleIgnore() throws Exception
{
ObjectMapper m = new ObjectMapper();
// should not care about static setter...
Bean result = m.readValue("{ \"x\":3}", Bean.class);
assertEquals(3, result._x);
}
}
|
Bean
|
java
|
alibaba__nacos
|
console/src/main/java/com/alibaba/nacos/console/handler/impl/inner/HealthInnerHandler.java
|
{
"start": 1100,
"end": 1459
}
|
class ____ implements HealthHandler {
@Override
public Result<String> checkReadiness() {
ReadinessResult result = ModuleHealthCheckerHolder.getInstance().checkReadiness();
if (result.isSuccess()) {
return Result.success("ok");
}
return Result.failure(result.getResultMessage());
}
}
|
HealthInnerHandler
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-kms/src/test/java/org/apache/camel/component/aws2/kms/localstack/KmsListKeysIT.java
|
{
"start": 1573,
"end": 3040
}
|
class ____ extends Aws2KmsBase {
@EndpointInject
private ProducerTemplate template;
@EndpointInject("mock:result")
private MockEndpoint result;
@Test
public void sendIn() throws Exception {
result.expectedMessageCount(1);
template.send("direct:createKey", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(KMS2Constants.OPERATION, "createKey");
}
});
template.send("direct:listKeys", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(KMS2Constants.OPERATION, "listKeys");
}
});
MockEndpoint.assertIsSatisfied(context);
assertEquals(1, result.getExchanges().size());
assertTrue(result.getExchanges().get(0).getIn().getBody(ListKeysResponse.class).hasKeys());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
String awsEndpoint
= "aws2-kms://default?operation=createKey";
String listKeys = "aws2-kms://default?operation=listKeys";
from("direct:createKey").to(awsEndpoint);
from("direct:listKeys").to(listKeys).to("mock:result");
}
};
}
}
|
KmsListKeysIT
|
java
|
apache__avro
|
lang/java/mapred/src/main/java/org/apache/avro/mapred/tether/TetherJob.java
|
{
"start": 1456,
"end": 5575
}
|
class ____ extends Configured {
public static final String TETHER_EXEC = "avro.tether.executable";
public static final String TETHER_EXEC_ARGS = "avro.tether.executable_args";
public static final String TETHER_EXEC_CACHED = "avro.tether.executable_cached";
public static final String TETHER_PROTOCOL = "avro.tether.protocol";
/** Get the URI of the application's executable. */
public static URI getExecutable(JobConf job) {
try {
return new URI(job.get("avro.tether.executable"));
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
/** Set the URI for the application's executable. Normally this in HDFS. */
public static void setExecutable(JobConf job, File executable) {
setExecutable(job, executable, Collections.emptyList(), false);
}
/**
* Set the URI for the application's executable (i.e the program to run in a
* subprocess and provides the mapper/reducer).
*
* @param job - Job
* @param executable - The URI of the executable
* @param args - List of additional arguments; Null if no arguments
* @param cached - If true, the executable URI is cached using
* DistributedCache - if false its not cached. I.e if the file
* is already stored on each local file system or if its on a
* NFS share
*/
public static void setExecutable(JobConf job, File executable, List<String> args, boolean cached) {
job.set(TETHER_EXEC, executable.toString());
if (args != null) {
StringBuilder sb = new StringBuilder();
for (String a : args) {
sb.append(a);
sb.append('\n');
}
job.set(TETHER_EXEC_ARGS, sb.toString());
}
job.set(TETHER_EXEC_CACHED, (Boolean.valueOf(cached)).toString());
}
/**
* Extract from the job configuration file an instance of the TRANSPROTO
* enumeration to represent the protocol to use for the communication
*
* @param job
* @return - Get the currently used protocol
*/
public static TetheredProcess.Protocol getProtocol(JobConf job) {
if (job.get(TetherJob.TETHER_PROTOCOL) == null) {
return TetheredProcess.Protocol.NONE;
} else if (job.get(TetherJob.TETHER_PROTOCOL).equals("http")) {
return TetheredProcess.Protocol.HTTP;
} else if (job.get(TetherJob.TETHER_PROTOCOL).equals("sasl")) {
return TetheredProcess.Protocol.SASL;
} else {
throw new RuntimeException("Unknown value for protocol: " + job.get(TetherJob.TETHER_PROTOCOL));
}
}
/**
* Submit a job to the map/reduce cluster. All of the necessary modifications to
* the job to run under tether are made to the configuration.
*/
public static RunningJob runJob(JobConf job) throws IOException {
setupTetherJob(job);
return JobClient.runJob(job);
}
/** Submit a job to the Map-Reduce framework. */
public static RunningJob submitJob(JobConf conf) throws IOException {
setupTetherJob(conf);
return new JobClient(conf).submitJob(conf);
}
/**
* Determines which transport protocol (e.g http or sasl) used to communicate
* between the parent and subprocess
*
* @param job - job configuration
* @param proto - String identifying the protocol currently http or sasl
*/
public static void setProtocol(JobConf job, String proto) throws IOException {
proto = proto.trim().toLowerCase();
if (!(proto.equals("http") || proto.equals("sasl"))) {
throw new IOException("protocol must be 'http' or 'sasl'");
}
job.set(TETHER_PROTOCOL, proto);
}
private static void setupTetherJob(JobConf job) throws IOException {
job.setMapRunnerClass(TetherMapRunner.class);
job.setPartitionerClass(TetherPartitioner.class);
job.setReducerClass(TetherReducer.class);
job.setInputFormat(TetherInputFormat.class);
job.setOutputFormat(TetherOutputFormat.class);
job.setOutputKeyClass(TetherData.class);
job.setOutputKeyComparatorClass(TetherKeyComparator.class);
job.setMapOutputValueClass(NullWritable.class);
// set the map output key
|
TetherJob
|
java
|
elastic__elasticsearch
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ToXContent.java
|
{
"start": 608,
"end": 911
}
|
interface ____ to transfer an object to "XContent" using an {@link XContentBuilder}.
* The output may or may not be a value object. Objects implementing {@link ToXContentObject} output a valid value
* but those that don't may or may not require emitting a startObject and an endObject.
*/
public
|
allowing
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/example/java/WordCountSubclassInterfacePOJOITCase.java
|
{
"start": 1902,
"end": 4475
}
|
class ____ extends JavaProgramTestBaseJUnit4
implements Serializable {
private static final long serialVersionUID = 1L;
protected String textPath;
protected String resultPath;
@Override
protected void preSubmit() throws Exception {
textPath = createTempFile("text.txt", WordCountData.TEXT);
resultPath = getTempDirPath("result");
}
@Override
protected void postSubmit() throws Exception {
compareResultsByLinesInMemory(WordCountData.COUNTS, resultPath);
}
@Override
protected void testProgram() throws Exception {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStreamSource<String> text = env.createInput(new TextInputFormat(new Path(textPath)));
DataStream<WCBase> counts =
text.flatMap(new Tokenizer())
.keyBy(x -> x.word)
.window(GlobalWindows.createWithEndOfStreamTrigger())
.reduce(
new ReduceFunction<WCBase>() {
private static final long serialVersionUID = 1L;
public WCBase reduce(WCBase value1, WCBase value2) {
WC wc1 = (WC) value1;
WC wc2 = (WC) value2;
int c =
wc1.secretCount.getCount()
+ wc2.secretCount.getCount();
wc1.secretCount.setCount(c);
return wc1;
}
})
.map(
new MapFunction<WCBase, WCBase>() {
@Override
public WCBase map(WCBase value) throws Exception {
WC wc = (WC) value;
wc.count = wc.secretCount.getCount();
return wc;
}
});
counts.sinkTo(
FileSink.forRowFormat(new Path(resultPath), new SimpleStringEncoder<WCBase>())
.build());
env.execute("WordCount with custom data types example");
}
private static final
|
WordCountSubclassInterfacePOJOITCase
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/support/json/JSONParserTest4.java
|
{
"start": 135,
"end": 330
}
|
class ____ extends TestCase {
public void test_parse() throws Exception {
String text = "{\"\\u0006\":\"123\"}";
System.out.println(JSONUtils.parse(text));
}
}
|
JSONParserTest4
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/data/RecordFieldEntityTest.java
|
{
"start": 1600,
"end": 1978
}
|
class ____ {
@Id
private Integer id;
@Embedded
private TestRecord testRecord;
public Integer getId() {
return id;
}
static WithRecord of(int id, String foo, String bar) {
WithRecord withRecord = new WithRecord();
withRecord.id = id;
withRecord.testRecord = new TestRecord( foo, bar );
return withRecord;
}
}
@Entity
@Audited
static
|
WithRecord
|
java
|
apache__dubbo
|
dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/retry/AbstractRetryTask.java
|
{
"start": 1711,
"end": 5168
}
|
class ____ implements TimerTask {
protected final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(getClass());
/**
* url for retry task
*/
protected final URL url;
/**
* registry for this task
*/
protected final FailbackRegistry registry;
/**
* retry period
*/
private final long retryPeriod;
/**
* define the most retry times
*/
private final int retryTimes;
/**
* task name for this task
*/
private final String taskName;
/**
* times of retry.
* retry task is execute in single thread so that the times is not need volatile.
*/
private int times = 1;
private volatile boolean cancel;
AbstractRetryTask(URL url, FailbackRegistry registry, String taskName) {
if (url == null || StringUtils.isBlank(taskName)) {
throw new IllegalArgumentException();
}
this.url = url;
this.registry = registry;
this.taskName = taskName;
this.cancel = false;
this.retryPeriod = url.getParameter(REGISTRY_RETRY_PERIOD_KEY, DEFAULT_REGISTRY_RETRY_PERIOD);
this.retryTimes = url.getParameter(REGISTRY_RETRY_TIMES_KEY, DEFAULT_REGISTRY_RETRY_TIMES);
}
public void cancel() {
cancel = true;
}
public boolean isCancel() {
return cancel;
}
protected void reput(Timeout timeout, long tick) {
if (timeout == null) {
throw new IllegalArgumentException();
}
Timer timer = timeout.timer();
if (timer.isStop() || timeout.isCancelled() || isCancel()) {
return;
}
times++;
timer.newTimeout(timeout.task(), tick, TimeUnit.MILLISECONDS);
}
@Override
public void run(Timeout timeout) throws Exception {
if (timeout.isCancelled() || timeout.timer().isStop() || isCancel()) {
// other thread cancel this timeout or stop the timer.
return;
}
if (retryTimes > 0 && times > retryTimes) {
// 1-13 - failed to execute the retrying task.
logger.warn(
REGISTRY_EXECUTE_RETRYING_TASK,
"registry center offline",
"Check the registry server.",
"Final failed to execute task " + taskName + ", url: " + url + ", retry " + retryTimes + " times.");
return;
}
if (logger.isInfoEnabled()) {
logger.info(taskName + " : " + url);
}
try {
if (!registry.isAvailable()) {
throw new IllegalStateException("Registry is not available.");
}
doRetry(url, registry, timeout);
} catch (Throwable t) { // Ignore all the exceptions and wait for the next retry
// 1-13 - failed to execute the retrying task.
logger.warn(
REGISTRY_EXECUTE_RETRYING_TASK,
"registry center offline",
"Check the registry server.",
"Failed to execute task " + taskName + ", url: " + url + ", waiting for again, cause:"
+ t.getMessage(),
t);
// reput this task when catch exception.
reput(timeout, retryPeriod);
}
}
protected abstract void doRetry(URL url, FailbackRegistry registry, Timeout timeout);
}
|
AbstractRetryTask
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FileEndpointBuilderFactory.java
|
{
"start": 156064,
"end": 165257
}
|
interface ____
extends
AdvancedFileEndpointConsumerBuilder,
AdvancedFileEndpointProducerBuilder {
default FileEndpointBuilder basic() {
return (FileEndpointBuilder) this;
}
/**
* Automatically create missing directories in the file's pathname. For
* the file consumer, that means creating the starting directory. For
* the file producer, it means the directory the files should be written
* to.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autoCreate the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder autoCreate(boolean autoCreate) {
doSetProperty("autoCreate", autoCreate);
return this;
}
/**
* Automatically create missing directories in the file's pathname. For
* the file consumer, that means creating the starting directory. For
* the file producer, it means the directory the files should be written
* to.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autoCreate the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder autoCreate(String autoCreate) {
doSetProperty("autoCreate", autoCreate);
return this;
}
/**
* When auto-creating directories should each subdirectory be created
* one at a time. This may be needed due to security issues on some
* file-shares.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param autoCreateStepwise the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder autoCreateStepwise(boolean autoCreateStepwise) {
doSetProperty("autoCreateStepwise", autoCreateStepwise);
return this;
}
/**
* When auto-creating directories should each subdirectory be created
* one at a time. This may be needed due to security issues on some
* file-shares.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param autoCreateStepwise the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder autoCreateStepwise(String autoCreateStepwise) {
doSetProperty("autoCreateStepwise", autoCreateStepwise);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option is a: <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder browseLimit(int browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Maximum number of messages to keep in memory available for browsing.
* Use 0 for unlimited.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 100
* Group: advanced
*
* @param browseLimit the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder browseLimit(String browseLimit) {
doSetProperty("browseLimit", browseLimit);
return this;
}
/**
* Buffer size in bytes used for writing files (or in case of FTP for
* downloading and uploading files).
*
* The option is a: <code>int</code> type.
*
* Default: 131072
* Group: advanced
*
* @param bufferSize the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder bufferSize(int bufferSize) {
doSetProperty("bufferSize", bufferSize);
return this;
}
/**
* Buffer size in bytes used for writing files (or in case of FTP for
* downloading and uploading files).
*
* The option will be converted to a <code>int</code> type.
*
* Default: 131072
* Group: advanced
*
* @param bufferSize the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder bufferSize(String bufferSize) {
doSetProperty("bufferSize", bufferSize);
return this;
}
/**
* Whether to fall back and do a copy and delete file, in case the file
* could not be renamed directly. This option is not available for the
* FTP component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param copyAndDeleteOnRenameFail the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder copyAndDeleteOnRenameFail(boolean copyAndDeleteOnRenameFail) {
doSetProperty("copyAndDeleteOnRenameFail", copyAndDeleteOnRenameFail);
return this;
}
/**
* Whether to fall back and do a copy and delete file, in case the file
* could not be renamed directly. This option is not available for the
* FTP component.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param copyAndDeleteOnRenameFail the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder copyAndDeleteOnRenameFail(String copyAndDeleteOnRenameFail) {
doSetProperty("copyAndDeleteOnRenameFail", copyAndDeleteOnRenameFail);
return this;
}
/**
* Perform rename operations using a copy and delete strategy. This is
* primarily used in environments where the regular rename operation is
* unreliable (e.g., across different file systems or networks). This
* option takes precedence over the copyAndDeleteOnRenameFail parameter
* that will automatically fall back to the copy and delete strategy,
* but only after additional delays.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param renameUsingCopy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder renameUsingCopy(boolean renameUsingCopy) {
doSetProperty("renameUsingCopy", renameUsingCopy);
return this;
}
/**
* Perform rename operations using a copy and delete strategy. This is
* primarily used in environments where the regular rename operation is
* unreliable (e.g., across different file systems or networks). This
* option takes precedence over the copyAndDeleteOnRenameFail parameter
* that will automatically fall back to the copy and delete strategy,
* but only after additional delays.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param renameUsingCopy the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder renameUsingCopy(String renameUsingCopy) {
doSetProperty("renameUsingCopy", renameUsingCopy);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder synchronous(boolean synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param synchronous the value to set
* @return the dsl builder
*/
default AdvancedFileEndpointBuilder synchronous(String synchronous) {
doSetProperty("synchronous", synchronous);
return this;
}
}
public
|
AdvancedFileEndpointBuilder
|
java
|
apache__camel
|
core/camel-management/src/test/java/org/apache/camel/management/ManagedFileIdempotentConsumerTest.java
|
{
"start": 1635,
"end": 5075
}
|
class ____ extends ManagementTestSupport {
private IdempotentRepository repo;
@Test
public void testDuplicateMessagesAreFilteredOut() throws Exception {
Endpoint startEndpoint = resolveMandatoryEndpoint("direct:start");
MockEndpoint resultEndpoint = getMockEndpoint("mock:result");
MBeanServer mbeanServer = getMBeanServer();
// services
Set<ObjectName> names = mbeanServer.queryNames(new ObjectName("org.apache.camel" + ":type=services,*"), null);
ObjectName on = null;
for (ObjectName name : names) {
if (name.toString().contains("FileIdempotentRepository")) {
on = name;
break;
}
}
assertTrue(mbeanServer.isRegistered(on), "Should be registered");
String path = (String) mbeanServer.getAttribute(on, "FilePath");
assertEquals(FileUtil.normalizePath(testFile("idempotentfilestore.dat").toString()), FileUtil.normalizePath(path));
Integer size = (Integer) mbeanServer.getAttribute(on, "CacheSize");
assertEquals(1, size.intValue());
assertFalse(repo.contains("1"));
assertFalse(repo.contains("2"));
assertFalse(repo.contains("3"));
assertTrue(repo.contains("4"));
resultEndpoint.expectedBodiesReceived("one", "two", "three");
sendMessage(startEndpoint, "1", "one");
sendMessage(startEndpoint, "2", "two");
sendMessage(startEndpoint, "1", "one");
sendMessage(startEndpoint, "2", "two");
sendMessage(startEndpoint, "4", "four");
sendMessage(startEndpoint, "1", "one");
sendMessage(startEndpoint, "3", "three");
resultEndpoint.assertIsSatisfied();
assertTrue(repo.contains("1"));
assertTrue(repo.contains("2"));
assertTrue(repo.contains("3"));
assertTrue(repo.contains("4"));
size = (Integer) mbeanServer.getAttribute(on, "CacheSize");
assertEquals(4, size.intValue());
// remove one from repo
mbeanServer.invoke(on, "remove", new Object[] { "1" }, new String[] { "java.lang.String" });
// reset
mbeanServer.invoke(on, "reset", null, null);
// there should be 3 now
size = (Integer) mbeanServer.getAttribute(on, "CacheSize");
assertEquals(3, size.intValue());
assertFalse(repo.contains("1"));
assertTrue(repo.contains("2"));
assertTrue(repo.contains("3"));
assertTrue(repo.contains("4"));
}
protected void sendMessage(final Endpoint startEndpoint, final Object messageId, final Object body) {
template.send(startEndpoint, exchange -> {
// now lets fire in a message
Message in = exchange.getIn();
in.setBody(body);
in.setHeader("messageId", messageId);
});
}
@Override
protected RouteBuilder createRouteBuilder() {
File store = testFile("idempotentfilestore.dat").toFile();
return new RouteBuilder() {
public void configure() {
repo = FileIdempotentRepository.fileIdempotentRepository(store);
// let's add 4 to start with
repo.add("4");
from("direct:start")
.idempotentConsumer(header("messageId"), repo)
.to("mock:result");
}
};
}
}
|
ManagedFileIdempotentConsumerTest
|
java
|
playframework__playframework
|
core/play/src/main/java/play/mvc/BodyParser.java
|
{
"start": 32078,
"end": 32679
}
|
class ____
extends AbstractFunction1<
Multipart.FileInfo,
play.api.libs.streams.Accumulator<
ByteString, play.api.mvc.MultipartFormData.FilePart<A>>> {
@Override
public play.api.libs.streams.Accumulator<
ByteString, play.api.mvc.MultipartFormData.FilePart<A>>
apply(Multipart.FileInfo fileInfo) {
return createFilePartHandler()
.apply(fileInfo)
.asScala()
.map(new JavaFilePartToScalaFilePart(), materializer.executionContext());
}
}
private
|
ScalaFilePartHandler
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/dev/IDEDevModeMain.java
|
{
"start": 1019,
"end": 6006
}
|
class ____ implements BiConsumer<CuratedApplication, Map<String, Object>>, Closeable {
private static final Logger log = Logger.getLogger(IDEDevModeMain.class.getName());
private static final String APP_PROJECT = "app-project";
private IsolatedDevModeMain delegate;
@Override
public void accept(CuratedApplication curatedApplication, Map<String, Object> stringObjectMap) {
Path appClasses = (Path) stringObjectMap.get("app-classes");
DevModeContext devModeContext = new DevModeContext();
devModeContext.setArgs((String[]) stringObjectMap.get("args"));
Properties buildSystemProperties = curatedApplication.getQuarkusBootstrap().getBuildSystemProperties();
for (String key : buildSystemProperties.stringPropertyNames()) {
devModeContext.getBuildSystemProperties().put(key, buildSystemProperties.getProperty(key));
}
ApplicationModel appModel = null;
try {
if (BuildToolHelper.isMavenProject(appClasses)) {
appModel = curatedApplication.getApplicationModel();
} else {
appModel = BootstrapUtils
.deserializeQuarkusModel((Path) stringObjectMap.get(BootstrapConstants.SERIALIZED_APP_MODEL));
}
if (appModel != null) {
for (ResolvedDependency project : DependenciesFilter.getReloadableModules(appModel)) {
final ModuleInfo module = toModule(project);
if (project.getKey().equals(appModel.getAppArtifact().getKey())
&& project.getVersion().equals(appModel.getAppArtifact().getVersion())) {
devModeContext.setApplicationRoot(module);
} else {
devModeContext.getAdditionalModules().add(module);
devModeContext.getLocalArtifacts().add(project.getKey());
}
}
}
} catch (AppModelResolverException e) {
log.error("Failed to load workspace, hot reload will not be available", e);
}
terminateIfRunning();
delegate = new IsolatedDevModeMain();
Map<String, Object> params = new HashMap<>();
params.put(DevModeContext.class.getName(), devModeContext);
params.put(DevModeType.class.getName(), DevModeType.LOCAL);
delegate.accept(curatedApplication,
params);
}
@Override
public void close() {
terminateIfRunning();
}
private void terminateIfRunning() {
if (delegate != null) {
delegate.close();
}
}
private DevModeContext.ModuleInfo toModule(ResolvedDependency module) throws BootstrapGradleException {
String classesDir = null;
String generatedSourcesDir = null;
final Set<Path> sourceParents = new LinkedHashSet<>();
final PathList.Builder srcPaths = PathList.builder();
final ArtifactSources sources = module.getSources();
for (SourceDir src : sources.getSourceDirs()) {
for (Path p : src.getSourceTree().getRoots()) {
sourceParents.add(p.getParent());
if (!srcPaths.contains(p)) {
srcPaths.add(p);
}
}
if (classesDir == null) {
classesDir = src.getOutputDir().toString();
}
if (generatedSourcesDir == null && src.getAptSourcesDir() != null) {
generatedSourcesDir = src.getAptSourcesDir().toString();
}
}
String resourceDirectory = null;
final PathList.Builder resourcesPaths = PathList.builder();
for (SourceDir src : sources.getResourceDirs()) {
for (Path p : src.getSourceTree().getRoots()) {
if (!resourcesPaths.contains(p)) {
resourcesPaths.add(p);
}
}
if (resourceDirectory == null) {
// Peek the first one as we assume that it is the primary
resourceDirectory = src.getOutputDir().toString();
}
}
return new DevModeContext.ModuleInfo.Builder()
.setArtifactKey(module.getKey())
.setProjectDirectory(module.getWorkspaceModule().getModuleDir().getPath())
.setSourcePaths(srcPaths.build())
.setClassesPath(classesDir)
.setGeneratedSourcesPath(generatedSourcesDir)
.setResourcePaths(resourcesPaths.build())
.setResourcesOutputPath(resourceDirectory)
.setSourceParents(PathList.from(sourceParents))
.setPreBuildOutputDir(module.getWorkspaceModule().getBuildDir().toPath().resolve("generated-sources")
.toAbsolutePath().toString())
.setTargetDir(module.getWorkspaceModule().getBuildDir().toString()).build();
}
}
|
IDEDevModeMain
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/IndexVersions.java
|
{
"start": 1073,
"end": 22232
}
|
class ____, referenced by the registerIndexVersion method.
* When all the index version constants have been registered, the map is cleared & never touched again.
*/
@SuppressWarnings("UnusedAssignment")
static TreeSet<Integer> IDS = new TreeSet<>();
private static IndexVersion def(int id, Version luceneVersion) {
if (IDS == null) throw new IllegalStateException("The IDS map needs to be present to call this method");
if (IDS.add(id) == false) {
throw new IllegalArgumentException("Version id " + id + " defined twice");
}
if (id < IDS.last()) {
throw new IllegalArgumentException("Version id " + id + " is not defined in the right location. Keep constants sorted");
}
return new IndexVersion(id, luceneVersion);
}
// TODO: this is just a hack to allow to keep the V7 IndexVersion constants, during compilation. Remove
private static Version parseUnchecked(String version) {
try {
return Version.parse(version);
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
public static final IndexVersion ZERO = def(0, Version.LATEST);
public static final IndexVersion V_7_0_0 = def(7_00_00_99, parseUnchecked("8.0.0"));
public static final IndexVersion V_7_1_0 = def(7_01_00_99, parseUnchecked("8.0.0"));
public static final IndexVersion V_7_2_0 = def(7_02_00_99, parseUnchecked("8.0.0"));
public static final IndexVersion V_7_2_1 = def(7_02_01_99, parseUnchecked("8.0.0"));
public static final IndexVersion V_7_3_0 = def(7_03_00_99, parseUnchecked("8.1.0"));
public static final IndexVersion V_7_4_0 = def(7_04_00_99, parseUnchecked("8.2.0"));
public static final IndexVersion V_7_5_0 = def(7_05_00_99, parseUnchecked("8.3.0"));
public static final IndexVersion V_7_5_2 = def(7_05_02_99, parseUnchecked("8.3.0"));
public static final IndexVersion V_7_6_0 = def(7_06_00_99, parseUnchecked("8.4.0"));
public static final IndexVersion V_7_7_0 = def(7_07_00_99, parseUnchecked("8.5.1"));
public static final IndexVersion V_7_8_0 = def(7_08_00_99, parseUnchecked("8.5.1"));
public static final IndexVersion V_7_9_0 = def(7_09_00_99, parseUnchecked("8.6.0"));
public static final IndexVersion V_7_10_0 = def(7_10_00_99, parseUnchecked("8.7.0"));
public static final IndexVersion V_7_11_0 = def(7_11_00_99, parseUnchecked("8.7.0"));
public static final IndexVersion V_7_12_0 = def(7_12_00_99, parseUnchecked("8.8.0"));
public static final IndexVersion V_7_13_0 = def(7_13_00_99, parseUnchecked("8.8.2"));
public static final IndexVersion V_7_14_0 = def(7_14_00_99, parseUnchecked("8.9.0"));
public static final IndexVersion V_7_15_0 = def(7_15_00_99, parseUnchecked("8.9.0"));
public static final IndexVersion V_7_16_0 = def(7_16_00_99, parseUnchecked("8.10.1"));
public static final IndexVersion V_7_17_0 = def(7_17_00_99, parseUnchecked("8.11.1"));
public static final IndexVersion V_8_0_0 = def(8_00_00_99, Version.LUCENE_9_0_0);
public static final IndexVersion V_8_1_0 = def(8_01_00_99, Version.LUCENE_9_0_0);
public static final IndexVersion V_8_2_0 = def(8_02_00_99, Version.LUCENE_9_1_0);
public static final IndexVersion V_8_3_0 = def(8_03_00_99, Version.LUCENE_9_2_0);
public static final IndexVersion V_8_4_0 = def(8_04_00_99, Version.LUCENE_9_3_0);
public static final IndexVersion V_8_5_0 = def(8_05_00_99, Version.LUCENE_9_4_1);
public static final IndexVersion V_8_5_3 = def(8_05_03_99, Version.LUCENE_9_4_2);
public static final IndexVersion V_8_6_0 = def(8_06_00_99, Version.LUCENE_9_4_2);
public static final IndexVersion V_8_7_0 = def(8_07_00_99, Version.LUCENE_9_5_0);
public static final IndexVersion V_8_8_0 = def(8_08_00_99, Version.LUCENE_9_6_0);
public static final IndexVersion V_8_8_2 = def(8_08_02_99, Version.LUCENE_9_6_0);
public static final IndexVersion V_8_9_0 = def(8_09_00_99, Version.LUCENE_9_7_0);
public static final IndexVersion V_8_9_1 = def(8_09_01_99, Version.LUCENE_9_7_0);
public static final IndexVersion V_8_10_0 = def(8_10_00_99, Version.LUCENE_9_7_0);
/*
* READ THE COMMENT BELOW THIS BLOCK OF DECLARATIONS BEFORE ADDING NEW INDEX VERSIONS
* Detached index versions added below here.
*/
public static final IndexVersion FIRST_DETACHED_INDEX_VERSION = def(8_500_000, Version.LUCENE_9_7_0);
public static final IndexVersion NEW_SPARSE_VECTOR = def(8_500_001, Version.LUCENE_9_7_0);
public static final IndexVersion SPARSE_VECTOR_IN_FIELD_NAMES_SUPPORT = def(8_500_002, Version.LUCENE_9_7_0);
public static final IndexVersion UPGRADE_LUCENE_9_8 = def(8_500_003, Version.LUCENE_9_8_0);
public static final IndexVersion ES_VERSION_8_12 = def(8_500_004, Version.LUCENE_9_8_0);
public static final IndexVersion NORMALIZED_VECTOR_COSINE = def(8_500_005, Version.LUCENE_9_8_0);
public static final IndexVersion UPGRADE_LUCENE_9_9 = def(8_500_006, Version.LUCENE_9_9_0);
public static final IndexVersion NORI_DUPLICATES = def(8_500_007, Version.LUCENE_9_9_0);
public static final IndexVersion UPGRADE_LUCENE_9_9_1 = def(8_500_008, Version.LUCENE_9_9_1);
public static final IndexVersion ES_VERSION_8_12_1 = def(8_500_009, Version.LUCENE_9_9_1);
public static final IndexVersion UPGRADE_8_12_1_LUCENE_9_9_2 = def(8_500_010, Version.LUCENE_9_9_2);
public static final IndexVersion NEW_INDEXVERSION_FORMAT = def(8_501_0_00, Version.LUCENE_9_9_1);
public static final IndexVersion UPGRADE_LUCENE_9_9_2 = def(8_502_0_00, Version.LUCENE_9_9_2);
public static final IndexVersion TIME_SERIES_ID_HASHING = def(8_502_0_01, Version.LUCENE_9_9_2);
public static final IndexVersion UPGRADE_TO_LUCENE_9_10 = def(8_503_0_00, Version.LUCENE_9_10_0);
public static final IndexVersion TIME_SERIES_ROUTING_HASH_IN_ID = def(8_504_0_00, Version.LUCENE_9_10_0);
public static final IndexVersion DEFAULT_DENSE_VECTOR_TO_INT8_HNSW = def(8_505_0_00, Version.LUCENE_9_10_0);
public static final IndexVersion DOC_VALUES_FOR_IGNORED_META_FIELD = def(8_505_0_01, Version.LUCENE_9_10_0);
public static final IndexVersion SOURCE_MAPPER_LOSSY_PARAMS_CHECK = def(8_506_0_00, Version.LUCENE_9_10_0);
public static final IndexVersion SEMANTIC_TEXT_FIELD_TYPE = def(8_507_0_00, Version.LUCENE_9_10_0);
public static final IndexVersion UPGRADE_TO_LUCENE_9_11 = def(8_508_0_00, Version.LUCENE_9_11_0);
public static final IndexVersion UNIQUE_TOKEN_FILTER_POS_FIX = def(8_509_0_00, Version.LUCENE_9_11_0);
public static final IndexVersion ADD_SECURITY_MIGRATION = def(8_510_0_00, Version.LUCENE_9_11_0);
public static final IndexVersion UPGRADE_TO_LUCENE_9_11_1 = def(8_511_0_00, Version.LUCENE_9_11_1);
public static final IndexVersion INDEX_SORTING_ON_NESTED = def(8_512_0_00, Version.LUCENE_9_11_1);
public static final IndexVersion LENIENT_UPDATEABLE_SYNONYMS = def(8_513_0_00, Version.LUCENE_9_11_1);
public static final IndexVersion ENABLE_IGNORE_MALFORMED_LOGSDB = def(8_514_0_00, Version.LUCENE_9_11_1);
public static final IndexVersion MERGE_ON_RECOVERY_VERSION = def(8_515_0_00, Version.LUCENE_9_11_1);
public static final IndexVersion UPGRADE_TO_LUCENE_9_12 = def(8_516_0_00, Version.LUCENE_9_12_0);
public static final IndexVersion ENABLE_IGNORE_ABOVE_LOGSDB = def(8_517_0_00, Version.LUCENE_9_12_0);
public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_0_00, Version.LUCENE_9_12_0);
public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_0_00, Version.LUCENE_9_12_0);
public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID_BACKPORT = def(8_520_0_00, Version.LUCENE_9_12_0);
public static final IndexVersion V8_DEPRECATE_SOURCE_MODE_MAPPER = def(8_521_0_00, Version.LUCENE_9_12_0);
public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BACKPORT = def(8_522_0_00, Version.LUCENE_9_12_0);
public static final IndexVersion UPGRADE_TO_LUCENE_9_12_1 = def(8_523_0_00, parseUnchecked("9.12.1"));
public static final IndexVersion INFERENCE_METADATA_FIELDS_BACKPORT = def(8_524_0_00, parseUnchecked("9.12.1"));
public static final IndexVersion LOGSB_OPTIONAL_SORTING_ON_HOST_NAME_BACKPORT = def(8_525_0_00, parseUnchecked("9.12.1"));
public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BY_DEFAULT_BACKPORT = def(8_526_0_00, parseUnchecked("9.12.1"));
public static final IndexVersion SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_BACKPORT_8_X = def(8_527_0_00, Version.LUCENE_9_12_1);
public static final IndexVersion ADD_RESCORE_PARAMS_TO_QUANTIZED_VECTORS_BACKPORT_8_X = def(8_528_0_00, Version.LUCENE_9_12_1);
public static final IndexVersion RESCORE_PARAMS_ALLOW_ZERO_TO_QUANTIZED_VECTORS_BACKPORT_8_X = def(8_529_0_00, Version.LUCENE_9_12_1);
public static final IndexVersion DEFAULT_OVERSAMPLE_VALUE_FOR_BBQ_BACKPORT_8_X = def(8_530_0_00, Version.LUCENE_9_12_1);
public static final IndexVersion SEMANTIC_TEXT_DEFAULTS_TO_BBQ_BACKPORT_8_X = def(8_531_0_00, Version.LUCENE_9_12_1);
public static final IndexVersion INDEX_INT_SORT_INT_TYPE_8_19 = def(8_532_0_00, Version.LUCENE_9_12_1);
public static final IndexVersion MAPPER_TEXT_MATCH_ONLY_MULTI_FIELDS_DEFAULT_NOT_STORED_8_19 = def(8_533_0_00, Version.LUCENE_9_12_1);
public static final IndexVersion UPGRADE_TO_LUCENE_9_12_2 = def(8_534_0_00, Version.LUCENE_9_12_2);
public static final IndexVersion SPARSE_VECTOR_PRUNING_INDEX_OPTIONS_SUPPORT_BACKPORT_8_X = def(8_535_0_00, Version.LUCENE_9_12_2);
public static final IndexVersion MATCH_ONLY_TEXT_STORED_AS_BYTES_BACKPORT_8_X = def(8_536_0_00, Version.LUCENE_9_12_2);
public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion DEPRECATE_SOURCE_MODE_MAPPER = def(9_003_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY = def(9_004_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion INFERENCE_METADATA_FIELDS = def(9_005_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion LOGSB_OPTIONAL_SORTING_ON_HOST_NAME = def(9_006_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion SOURCE_MAPPER_MODE_ATTRIBUTE_NOOP = def(9_007_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion HOSTNAME_DOC_VALUES_SPARSE_INDEX = def(9_008_0_00, Version.LUCENE_10_0_0);
public static final IndexVersion UPGRADE_TO_LUCENE_10_1_0 = def(9_009_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BY_DEFAULT = def(9_010_00_0, Version.LUCENE_10_1_0);
public static final IndexVersion TIMESTAMP_DOC_VALUES_SPARSE_INDEX = def(9_011_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion TIME_SERIES_ID_DOC_VALUES_SPARSE_INDEX = def(9_012_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_KEYWORD = def(9_013_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_IP = def(9_014_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion ADD_RESCORE_PARAMS_TO_QUANTIZED_VECTORS = def(9_015_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_NUMBER = def(9_016_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_BOOLEAN = def(9_017_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion RESCORE_PARAMS_ALLOW_ZERO_TO_QUANTIZED_VECTORS = def(9_018_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_UNSIGNED_LONG = def(9_019_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_SCALED_FLOAT = def(9_020_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion USE_LUCENE101_POSTINGS_FORMAT = def(9_021_0_00, Version.LUCENE_10_1_0);
public static final IndexVersion UPGRADE_TO_LUCENE_10_2_0 = def(9_022_00_0, Version.LUCENE_10_2_0);
public static final IndexVersion UPGRADE_TO_LUCENE_10_2_1 = def(9_023_00_0, Version.LUCENE_10_2_1);
public static final IndexVersion DEFAULT_OVERSAMPLE_VALUE_FOR_BBQ = def(9_024_0_00, Version.LUCENE_10_2_1);
public static final IndexVersion SEMANTIC_TEXT_DEFAULTS_TO_BBQ = def(9_025_0_00, Version.LUCENE_10_2_1);
public static final IndexVersion DEFAULT_TO_ACORN_HNSW_FILTER_HEURISTIC = def(9_026_0_00, Version.LUCENE_10_2_1);
public static final IndexVersion SEQ_NO_WITHOUT_POINTS = def(9_027_0_00, Version.LUCENE_10_2_1);
public static final IndexVersion INDEX_INT_SORT_INT_TYPE = def(9_028_0_00, Version.LUCENE_10_2_1);
public static final IndexVersion MAPPER_TEXT_MATCH_ONLY_MULTI_FIELDS_DEFAULT_NOT_STORED = def(9_029_0_00, Version.LUCENE_10_2_1);
public static final IndexVersion UPGRADE_TO_LUCENE_10_2_2 = def(9_030_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion SPARSE_VECTOR_PRUNING_INDEX_OPTIONS_SUPPORT = def(9_031_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion DEFAULT_DENSE_VECTOR_TO_BBQ_HNSW = def(9_032_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion MATCH_ONLY_TEXT_STORED_AS_BYTES = def(9_033_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion IGNORED_SOURCE_COALESCED_ENTRIES_WITH_FF = def(9_034_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion EXCLUDE_SOURCE_VECTORS_DEFAULT = def(9_035_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion DISABLE_NORMS_BY_DEFAULT_FOR_LOGSDB_AND_TSDB = def(9_036_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion TSID_CREATED_DURING_ROUTING = def(9_037_0_00, Version.LUCENE_10_2_2);
public static final IndexVersion UPGRADE_TO_LUCENE_10_3_0 = def(9_038_0_00, Version.LUCENE_10_3_0);
public static final IndexVersion IGNORED_SOURCE_COALESCED_ENTRIES = def(9_039_0_00, Version.LUCENE_10_3_0);
public static final IndexVersion BACKPORT_UPGRADE_TO_LUCENE_10_3_1 = def(9_039_0_01, Version.LUCENE_10_3_1);
public static final IndexVersion BACKPORT_UPGRADE_TO_LUCENE_10_3_2 = def(9_039_0_02, Version.LUCENE_10_3_2);
public static final IndexVersion KEYWORD_MULTI_FIELDS_NOT_STORED_WHEN_IGNORED = def(9_040_0_00, Version.LUCENE_10_3_0);
public static final IndexVersion UPGRADE_TO_LUCENE_10_3_1 = def(9_041_0_00, Version.LUCENE_10_3_1);
public static final IndexVersion REENABLED_TIMESTAMP_DOC_VALUES_SPARSE_INDEX = def(9_042_0_00, Version.LUCENE_10_3_1);
public static final IndexVersion SKIPPERS_ENABLED_BY_DEFAULT = def(9_043_0_00, Version.LUCENE_10_3_1);
public static final IndexVersion TIME_SERIES_USE_SYNTHETIC_ID = def(9_044_0_00, Version.LUCENE_10_3_1);
public static final IndexVersion TIME_SERIES_DIMENSIONS_USE_SKIPPERS = def(9_045_0_00, Version.LUCENE_10_3_1);
public static final IndexVersion TIME_SERIES_ALL_FIELDS_USE_SKIPPERS = def(9_046_0_00, Version.LUCENE_10_3_1);
public static final IndexVersion UPGRADE_TO_LUCENE_10_3_2 = def(9_047_0_00, Version.LUCENE_10_3_2);
public static final IndexVersion SECURITY_MIGRATIONS_METADATA_FLATTENED_UPDATE = def(9_048_0_00, Version.LUCENE_10_3_2);
public static final IndexVersion STANDARD_INDEXES_USE_SKIPPERS = def(9_049_0_00, Version.LUCENE_10_3_2);
/*
* STOP! READ THIS FIRST! No, really,
* ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _
* / ___|_ _/ _ \| _ \| | | _ \| ____| / \ | _ \ |_ _| | | |_ _/ ___| | ___|_ _| _ \/ ___|_ _| |
* \___ \ | || | | | |_) | | | |_) | _| / _ \ | | | | | | | |_| || |\___ \ | |_ | || |_) \___ \ | | | |
* ___) || || |_| | __/|_| | _ <| |___ / ___ \| |_| | | | | _ || | ___) | | _| | || _ < ___) || | |_|
* |____/ |_| \___/|_| (_) |_| \_\_____/_/ \_\____/ |_| |_| |_|___|____/ |_| |___|_| \_\____/ |_| (_)
*
* A new index version should be added EVERY TIME a change is made to index metadata or data storage.
* Each index version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_11_0).
*
* ADDING AN INDEX VERSION
* To add a new index version, add a new constant at the bottom of the list, above this comment. Don't add other lines,
* comments, etc. The version id has the following layout:
*
* M_NNN_S_PP
*
* M - The major version of Elasticsearch
* NNN - The server version part
* S - The subsidiary version part. It should always be 0 here, it is only used in subsidiary repositories.
* PP - The patch version part
*
* To determine the id of the next IndexVersion constant, do the following:
* - Use the same major version, unless bumping majors
* - Bump the server version part by 1, unless creating a patch version
* - Leave the subsidiary part as 0
* - Bump the patch part if creating a patch version
*
* If a patch version is created, it should be placed sorted among the other existing constants.
*
* REVERTING AN INDEX VERSION
*
* If you revert a commit with an index version change, you MUST ensure there is a NEW index version representing the reverted
* change. DO NOT let the index version go backwards, it must ALWAYS be incremented.
*
* DETERMINING INDEX VERSIONS FROM GIT HISTORY
*
* If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the
* index versions known by a particular release ...
*
* git show v8.12.0:server/src/main/java/org/elasticsearch/index/IndexVersions.java | grep '= def'
*
* ... or by a particular branch ...
*
* git show 8.12:server/src/main/java/org/elasticsearch/index/IndexVersions.java | grep '= def'
*
* ... and you can see which versions were added in between two versions too ...
*
* git diff v8.12.0..main -- server/src/main/java/org/elasticsearch/index/IndexVersions.java
*
* In branches 8.7-8.11 see server/src/main/java/org/elasticsearch/index/IndexVersion.java for the equivalent definitions.
*/
public static final IndexVersion MINIMUM_COMPATIBLE = V_8_0_0;
public static final IndexVersion MINIMUM_READONLY_COMPATIBLE = V_7_0_0;
static final NavigableMap<Integer, IndexVersion> VERSION_IDS = getAllVersionIds(IndexVersions.class);
static final IndexVersion LATEST_DEFINED;
static {
LATEST_DEFINED = VERSION_IDS.lastEntry().getValue();
// see comment on IDS field
// now we're registered the index versions, we can clear the map
IDS = null;
}
static NavigableMap<Integer, IndexVersion> getAllVersionIds(Class<?> cls) {
Map<Integer, String> versionIdFields = new HashMap<>();
NavigableMap<Integer, IndexVersion> builder = new TreeMap<>();
Set<String> ignore = Set.of("ZERO", "MINIMUM_COMPATIBLE", "MINIMUM_READONLY_COMPATIBLE");
for (Field declaredField : cls.getFields()) {
if (declaredField.getType().equals(IndexVersion.class)) {
String fieldName = declaredField.getName();
if (ignore.contains(fieldName)) {
continue;
}
IndexVersion version;
try {
version = (IndexVersion) declaredField.get(null);
} catch (IllegalAccessException e) {
throw new AssertionError(e);
}
builder.put(version.id(), version);
if (Assertions.ENABLED) {
// check the version number is unique
var sameVersionNumber = versionIdFields.put(version.id(), fieldName);
assert sameVersionNumber == null
: "Versions ["
+ sameVersionNumber
+ "] and ["
+ fieldName
+ "] have the same version number ["
+ version.id()
+ "]. Each IndexVersion should have a different version number";
}
}
}
return Collections.unmodifiableNavigableMap(builder);
}
static Collection<IndexVersion> getAllVersions() {
return VERSION_IDS.values();
}
static final IntFunction<String> VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(IndexVersions.class, LATEST_DEFINED.id());
// no instance
private IndexVersions() {}
}
|
construction
|
java
|
playframework__playframework
|
persistence/play-java-jpa/src/main/java/play/db/jpa/DefaultJPAApi.java
|
{
"start": 651,
"end": 1003
}
|
class ____ implements JPAApi {
private static final Logger logger = LoggerFactory.getLogger(DefaultJPAApi.class);
private final JPAConfig jpaConfig;
private final Map<String, EntityManagerFactory> emfs = new HashMap<>();
public DefaultJPAApi(JPAConfig jpaConfig) {
this.jpaConfig = jpaConfig;
}
@Singleton
public static
|
DefaultJPAApi
|
java
|
alibaba__nacos
|
common/src/test/java/com/alibaba/nacos/common/http/client/request/DefaultAsyncHttpClientRequestTest.java
|
{
"start": 1893,
"end": 6345
}
|
class ____ {
DefaultAsyncHttpClientRequest httpClientRequest;
@Mock
private CloseableHttpAsyncClient client;
@Mock
private DefaultConnectingIOReactor ioReactor;
@Mock
private Callback callback;
@Mock
private ResponseHandler responseHandler;
private RequestConfig defaultConfig;
private URI uri;
@BeforeEach
void setUp() throws Exception {
defaultConfig = RequestConfig.DEFAULT;
httpClientRequest = new DefaultAsyncHttpClientRequest(client, ioReactor, defaultConfig);
uri = URI.create("http://127.0.0.1:8080");
}
@AfterEach
void tearDown() throws Exception {
httpClientRequest.close();
}
@Test
void testExecuteOnFail() throws Exception {
Header header = Header.newInstance();
Map<String, String> body = new HashMap<>();
body.put("test", "test");
RequestHttpEntity httpEntity = new RequestHttpEntity(header, Query.EMPTY, body);
RuntimeException exception = new RuntimeException("test");
when(client.execute(any(), any())).thenAnswer(invocationOnMock -> {
((FutureCallback) invocationOnMock.getArgument(1)).failed(exception);
return null;
});
httpClientRequest.execute(uri, "PUT", httpEntity, responseHandler, callback);
verify(callback).onError(exception);
}
@Test
void testExecuteOnCancel() throws Exception {
Header header = Header.newInstance();
Map<String, String> body = new HashMap<>();
body.put("test", "test");
RequestHttpEntity httpEntity = new RequestHttpEntity(header, Query.EMPTY, body);
when(client.execute(any(), any())).thenAnswer(invocationOnMock -> {
((FutureCallback) invocationOnMock.getArgument(1)).cancelled();
return null;
});
httpClientRequest.execute(uri, "PUT", httpEntity, responseHandler, callback);
verify(callback).onCancel();
}
@Test
void testExecuteOnComplete() throws Exception {
Header header = Header.newInstance();
Map<String, String> body = new HashMap<>();
body.put("test", "test");
RequestHttpEntity httpEntity = new RequestHttpEntity(header, Query.EMPTY, body);
SimpleHttpResponse response = mock(SimpleHttpResponse.class);
HttpRestResult restResult = new HttpRestResult();
when(responseHandler.handle(any())).thenReturn(restResult);
when(client.execute(any(), any())).thenAnswer(invocationOnMock -> {
((FutureCallback) invocationOnMock.getArgument(1)).completed(response);
return null;
});
httpClientRequest.execute(uri, "PUT", httpEntity, responseHandler, callback);
verify(callback).onReceive(restResult);
}
@Test
void testExecuteOnCompleteWithException() throws Exception {
Header header = Header.newInstance();
Map<String, String> body = new HashMap<>();
body.put("test", "test");
RequestHttpEntity httpEntity = new RequestHttpEntity(header, Query.EMPTY, body);
SimpleHttpResponse response = mock(SimpleHttpResponse.class);
RuntimeException exception = new RuntimeException("test");
when(responseHandler.handle(any())).thenThrow(exception);
when(client.execute(any(), any())).thenAnswer(invocationOnMock -> {
((FutureCallback) invocationOnMock.getArgument(1)).completed(response);
return null;
});
httpClientRequest.execute(uri, "PUT", httpEntity, responseHandler, callback);
verify(callback).onError(exception);
}
@Test
void testExecuteException() throws Exception {
Header header = Header.newInstance();
Map<String, String> body = new HashMap<>();
body.put("test", "test");
RequestHttpEntity httpEntity = new RequestHttpEntity(header, Query.EMPTY, body);
IllegalStateException exception = new IllegalStateException("test");
when(client.execute(any(), any())).thenThrow(exception);
// when(ioReactor.getAuditLog()).thenReturn(Collections.singletonList(new ExceptionEvent(exception, new Date())));
try {
httpClientRequest.execute(uri, "PUT", httpEntity, responseHandler, callback);
} catch (Exception e) {
assertEquals(exception, e);
}
}
}
|
DefaultAsyncHttpClientRequestTest
|
java
|
apache__camel
|
components/camel-smooks/src/generated/java/org/apache/camel/component/smooks/SmooksEndpointUriFactory.java
|
{
"start": 516,
"end": 2219
}
|
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":smooksConfig";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(4);
props.add("allowExecutionContextFromHeader");
props.add("lazyStartProducer");
props.add("reportPath");
props.add("smooksConfig");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "smooks".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "smooksConfig", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
|
SmooksEndpointUriFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/UnidirectionalComparatorSortedSetTest.java
|
{
"start": 931,
"end": 1866
}
|
class ____ {
@Test
public void testLifecycle(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
Person person = new Person(1L);
person.getPhones().add(new Phone(1L, "landline", "028-234-9876"));
person.getPhones().add(new Phone(2L, "mobile", "072-122-9876"));
entityManager.persist(person);
});
scope.inTransaction( entityManager -> {
Person person = entityManager.find(Person.class, 1L);
Set<Phone> phones = person.getPhones();
assertEquals(2, phones.size());
phones.remove(phones.iterator().next());
assertEquals(1, phones.size());
});
scope.inTransaction( entityManager -> {
Person person = entityManager.find(Person.class, 1L);
Set<Phone> phones = person.getPhones();
assertEquals(1, phones.size());
});
}
//tag::collections-unidirectional-sorted-set-custom-comparator-example[]
@Entity(name = "Person")
public static
|
UnidirectionalComparatorSortedSetTest
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/mock/MockConnectionClosedException.java
|
{
"start": 687,
"end": 870
}
|
class ____ extends SQLException {
private static final long serialVersionUID = 1L;
public MockConnectionClosedException() {
super();
}
}
|
MockConnectionClosedException
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptivebatch/AdaptiveExecutionHandlerFactory.java
|
{
"start": 2101,
"end": 3339
}
|
class ____ for the user code.
* @param serializationExecutor The executor used for serialization tasks.
* @return An instance of {@link AdaptiveExecutionHandler}.
* @throws IllegalArgumentException if the execution plan is neither a {@link JobGraph} nor a
* {@link StreamGraph}.
*/
public static AdaptiveExecutionHandler create(
ExecutionPlan executionPlan,
boolean enableBatchJobRecovery,
ClassLoader userClassLoader,
Executor serializationExecutor)
throws DynamicCodeLoadingException {
if (executionPlan instanceof JobGraph) {
return new NonAdaptiveExecutionHandler((JobGraph) executionPlan);
} else {
checkState(executionPlan instanceof StreamGraph, "Unsupported execution plan.");
if (enableBatchJobRecovery) {
StreamGraph streamGraph = (StreamGraph) executionPlan;
return new NonAdaptiveExecutionHandler(streamGraph.getJobGraph(userClassLoader));
} else {
return new DefaultAdaptiveExecutionHandler(
userClassLoader, (StreamGraph) executionPlan, serializationExecutor);
}
}
}
}
|
loader
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/AbstractConfigTest.java
|
{
"start": 39477,
"end": 39850
}
|
class ____ {
private Integer b1;
private Integer b2;
public Integer getB1() {
return b1;
}
public void setB1(Integer b1) {
this.b1 = b1;
}
public Integer getB2() {
return b2;
}
public void setB2(Integer b2) {
this.b2 = b2;
}
}
}
|
InnerConfig
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/aggregate/PostgreSQLAggregateSupport.java
|
{
"start": 1944,
"end": 12387
}
|
class ____ extends AggregateSupportImpl {
private static final AggregateSupport INSTANCE = new PostgreSQLAggregateSupport();
private static final String XML_EXTRACT_START = "xmlelement(name \"" + XmlHelper.ROOT_TAG + "\",(select xmlagg(t.v) from xmltable(";
private static final String XML_EXTRACT_SEPARATOR = "/*' passing ";
private static final String XML_EXTRACT_END = " columns v xml path '.')t))";
private static final String XML_QUERY_START = "(select xmlagg(t.v) from xmltable(";
private static final String XML_QUERY_SEPARATOR = "' passing ";
private static final String XML_QUERY_END = " columns v xml path '.')t)";
public static AggregateSupport valueOf(Dialect dialect) {
return PostgreSQLAggregateSupport.INSTANCE;
}
@Override
public String aggregateComponentCustomReadExpression(
String template,
String placeholder,
String aggregateParentReadExpression,
String columnExpression,
int aggregateColumnTypeCode,
SqlTypedMapping column,
TypeConfiguration typeConfiguration) {
switch ( aggregateColumnTypeCode ) {
case JSON_ARRAY:
case JSON:
switch ( column.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode() ) {
case JSON:
case JSON_ARRAY:
return template.replace(
placeholder,
aggregateParentReadExpression + "->'" + columnExpression + "'"
);
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
// We encode binary data as hex, so we have to decode here
return template.replace(
placeholder,
"decode(" + aggregateParentReadExpression + "->>'" + columnExpression + "','hex')"
);
case ARRAY:
final BasicPluralType<?, ?> pluralType = (BasicPluralType<?, ?>) column.getJdbcMapping();
switch ( pluralType.getElementType().getJdbcType().getDefaultSqlTypeCode() ) {
case BOOLEAN:
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
case FLOAT:
case DOUBLE:
// For types that are natively supported in jsonb we can use jsonb_array_elements,
// but note that we can't use that for string types,
// because casting a jsonb[] to text[] will not omit the quotes of the jsonb text values
return template.replace(
placeholder,
"cast(array(select jsonb_array_elements(" + aggregateParentReadExpression + "->'" + columnExpression + "')) as " + column.getColumnDefinition() + ')'
);
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
// We encode binary data as hex, so we have to decode here
return template.replace(
placeholder,
"array(select decode(jsonb_array_elements_text(" + aggregateParentReadExpression + "->'" + columnExpression + "'),'hex'))"
);
default:
return template.replace(
placeholder,
"cast(array(select jsonb_array_elements_text(" + aggregateParentReadExpression + "->'" + columnExpression + "')) as " + column.getColumnDefinition() + ')'
);
}
default:
return template.replace(
placeholder,
"cast(" + aggregateParentReadExpression + "->>'" + columnExpression + "' as " + column.getColumnDefinition() + ')'
);
}
case XML_ARRAY:
case SQLXML:
switch ( column.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode() ) {
case SQLXML:
return template.replace(
placeholder,
XML_EXTRACT_START + xmlExtractArguments( aggregateParentReadExpression, columnExpression + "/*" ) + XML_EXTRACT_END
);
case XML_ARRAY:
if ( typeConfiguration.getCurrentBaseSqlTypeIndicators().isXmlFormatMapperLegacyFormatEnabled() ) {
throw new IllegalArgumentException( "XML array '" + columnExpression + "' in '" + aggregateParentReadExpression + "' is not supported with legacy format enabled." );
}
else {
return template.replace(
placeholder,
"xmlelement(name \"Collection\",(select xmlagg(t.v order by t.i) from xmltable(" + xmlExtractArguments( aggregateParentReadExpression, columnExpression + "/*" ) + " columns v xml path '.', i for ordinality)t))"
);
}
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
// We encode binary data as hex, so we have to decode here
return template.replace(
placeholder,
"decode((select t.v from xmltable(" + xmlExtractArguments( aggregateParentReadExpression, columnExpression )+ " columns v text path '.') t),'hex')"
);
case ARRAY:
throw new UnsupportedOperationException( "Transforming XML_ARRAY to native arrays is not supported on PostgreSQL!" );
default:
return template.replace(
placeholder,
"(select t.v from xmltable(" + xmlExtractArguments( aggregateParentReadExpression, columnExpression ) + " columns v " + column.getColumnDefinition() + " path '.') t)"
);
}
case STRUCT:
case STRUCT_ARRAY:
case STRUCT_TABLE:
return template.replace( placeholder, '(' + aggregateParentReadExpression + ")." + columnExpression );
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateColumnTypeCode );
}
private static String xmlExtractArguments(String aggregateParentReadExpression, String xpathFragment) {
final String extractArguments;
int separatorIndex;
if ( aggregateParentReadExpression.startsWith( XML_EXTRACT_START )
&& aggregateParentReadExpression.endsWith( XML_EXTRACT_END )
&& (separatorIndex = aggregateParentReadExpression.indexOf( XML_EXTRACT_SEPARATOR )) != -1 ) {
final StringBuilder sb = new StringBuilder( aggregateParentReadExpression.length() - XML_EXTRACT_START.length() + xpathFragment.length() );
sb.append( aggregateParentReadExpression, XML_EXTRACT_START.length(), separatorIndex );
sb.append( '/' );
sb.append( xpathFragment );
sb.append( aggregateParentReadExpression, separatorIndex + 2, aggregateParentReadExpression.length() - XML_EXTRACT_END.length() );
extractArguments = sb.toString();
}
else if ( aggregateParentReadExpression.startsWith( XML_QUERY_START )
&& aggregateParentReadExpression.endsWith( XML_QUERY_END )
&& (separatorIndex = aggregateParentReadExpression.indexOf( XML_QUERY_SEPARATOR )) != -1 ) {
final StringBuilder sb = new StringBuilder( aggregateParentReadExpression.length() - XML_QUERY_START.length() + xpathFragment.length() );
sb.append( aggregateParentReadExpression, XML_QUERY_START.length(), separatorIndex );
sb.append( '/' );
sb.append( xpathFragment );
sb.append( aggregateParentReadExpression, separatorIndex, aggregateParentReadExpression.length() - XML_QUERY_END.length() );
extractArguments = sb.toString();
}
else {
extractArguments = "'/" + XmlHelper.ROOT_TAG + "/" + xpathFragment + "' passing " + aggregateParentReadExpression;
}
return extractArguments;
}
private static String jsonCustomWriteExpression(String customWriteExpression, JdbcMapping jdbcMapping) {
final int sqlTypeCode = jdbcMapping.getJdbcType().getDefaultSqlTypeCode();
switch ( sqlTypeCode ) {
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
// We encode binary data as hex
return "to_jsonb(encode(" + customWriteExpression + ",'hex'))";
case ARRAY:
final BasicPluralType<?, ?> pluralType = (BasicPluralType<?, ?>) jdbcMapping;
switch ( pluralType.getElementType().getJdbcType().getDefaultSqlTypeCode() ) {
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
// We encode binary data as hex
return "to_jsonb(array(select encode(unnest(" + customWriteExpression + "),'hex')))";
default:
return "to_jsonb(" + customWriteExpression + ")";
}
default:
return "to_jsonb(" + customWriteExpression + ")";
}
}
private static String xmlCustomWriteExpression(String customWriteExpression, JdbcMapping jdbcMapping) {
final int sqlTypeCode = jdbcMapping.getJdbcType().getDefaultSqlTypeCode();
switch ( sqlTypeCode ) {
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
// We encode binary data as hex
return "encode(" + customWriteExpression + ",'hex')";
// case ARRAY:
// final BasicPluralType<?, ?> pluralType = (BasicPluralType<?, ?>) jdbcMapping;
// switch ( pluralType.getElementType().getJdbcType().getDefaultSqlTypeCode() ) {
// case BINARY:
// case VARBINARY:
// case LONG32VARBINARY:
// // We encode binary data as hex
// return "to_jsonb(array(select encode(unnest(" + customWriteExpression + "),'hex')))";
// default:
// return "to_jsonb(" + customWriteExpression + ")";
// }
default:
return customWriteExpression;
}
}
@Override
public String aggregateComponentAssignmentExpression(
String aggregateParentAssignmentExpression,
String columnExpression,
int aggregateColumnTypeCode,
Column column) {
switch ( aggregateColumnTypeCode ) {
case JSON:
case JSON_ARRAY:
case SQLXML:
case XML_ARRAY:
// For JSON/XML we always have to replace the whole object
return aggregateParentAssignmentExpression;
case STRUCT:
case STRUCT_ARRAY:
case STRUCT_TABLE:
return aggregateParentAssignmentExpression + "." + columnExpression;
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateColumnTypeCode );
}
@Override
public boolean requiresAggregateCustomWriteExpressionRenderer(int aggregateSqlTypeCode) {
switch ( aggregateSqlTypeCode ) {
case JSON:
case SQLXML:
return true;
}
return false;
}
@Override
public boolean preferSelectAggregateMapping(int aggregateSqlTypeCode) {
// The JDBC driver does not support selecting java.sql.Struct, so return false to select individual parts
return aggregateSqlTypeCode != STRUCT;
}
@Override
public WriteExpressionRenderer aggregateCustomWriteExpressionRenderer(
SelectableMapping aggregateColumn,
SelectableMapping[] columnsToUpdate,
TypeConfiguration typeConfiguration) {
final int aggregateSqlTypeCode = aggregateColumn.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode();
switch ( aggregateSqlTypeCode ) {
case JSON:
return new RootJsonWriteExpression( aggregateColumn, columnsToUpdate );
case SQLXML:
return new RootXmlWriteExpression( aggregateColumn, columnsToUpdate );
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateSqlTypeCode );
}
|
PostgreSQLAggregateSupport
|
java
|
resilience4j__resilience4j
|
resilience4j-rxjava2/src/test/java/io/github/resilience4j/micrometer/transformer/SingleTimerTest.java
|
{
"start": 1236,
"end": 2479
}
|
class ____ {
@Test
public void shouldTimeSuccessfulSingle() {
String message = "Hello!";
MeterRegistry registry = new SimpleMeterRegistry();
Timer timer = Timer.of("timer 1", registry);
String result = Single.just(message)
.compose(TimerTransformer.of(timer))
.blockingGet();
then(result).isEqualTo(message);
thenSuccessTimed(registry, timer);
}
@Test
public void shouldTimeFailedSingle() {
IllegalStateException exception = new IllegalStateException();
MeterRegistry registry = new SimpleMeterRegistry();
TimerConfig config = TimerConfig.custom()
.onFailureTagResolver(ex -> {
then(ex).isEqualTo(exception);
return ex.toString();
})
.build();
Timer timer = Timer.of("timer 1", registry, config);
try {
Single.error(exception)
.compose(TimerTransformer.of(timer))
.blockingGet();
failBecauseExceptionWasNotThrown(exception.getClass());
} catch (Exception e) {
thenFailureTimed(registry, timer, e);
}
}
}
|
SingleTimerTest
|
java
|
apache__commons-lang
|
src/test/java/org/apache/commons/lang3/FunctionsTest.java
|
{
"start": 38613,
"end": 39052
}
|
interface ____ properly defined to throw any exception using String and IOExceptions as
* generic test types.
*/
@Test
void testThrows_FailableRunnable_String_IOException() {
new Functions.FailableRunnable<IOException>() {
@Override
public void run() throws IOException {
throw new IOException("test");
}
};
}
/**
* Tests that our failable
|
is
|
java
|
apache__camel
|
components/camel-ehcache/src/test/java/org/apache/camel/component/ehcache/EhcacheSpringConfigurationFactory.java
|
{
"start": 1177,
"end": 2168
}
|
class ____ extends AbstractFactoryBean<CacheConfiguration> {
private Class<?> keyType = Object.class;
private Class<?> valueType = Object.class;
public Class<?> getKeyType() {
return keyType;
}
public void setKeyType(Class<?> keyType) {
this.keyType = keyType;
}
public Class<?> getValueType() {
return valueType;
}
public void setValueType(Class<?> valueType) {
this.valueType = valueType;
}
@Override
public Class<?> getObjectType() {
return CacheConfiguration.class;
}
@Override
protected CacheConfiguration createInstance() {
return CacheConfigurationBuilder.newCacheConfigurationBuilder(
keyType,
valueType,
ResourcePoolsBuilder.newResourcePoolsBuilder()
.heap(100, EntryUnit.ENTRIES)
.offheap(1, MemoryUnit.MB))
.build();
}
}
|
EhcacheSpringConfigurationFactory
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/RestrictedApiCheckerTest.java
|
{
"start": 1007,
"end": 1528
}
|
class ____ {
private final CompilationTestHelper helper =
CompilationTestHelper.newInstance(RestrictedApiChecker.class, RestrictedApiCheckerTest.class)
.addSourceLines(
"Allowlist.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.lang.annotation.ElementType;
import java.lang.annotation.Target;
@Target({ElementType.METHOD, ElementType.CONSTRUCTOR})
public @
|
RestrictedApiCheckerTest
|
java
|
apache__spark
|
examples/src/main/java/org/apache/spark/examples/ml/JavaWord2VecExample.java
|
{
"start": 1257,
"end": 2627
}
|
class ____ {
public static void main(String[] args) {
SparkSession spark = SparkSession
.builder()
.appName("JavaWord2VecExample")
.getOrCreate();
// $example on$
// Input data: Each row is a bag of words from a sentence or document.
List<Row> data = Arrays.asList(
RowFactory.create(Arrays.asList("Hi I heard about Spark".split(" "))),
RowFactory.create(Arrays.asList("I wish Java could use case classes".split(" "))),
RowFactory.create(Arrays.asList("Logistic regression models are neat".split(" ")))
);
StructType schema = new StructType(new StructField[]{
new StructField("text", new ArrayType(DataTypes.StringType, true), false, Metadata.empty())
});
Dataset<Row> documentDF = spark.createDataFrame(data, schema);
// Learn a mapping from words to Vectors.
Word2Vec word2Vec = new Word2Vec()
.setInputCol("text")
.setOutputCol("result")
.setVectorSize(3)
.setMinCount(0);
Word2VecModel model = word2Vec.fit(documentDF);
Dataset<Row> result = model.transform(documentDF);
for (Row row : result.collectAsList()) {
List<String> text = row.getList(0);
Vector vector = (Vector) row.get(1);
System.out.println("Text: " + text + " => \nVector: " + vector + "\n");
}
// $example off$
spark.stop();
}
}
|
JavaWord2VecExample
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/bean/override/easymock/EasyMockBean.java
|
{
"start": 1134,
"end": 1415
}
|
class ____ signal that a bean should be replaced with an {@link org.easymock.EasyMock
* EasyMock} mock.
*
* @author Sam Brannen
* @since 6.2
*/
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@BeanOverride(EasyMockBeanOverrideProcessor.class)
public @
|
to
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreUtils.java
|
{
"start": 3494,
"end": 19160
}
|
class ____ {
private DocumentStoreUtils(){}
/** milliseconds in one day. */
private static final long MILLIS_ONE_DAY = 86400000L;
private static final String TIMELINE_STORE_TYPE =
YarnConfiguration.TIMELINE_SERVICE_PREFIX + "document-store-type";
static final String TIMELINE_SERVICE_COSMOSDB_ENDPOINT =
"yarn.timeline-service.document-store.cosmos-db.endpoint";
static final String TIMELINE_SERVICE_COSMOSDB_MASTER_KEY =
"yarn.timeline-service.document-store.cosmos-db.masterkey";
static final String TIMELINE_SERVICE_DOCUMENTSTORE_DATABASE_NAME =
"yarn.timeline-service.document-store.db-name";
private static final String
DEFAULT_TIMELINE_SERVICE_DOCUMENTSTORE_DATABASE_NAME = "timeline_service";
/**
* Checks whether the cosmosdb conf are set properly in yarn-site.xml conf.
* @param conf
* related to yarn
* @throws YarnException if required config properties are missing
*/
public static void validateCosmosDBConf(Configuration conf)
throws YarnException {
if (conf == null) {
throw new NullPointerException("Configuration cannot be null");
}
if (isNullOrEmpty(conf.get(TIMELINE_SERVICE_COSMOSDB_ENDPOINT),
conf.get(TIMELINE_SERVICE_COSMOSDB_MASTER_KEY))) {
throw new YarnException("One or more CosmosDB configuration property is" +
" missing in yarn-site.xml");
}
}
/**
* Retrieves {@link DocumentStoreVendor} configured.
* @param conf
* related to yarn
* @return Returns the {@link DocumentStoreVendor} that is configured, else
* uses {@link DocumentStoreVendor#COSMOS_DB} as default
*/
public static DocumentStoreVendor getStoreVendor(Configuration conf) {
return DocumentStoreVendor.getStoreType(conf.get(TIMELINE_STORE_TYPE,
DocumentStoreVendor.COSMOS_DB.name()));
}
/**
* Retrieves a {@link TimelineEvent} from {@link TimelineEntity#events}.
* @param timelineEntity
* from which the set of events are examined.
* @param eventType
* that has to be checked.
* @return {@link TimelineEvent} if found else null
*/
public static TimelineEvent fetchEvent(TimelineEntity timelineEntity,
String eventType) {
for (TimelineEvent event : timelineEntity.getEvents()) {
if (event.getId().equals(eventType)) {
return event;
}
}
return null;
}
/**
* Checks if the string is null or empty.
* @param values
* array of string to be checked
* @return false if any of the string is null or empty else true
*/
public static boolean isNullOrEmpty(String...values) {
if (values == null || values.length == 0) {
return true;
}
for (String value : values) {
if (value == null || value.isEmpty()) {
return true;
}
}
return false;
}
/**
* Creates CosmosDB Async Document Client.
* @param conf
* to retrieve cosmos db endpoint and key
* @return async document client for CosmosDB
*/
public static AsyncDocumentClient createCosmosDBAsyncClient(
Configuration conf){
return new AsyncDocumentClient.Builder()
.withServiceEndpoint(DocumentStoreUtils.getCosmosDBEndpoint(conf))
.withMasterKeyOrResourceToken(
DocumentStoreUtils.getCosmosDBMasterKey(conf))
.withConnectionPolicy(ConnectionPolicy.GetDefault())
.withConsistencyLevel(ConsistencyLevel.Session)
.build();
}
/**
* Returns the timestamp of the day's start (which is midnight 00:00:00 AM)
* for a given input timestamp.
*
* @param timeStamp Timestamp.
* @return timestamp of that day's beginning (midnight)
*/
public static long getTopOfTheDayTimestamp(long timeStamp) {
return timeStamp - (timeStamp % MILLIS_ONE_DAY);
}
/**
* Creates a composite key for storing {@link TimelineEntityDocument}.
* @param collectorContext
* of the timeline writer
* @param type
* of the entity
* @return composite key delimited with !
*/
public static String constructTimelineEntityDocId(TimelineCollectorContext
collectorContext, String type) {
return String.format("%s!%s!%s!%d!%s!%s",
collectorContext.getClusterId(), collectorContext.getUserId(),
collectorContext.getFlowName(), collectorContext.getFlowRunId(),
collectorContext.getAppId(), type);
}
/**
* Creates a composite key for storing {@link TimelineEntityDocument}.
* @param collectorContext
* of the timeline writer
* @param type
* of the entity
* @param id
* of the entity
* @return composite key delimited with !
*/
public static String constructTimelineEntityDocId(TimelineCollectorContext
collectorContext, String type, String id) {
return String.format("%s!%s!%s!%d!%s!%s!%s",
collectorContext.getClusterId(), collectorContext.getUserId(),
collectorContext.getFlowName(), collectorContext.getFlowRunId(),
collectorContext.getAppId(), type, id);
}
/**
* Creates a composite key for storing {@link FlowRunDocument}.
* @param collectorContext
* of the timeline writer
* @return composite key delimited with !
*/
public static String constructFlowRunDocId(TimelineCollectorContext
collectorContext) {
return String.format("%s!%s!%s!%s", collectorContext.getClusterId(),
collectorContext.getUserId(), collectorContext.getFlowName(),
collectorContext.getFlowRunId());
}
/**
* Creates a composite key for storing {@link FlowActivityDocument}.
* @param collectorContext
* of the timeline writer
* @param eventTimestamp
* of the timeline entity
* @return composite key delimited with !
*/
public static String constructFlowActivityDocId(TimelineCollectorContext
collectorContext, long eventTimestamp) {
return String.format("%s!%s!%s!%s", collectorContext.getClusterId(),
getTopOfTheDayTimestamp(eventTimestamp),
collectorContext.getUserId(), collectorContext.getFlowName());
}
private static String getCosmosDBEndpoint(Configuration conf) {
return conf.get(TIMELINE_SERVICE_COSMOSDB_ENDPOINT);
}
private static String getCosmosDBMasterKey(Configuration conf) {
return conf.get(TIMELINE_SERVICE_COSMOSDB_MASTER_KEY);
}
public static String getCosmosDBDatabaseName(Configuration conf) {
return conf.get(TIMELINE_SERVICE_DOCUMENTSTORE_DATABASE_NAME,
getDefaultTimelineServiceDBName(conf));
}
private static String getDefaultTimelineServiceDBName(
Configuration conf) {
return getClusterId(conf) + "_" +
DEFAULT_TIMELINE_SERVICE_DOCUMENTSTORE_DATABASE_NAME;
}
private static String getClusterId(Configuration conf) {
return conf.get(YarnConfiguration.RM_CLUSTER_ID,
YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
}
private static boolean isTimeInRange(long time, long timeBegin,
long timeEnd) {
return (time >= timeBegin) && (time <= timeEnd);
}
/**
 * Checks if the {@link TimelineEntityFilters} are not matching for a given
 * {@link TimelineEntity}.
 * @param filters
 *          that has to be checked for an entity
 * @param timelineEntity
 *          for which the filters would be applied
 * @return true if any one of the filter is not matching else false
 * @throws IOException if an unsupported filter is being matched.
 */
static boolean isFilterNotMatching(TimelineEntityFilters filters,
    TimelineEntity timelineEntity) throws IOException {
  // Each check below short-circuits: the first non-matching filter rejects
  // the entity. A filter with an empty filter list is treated as "no
  // constraint" and skipped.
  // Created-time window; only applied when the entity has a created time.
  if (timelineEntity.getCreatedTime() != null && !isTimeInRange(timelineEntity
      .getCreatedTime(), filters.getCreatedTimeBegin(),
      filters.getCreatedTimeEnd())) {
    return true;
  }
  if (filters.getRelatesTo() != null &&
      !filters.getRelatesTo().getFilterList().isEmpty() &&
      !TimelineStorageUtils.matchRelatesTo(timelineEntity,
          filters.getRelatesTo())) {
    return true;
  }
  if (filters.getIsRelatedTo() != null &&
      !filters.getIsRelatedTo().getFilterList().isEmpty() &&
      !TimelineStorageUtils.matchIsRelatedTo(timelineEntity,
          filters.getIsRelatedTo())) {
    return true;
  }
  if (filters.getInfoFilters() != null &&
      !filters.getInfoFilters().getFilterList().isEmpty() &&
      !TimelineStorageUtils.matchInfoFilters(timelineEntity,
          filters.getInfoFilters())) {
    return true;
  }
  if (filters.getConfigFilters() != null &&
      !filters.getConfigFilters().getFilterList().isEmpty() &&
      !TimelineStorageUtils.matchConfigFilters(timelineEntity,
          filters.getConfigFilters())) {
    return true;
  }
  if (filters.getMetricFilters() != null &&
      !filters.getMetricFilters().getFilterList().isEmpty() &&
      !TimelineStorageUtils.matchMetricFilters(timelineEntity,
          filters.getMetricFilters())) {
    return true;
  }
  // Event filters are the last check, so their non-match result is the
  // method's result directly.
  return filters.getEventFilters() != null &&
      !filters.getEventFilters().getFilterList().isEmpty() &&
      !TimelineStorageUtils.matchEventFilters(timelineEntity,
          filters.getEventFilters());
}
/**
 * Creates the final entity to be returned as the result.
 *
 * @param timelineEntityDocument which has all the information for the entity
 * @param dataToRetrieve specifies filters and fields to retrieve
 * @return {@link TimelineEntity} as the result
 */
public static TimelineEntity createEntityToBeReturned(
    TimelineEntityDocument timelineEntityDocument,
    TimelineDataToRetrieve dataToRetrieve) {
  // Start from a type-appropriate shell entity, then copy the identity and
  // always-present attributes onto it.
  final TimelineEntity result = createTimelineEntity(
      timelineEntityDocument.getType(),
      timelineEntityDocument.fetchTimelineEntity());
  result.setIdentifier(new TimelineEntity.Identifier(
      timelineEntityDocument.getType(), timelineEntityDocument.getId()));
  result.setCreatedTime(timelineEntityDocument.getCreatedTime());
  result.setInfo(timelineEntityDocument.getInfo());
  // Optional fields are only copied when the caller asked for any.
  if (dataToRetrieve.getFieldsToRetrieve() != null) {
    fillFields(result, timelineEntityDocument, dataToRetrieve);
  }
  return result;
}
/**
 * Creates the final entity to be returned as the result.
 *
 * @param timelineEntityDocument which has all the information for the entity
 * @param confsToRetrieve specifies config filters to be applied
 * @param metricsToRetrieve specifies metric filters to be applied
 * @return {@link TimelineEntity} as the result
 */
public static TimelineEntity createEntityToBeReturned(
    TimelineEntityDocument timelineEntityDocument,
    TimelineFilterList confsToRetrieve,
    TimelineFilterList metricsToRetrieve) {
  final TimelineEntity entity =
      timelineEntityDocument.fetchTimelineEntity();
  // A null filter list means "leave that section of the entity untouched".
  if (confsToRetrieve != null) {
    entity.setConfigs(DocumentStoreUtils.applyConfigFilter(
        confsToRetrieve, entity.getConfigs()));
  }
  if (metricsToRetrieve != null) {
    entity.setMetrics(DocumentStoreUtils.transformMetrics(
        metricsToRetrieve, timelineEntityDocument.getMetrics()));
  }
  return entity;
}
// Creates an empty entity of the concrete subclass matching the given type
// string. Throws IllegalArgumentException (from Enum.valueOf) for unknown
// types, matching the previous switch-on-valueOf behavior.
private static TimelineEntity createTimelineEntity(String type,
    TimelineEntity timelineEntity) {
  TimelineEntityType entityType = TimelineEntityType.valueOf(type);
  if (entityType == TimelineEntityType.YARN_APPLICATION) {
    return new ApplicationEntity();
  }
  if (entityType == TimelineEntityType.YARN_FLOW_RUN) {
    return new FlowRunEntity();
  }
  if (entityType == TimelineEntityType.YARN_FLOW_ACTIVITY) {
    // Flow activity carries its flow runs along, so copy them onto the
    // fresh instance.
    FlowActivityEntity source = (FlowActivityEntity) timelineEntity;
    FlowActivityEntity copy = new FlowActivityEntity();
    copy.addFlowRuns(source.getFlowRuns());
    return copy;
  }
  return new TimelineEntity();
}
// Copies the requested optional fields from the stored document onto the
// entity that will be returned to the reader. Field.ALL expands to every
// field; unrecognized fields are silently ignored.
private static void fillFields(TimelineEntity finalEntity,
    TimelineEntityDocument entityDoc,
    TimelineDataToRetrieve dataToRetrieve) {
  EnumSet<TimelineReader.Field> fieldsToRetrieve =
      dataToRetrieve.getFieldsToRetrieve();
  if (fieldsToRetrieve.contains(TimelineReader.Field.ALL)) {
    fieldsToRetrieve = EnumSet.allOf(TimelineReader.Field.class);
  }
  for (TimelineReader.Field field : fieldsToRetrieve) {
    switch(field) {
    case CONFIGS:
      finalEntity.setConfigs(applyConfigFilter(dataToRetrieve
          .getConfsToRetrieve(), entityDoc.getConfigs()));
      break;
    case METRICS:
      finalEntity.setMetrics(transformMetrics(dataToRetrieve
          .getMetricsToRetrieve(), entityDoc.getMetrics()));
      break;
    case INFO:
      finalEntity.setInfo(entityDoc.getInfo());
      break;
    case IS_RELATED_TO:
      finalEntity.setIsRelatedToEntities(entityDoc.getIsRelatedToEntities());
      break;
    case RELATES_TO:
      // Bug fix: this case previously copied the IS_RELATED_TO map (same
      // call as the case above), so RELATES_TO data was never returned.
      finalEntity.setRelatesToEntities(entityDoc.getRelatesToEntities());
      break;
    case EVENTS:
      finalEntity.setEvents(transformEvents(entityDoc.getEvents().values()));
      break;
    default:
    }
  }
}
// Flattens a collection of event sub-document sets into a single sorted
// (TreeSet-backed) NavigableSet of TimelineEvent instances.
private static NavigableSet<TimelineEvent> transformEvents(
    Collection<Set<TimelineEventSubDoc>> eventSetColl) {
  NavigableSet<TimelineEvent> result = new TreeSet<>();
  for (Set<TimelineEventSubDoc> subDocSet : eventSetColl) {
    for (TimelineEventSubDoc subDoc : subDocSet) {
      result.add(subDoc.fetchTimelineEvent());
    }
  }
  return result;
}
// Converts stored metric sub-documents into TimelineMetric objects. When a
// filter list is given and none of its prefixes match the stored metric
// ids, an empty (mutable) set is returned instead.
public static Set<TimelineMetric> transformMetrics(
    TimelineFilterList metricsToRetrieve,
    Map<String, Set<TimelineMetricSubDoc>> metrics) {
  if (metricsToRetrieve != null &&
      !hasDataToBeRetrieve(metricsToRetrieve, metrics.keySet())) {
    return new HashSet<>();
  }
  Set<TimelineMetric> result = new HashSet<>();
  for (Set<TimelineMetricSubDoc> subDocSet : metrics.values()) {
    for (TimelineMetricSubDoc subDoc : subDocSet) {
      result.add(subDoc.fetchTimelineMetric());
    }
  }
  return result;
}
// Returns the configs untouched when no filter is given or the filter
// matches; otherwise returns an empty (mutable) map.
public static Map<String, String> applyConfigFilter(
    TimelineFilterList configsToRetrieve, Map<String, String> configs) {
  if (configsToRetrieve != null &&
      !hasDataToBeRetrieve(configsToRetrieve, configs.keySet())) {
    return new HashMap<>();
  }
  return configs;
}
// Decides whether any of the stored keys in dataSet satisfy the prefix
// filter list: OR requires at least one prefix to be present, AND requires
// all of them; an empty prefix set always matches.
//
// Bug fix: the previous switch had no break statements, so a failed OR
// check fell through into the AND check and then into default. The final
// result happened to coincide with the intended one, but only by accident;
// each case now returns explicitly.
private static boolean hasDataToBeRetrieve(
    TimelineFilterList timelineFilters, Set<String> dataSet) {
  Set<String> dataToBeRetrieved = new HashSet<>();
  for (TimelineFilter timelineFilter : timelineFilters.getFilterList()) {
    // Filters handled here are always prefix filters.
    dataToBeRetrieved.add(
        ((TimelinePrefixFilter) timelineFilter).getPrefix());
  }
  switch (timelineFilters.getOperator()) {
  case OR:
    return dataToBeRetrieved.isEmpty()
        || !Collections.disjoint(dataSet, dataToBeRetrieved);
  case AND:
    return dataToBeRetrieved.isEmpty()
        || dataSet.containsAll(dataToBeRetrieved);
  default:
    return false;
  }
}
}
|
DocumentStoreUtils
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/ParameterName.java
|
{
"start": 2884,
"end": 12638
}
|
// NOTE(review): the class name was elided in this snippet; the constructor
// name indicates this is Error Prone's ParameterName check. It verifies
// that parameter-name comments on call arguments (e.g. "/* foo= */")
// match the corresponding formal parameter's name and use the right
// format, and that varargs comments only appear on the first vararg.
class ____ extends BugChecker
    implements MethodInvocationTreeMatcher, NewClassTreeMatcher {
  // Package prefixes (each normalized to end with '.') for which the check
  // is suppressed entirely.
  private final ImmutableList<String> exemptPackages;
  // Whether to also check trees that have no explicit source positions.
  private final boolean matchImplicitSource;
  @Inject
  ParameterName(ErrorProneFlags flags) {
    this.exemptPackages =
        flags.getListOrEmpty("ParameterName:exemptPackagePrefixes").stream()
            // add a trailing '.' so that e.g. com.foo matches as a prefix of com.foo.bar, but not
            // com.foobar
            .map(p -> p.endsWith(".") ? p : p + ".")
            .collect(toImmutableList());
    this.matchImplicitSource = flags.getBoolean("ParameterName:matchImplicitSource").orElse(true);
  }
  @Override
  public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
    // Matches are reported via state.reportMatch inside checkArguments, so
    // NO_MATCH is always returned here.
    checkArguments(
        tree, tree.getArguments(), argListStartPosition(tree.getMethodSelect(), state), state);
    return NO_MATCH;
  }
  @Override
  public Description matchNewClass(NewClassTree tree, VisitorState state) {
    checkArguments(
        tree, tree.getArguments(), argListStartPosition(tree.getIdentifier(), state), state);
    return NO_MATCH;
  }
  // Returns the position just before the argument list (end of the method
  // select / class identifier), or NOPOS when positions are unavailable or
  // implicit source is excluded by configuration.
  int argListStartPosition(Tree tree, VisitorState state) {
    if (!matchImplicitSource && !hasExplicitSource(tree, state)) {
      return Position.NOPOS;
    }
    int pos = state.getEndPosition(tree);
    if (pos != Position.NOPOS) {
      return pos;
    }
    return getStartPosition(tree);
  }
  private void checkArguments(
      Tree tree,
      List<? extends ExpressionTree> arguments,
      int argListStartPosition,
      VisitorState state) {
    if (arguments.isEmpty()) {
      return;
    }
    MethodSymbol sym = (MethodSymbol) ASTHelpers.getSymbol(tree);
    // Synthetic parameter names (e.g. arg0) carry no information worth
    // matching against.
    if (NamedParameterComment.containsSyntheticParameterName(sym)) {
      return;
    }
    int start = argListStartPosition;
    if (start == Position.NOPOS) {
      // best effort work-around for https://github.com/google/error-prone/issues/780
      return;
    }
    String enclosingClass = ASTHelpers.enclosingClass(sym).toString();
    if (exemptPackages.stream().anyMatch(enclosingClass::startsWith)) {
      return;
    }
    Iterator<? extends ExpressionTree> argumentIterator = arguments.iterator();
    // For each parameter/argument pair, we tokenize the characters between the end of the
    // previous argument (or the start of the argument list, in the case of the first argument)
    // and the start of the current argument. The `start` variable is advanced each time, stepping
    // over each argument when we finish processing it.
    for (VarSymbol param : sym.getParameters()) {
      if (!argumentIterator.hasNext()) {
        return; // A vararg parameter has zero corresponding arguments passed
      }
      ExpressionTree argument = argumentIterator.next();
      Optional<Range<Integer>> positions = positions(argument, state);
      if (positions.isEmpty()) {
        return;
      }
      start =
          processArgument(
              positions.get(), start, state, tok -> checkArgument(param, argument, tok, state));
    }
    // handle any varargs arguments after the first
    while (argumentIterator.hasNext()) {
      ExpressionTree argument = argumentIterator.next();
      Optional<Range<Integer>> positions = positions(argument, state);
      if (positions.isEmpty()) {
        return;
      }
      start =
          processArgument(positions.get(), start, state, tok -> checkComment(argument, tok, state));
    }
  }
  /** Returns the source span for a tree, or empty if the position information is not available. */
  Optional<Range<Integer>> positions(Tree tree, VisitorState state) {
    int endPosition = state.getEndPosition(tree);
    if (endPosition == Position.NOPOS) {
      return Optional.empty();
    }
    return Optional.of(Range.closedOpen(getStartPosition(tree), endPosition));
  }
  // Tokenizes the source between `offset` and the end of the argument span,
  // skips tokens before the argument, and hands the argument's first token
  // (which carries any preceding comments) to the consumer. Returns the new
  // scan offset (end of this argument).
  private static int processArgument(
      Range<Integer> positions,
      int offset,
      VisitorState state,
      Consumer<ErrorProneToken> consumer) {
    String source = state.getSourceCode().subSequence(offset, positions.upperEndpoint()).toString();
    Deque<ErrorProneToken> tokens =
        new ArrayDeque<>(ErrorProneTokens.getTokens(source, offset, state.context));
    if (advanceTokens(tokens, positions)) {
      consumer.accept(tokens.removeFirst());
    }
    return positions.upperEndpoint();
  }
  // Drops tokens that precede the argument span; returns true iff the first
  // remaining token lies inside the span.
  private static boolean advanceTokens(Deque<ErrorProneToken> tokens, Range<Integer> actual) {
    while (!tokens.isEmpty() && tokens.getFirst().pos() < actual.lowerEndpoint()) {
      tokens.removeFirst();
    }
    if (tokens.isEmpty()) {
      return false;
    }
    if (!actual.contains(tokens.getFirst().pos())) {
      return false;
    }
    return true;
  }
  // Captures one candidate parameter-name comment plus which of the two
  // validity dimensions (format, name) it satisfies.
  private record FixInfo(
      boolean isFormatCorrect, boolean isNameCorrect, ErrorProneComment comment, String name) {
    static FixInfo create(
        boolean isFormatCorrect, boolean isNameCorrect, ErrorProneComment comment, String name) {
      return new FixInfo(isFormatCorrect, isNameCorrect, comment, name);
    }
  }
  // Validates the parameter-name comments attached to `token` against the
  // formal parameter, reporting a diagnostic with suggested fixes for each
  // comment that is wrong in name, format, or both.
  private void checkArgument(
      VarSymbol formal, ExpressionTree actual, ErrorProneToken token, VisitorState state) {
    List<FixInfo> matches = new ArrayList<>();
    for (ErrorProneComment comment : token.comments()) {
      if (comment.getStyle().equals(ErrorProneCommentStyle.LINE)) {
        // These are usually not intended as a parameter comment, and we don't want to flag if they
        // happen to match the parameter comment format.
        continue;
      }
      Matcher m =
          NamedParameterComment.PARAMETER_COMMENT_PATTERN.matcher(
              Comments.getTextFromComment(comment));
      if (!m.matches()) {
        continue;
      }
      // XOR: varargs comments need the "..." group, non-varargs must not
      // have it.
      boolean isFormatCorrect = isVarargs(formal) ^ Strings.isNullOrEmpty(m.group(2));
      String name = m.group(1);
      boolean isNameCorrect = formal.getSimpleName().contentEquals(name);
      // If there are multiple parameter name comments, bail if any one of them is an exact match.
      if (isNameCorrect && isFormatCorrect) {
        matches.clear();
        break;
      }
      matches.add(FixInfo.create(isFormatCorrect, isNameCorrect, comment, name));
    }
    String fixTemplate = isVarargs(formal) ? "/* %s...= */" : "/* %s= */";
    for (FixInfo match : matches) {
      SuggestedFix rewriteCommentFix =
          rewriteComment(match.comment(), String.format(fixTemplate, formal.getSimpleName()));
      SuggestedFix rewriteToRegularCommentFix =
          rewriteComment(match.comment(), String.format("/* %s */", match.name()));
      Description description;
      if (match.isFormatCorrect() && !match.isNameCorrect()) {
        description =
            buildDescription(actual)
                .setMessage(
                    String.format(
                        "`%s` does not match formal parameter name `%s`; either fix the name or"
                            + " use a regular comment",
                        match.comment().getText(), formal.getSimpleName()))
                .addFix(rewriteCommentFix)
                .addFix(rewriteToRegularCommentFix)
                .build();
      } else if (!match.isFormatCorrect() && match.isNameCorrect()) {
        description =
            buildDescription(actual)
                .setMessage(
                    String.format(
                        "parameter name comment `%s` uses incorrect format",
                        match.comment().getText()))
                .addFix(rewriteCommentFix)
                .build();
      } else if (!match.isFormatCorrect() && !match.isNameCorrect()) {
        description =
            buildDescription(actual)
                .setMessage(
                    String.format(
                        "`%s` does not match formal parameter name `%s` and uses incorrect "
                            + "format; either fix the format or use a regular comment",
                        match.comment().getText(), formal.getSimpleName()))
                .addFix(rewriteCommentFix)
                .addFix(rewriteToRegularCommentFix)
                .build();
      } else {
        // Fully-correct comments were removed from `matches` above, so this
        // branch is unreachable.
        throw new AssertionError(
            "Unexpected match with both isNameCorrect and isFormatCorrect true: " + match);
      }
      state.reportMatch(description);
    }
  }
  // Replaces the full source span of `comment` with `format`.
  private static SuggestedFix rewriteComment(ErrorProneComment comment, String format) {
    int replacementStartPos = comment.getSourcePos(0);
    int replacementEndPos = comment.getSourcePos(comment.getText().length() - 1) + 1;
    return SuggestedFix.replace(replacementStartPos, replacementEndPos, format);
  }
  // complains on parameter name comments on varargs past the first one
  private void checkComment(ExpressionTree arg, ErrorProneToken token, VisitorState state) {
    for (ErrorProneComment comment : token.comments()) {
      Matcher m =
          NamedParameterComment.PARAMETER_COMMENT_PATTERN.matcher(
              Comments.getTextFromComment(comment));
      if (m.matches()) {
        SuggestedFix rewriteCommentFix =
            rewriteComment(
                comment, String.format("/* %s%s */", m.group(1), firstNonNull(m.group(2), "")));
        state.reportMatch(
            buildDescription(arg)
                .addFix(rewriteCommentFix)
                .setMessage("parameter name comment only allowed on first varargs argument")
                .build());
      }
    }
  }
  // True when `sym` is the last parameter of a varargs method.
  private static boolean isVarargs(VarSymbol sym) {
    Preconditions.checkArgument(
        sym.owner instanceof MethodSymbol, "sym must be a parameter to a method");
    MethodSymbol method = (MethodSymbol) sym.owner;
    return method.isVarArgs() && (method.getParameters().last() == sym);
  }
}
|
ParameterName
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.