language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/naturalid/A.java | {
"start": 732,
"end": 1488
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.TABLE)
private long oid;
@Version
private int version;
@Column
@NaturalId(mutable = false)
private String name;
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE)
@org.hibernate.annotations.OptimisticLock(excluded = true)
@jakarta.persistence.OneToMany(mappedBy = "a")
private Set<D> ds = new HashSet<D>();
@jakarta.persistence.OneToOne
private D singleD = null;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Set<D> getDs() {
return ds;
}
public void setDs(Set<D> ds) {
this.ds = ds;
}
public D getSingleD() {
return singleD;
}
public void setSingleD(D singleD) {
this.singleD = singleD;
}
}
| A |
java | quarkusio__quarkus | extensions/oidc-client-registration/runtime/src/test/java/io/quarkus/oidc/client/registration/OidcClientRegistrationConfigBuilderTest.java | {
"start": 479,
"end": 13719
} | class ____ {
@Test
public void testDefaultValues() {
var config = OidcClientRegistrationConfig.builder().build();
testDefaultValues(config);
config = new OidcClientRegistrationConfigBuilder().build();
testDefaultValues(config);
}
private static void testDefaultValues(OidcClientRegistrationConfig config) {
// OidcClientRegistrationConfig methods
assertTrue(config.id().isEmpty());
assertTrue(config.registrationEnabled());
assertTrue(config.registerEarly());
assertTrue(config.initialToken().isEmpty());
assertNotNull(config.metadata());
assertTrue(config.metadata().clientName().isEmpty());
assertTrue(config.metadata().redirectUri().isEmpty());
assertTrue(config.metadata().postLogoutUri().isEmpty());
assertTrue(config.metadata().extraProps().isEmpty());
// OidcCommonConfig methods
assertTrue(config.authServerUrl().isEmpty());
assertTrue(config.discoveryEnabled().isEmpty());
assertTrue(config.registrationPath().isEmpty());
assertTrue(config.connectionDelay().isEmpty());
assertEquals(3, config.connectionRetryCount());
assertEquals(10, config.connectionTimeout().getSeconds());
assertFalse(config.useBlockingDnsLookup());
assertTrue(config.maxPoolSize().isEmpty());
assertTrue(config.followRedirects());
assertNotNull(config.proxy());
assertTrue(config.proxy().host().isEmpty());
assertEquals(80, config.proxy().port());
assertTrue(config.proxy().username().isEmpty());
assertTrue(config.proxy().password().isEmpty());
assertNotNull(config.tls());
assertTrue(config.tls().tlsConfigurationName().isEmpty());
assertTrue(config.tls().verification().isEmpty());
assertTrue(config.tls().keyStoreFile().isEmpty());
assertTrue(config.tls().keyStoreFileType().isEmpty());
assertTrue(config.tls().keyStoreProvider().isEmpty());
assertTrue(config.tls().keyStorePassword().isEmpty());
assertTrue(config.tls().keyStoreKeyAlias().isEmpty());
assertTrue(config.tls().keyStoreKeyPassword().isEmpty());
assertTrue(config.tls().trustStoreFile().isEmpty());
assertTrue(config.tls().trustStorePassword().isEmpty());
assertTrue(config.tls().trustStoreCertAlias().isEmpty());
assertTrue(config.tls().trustStoreFileType().isEmpty());
assertTrue(config.tls().trustStoreProvider().isEmpty());
}
@Test
public void testSetEveryProperty() {
var config = OidcClientRegistrationConfig.builder()
// OidcClientRegistrationConfig methods
.id("pink")
.registrationEnabled(false)
.registerEarly(false)
.initialToken("floyd")
.metadata()
.clientName("another")
.redirectUri("brick")
.postLogoutUri("in")
.extraProps(Map.of("the", "wall"))
.extraProperty("hey", "teacher")
.end()
// OidcCommonConfig methods
.authServerUrl("we")
.discoveryEnabled(false)
.registrationPath("don't")
.connectionDelay(Duration.ofSeconds(656))
.connectionRetryCount(565)
.connectionTimeout(Duration.ofSeconds(673))
.useBlockingDnsLookup(true)
.maxPoolSize(376)
.followRedirects(false)
.proxy("need", 55, "no", "education")
.tlsConfigurationName("Teacher!")
.build();
// OidcClientRegistrationConfig methods
assertEquals("pink", config.id().orElse(null));
assertFalse(config.registrationEnabled());
assertFalse(config.registerEarly());
assertEquals("floyd", config.initialToken().orElse(null));
assertNotNull(config.metadata());
assertEquals("another", config.metadata().clientName().orElse(null));
assertEquals("brick", config.metadata().redirectUri().orElse(null));
assertEquals("in", config.metadata().postLogoutUri().orElse(null));
assertEquals(2, config.metadata().extraProps().size());
assertEquals("wall", config.metadata().extraProps().get("the"));
assertEquals("teacher", config.metadata().extraProps().get("hey"));
// OidcCommonConfig methods
assertEquals("we", config.authServerUrl().orElse(null));
assertFalse(config.discoveryEnabled().orElse(false));
assertEquals("don't", config.registrationPath().orElse(null));
assertEquals(656, config.connectionDelay().map(Duration::getSeconds).orElse(null));
assertEquals(565, config.connectionRetryCount());
assertEquals(673, config.connectionTimeout().getSeconds());
assertTrue(config.useBlockingDnsLookup());
assertEquals(376, config.maxPoolSize().orElse(0));
assertFalse(config.followRedirects());
assertNotNull(config.proxy());
assertEquals("need", config.proxy().host().orElse(null));
assertEquals(55, config.proxy().port());
assertEquals("no", config.proxy().username().orElse(null));
assertEquals("education", config.proxy().password().orElse(null));
assertNotNull(config.tls());
assertEquals("Teacher!", config.tls().tlsConfigurationName().orElse(null));
assertTrue(config.tls().verification().isEmpty());
assertTrue(config.tls().keyStoreFile().isEmpty());
assertTrue(config.tls().keyStoreFileType().isEmpty());
assertTrue(config.tls().keyStoreProvider().isEmpty());
assertTrue(config.tls().keyStorePassword().isEmpty());
assertTrue(config.tls().keyStoreKeyAlias().isEmpty());
assertTrue(config.tls().keyStoreKeyPassword().isEmpty());
assertTrue(config.tls().trustStoreFile().isEmpty());
assertTrue(config.tls().trustStorePassword().isEmpty());
assertTrue(config.tls().trustStoreCertAlias().isEmpty());
assertTrue(config.tls().trustStoreFileType().isEmpty());
assertTrue(config.tls().trustStoreProvider().isEmpty());
}
@Test
public void testCopyProxyProperties() {
var previousConfig = OidcClientRegistrationConfig.builder()
.proxy("need", 55, "no", "education")
.build();
var newConfig = OidcClientRegistrationConfig.builder(previousConfig)
.proxy("fast-car", 22)
.build();
assertNotNull(previousConfig.proxy());
assertEquals("fast-car", newConfig.proxy().host().orElse(null));
assertEquals(22, newConfig.proxy().port());
assertEquals("no", newConfig.proxy().username().orElse(null));
assertEquals("education", newConfig.proxy().password().orElse(null));
}
@Test
public void testCopyClientRegistrationConfigProperties() {
var previousConfigBuilder = OidcClientRegistrationConfig.builder();
var previousConfig = new MetadataBuilder(previousConfigBuilder)
.clientName("another")
.redirectUri("brick")
.postLogoutUri("in")
.extraProps(Map.of("the", "wall"))
.extraProperty("hey", "teacher")
.end()
.id("pink")
.registrationEnabled(false)
.registerEarly(false)
.initialToken("floyd")
.build();
assertNotNull(previousConfig.metadata());
assertEquals("another", previousConfig.metadata().clientName().orElse(null));
assertEquals("brick", previousConfig.metadata().redirectUri().orElse(null));
assertEquals("in", previousConfig.metadata().postLogoutUri().orElse(null));
assertEquals(2, previousConfig.metadata().extraProps().size());
assertEquals("wall", previousConfig.metadata().extraProps().get("the"));
assertEquals("teacher", previousConfig.metadata().extraProps().get("hey"));
var metadata = new MetadataBuilder()
.clientName("place")
.postLogoutUri("is")
.extraProperty("better", "starting")
.build();
var newConfig = OidcClientRegistrationConfig.builder(previousConfig)
.id("any")
.registerEarly(true)
.metadata(metadata)
.build();
assertEquals("any", newConfig.id().orElse(null));
assertFalse(newConfig.registrationEnabled());
assertTrue(newConfig.registerEarly());
assertEquals("floyd", newConfig.initialToken().orElse(null));
assertNotNull(newConfig.metadata());
assertEquals("place", newConfig.metadata().clientName().orElse(null));
assertTrue(newConfig.metadata().redirectUri().isEmpty());
assertEquals("is", newConfig.metadata().postLogoutUri().orElse(null));
assertEquals(1, newConfig.metadata().extraProps().size());
assertEquals("starting", newConfig.metadata().extraProps().get("better"));
}
@Test
public void testCopyOidcCommonConfigProperties() {
var previousConfig = OidcClientRegistrationConfig.builder()
.authServerUrl("we")
.discoveryEnabled(false)
.registrationPath("don't")
.connectionDelay(Duration.ofSeconds(656))
.connectionRetryCount(565)
.connectionTimeout(Duration.ofSeconds(673))
.useBlockingDnsLookup(true)
.maxPoolSize(376)
.followRedirects(false)
.proxy("need", 55, "no", "education")
.tlsConfigurationName("Teacher!")
.build();
var newConfig = OidcClientRegistrationConfig.builder(previousConfig)
.discoveryEnabled(true)
.connectionDelay(Duration.ofSeconds(753))
.connectionTimeout(Duration.ofSeconds(357))
.maxPoolSize(1988)
.proxy("cross", 44, "the", "boarder")
.build();
assertEquals("we", newConfig.authServerUrl().orElse(null));
assertTrue(newConfig.discoveryEnabled().orElse(false));
assertEquals("don't", newConfig.registrationPath().orElse(null));
assertEquals(753, newConfig.connectionDelay().map(Duration::getSeconds).orElse(null));
assertEquals(565, newConfig.connectionRetryCount());
assertEquals(357, newConfig.connectionTimeout().getSeconds());
assertTrue(newConfig.useBlockingDnsLookup());
assertEquals(1988, newConfig.maxPoolSize().orElse(0));
assertFalse(newConfig.followRedirects());
assertNotNull(newConfig.proxy());
assertEquals("cross", newConfig.proxy().host().orElse(null));
assertEquals(44, newConfig.proxy().port());
assertEquals("the", newConfig.proxy().username().orElse(null));
assertEquals("boarder", newConfig.proxy().password().orElse(null));
assertNotNull(newConfig.tls());
assertEquals("Teacher!", newConfig.tls().tlsConfigurationName().orElse(null));
assertTrue(newConfig.tls().verification().isEmpty());
assertTrue(newConfig.tls().keyStoreFile().isEmpty());
assertTrue(newConfig.tls().keyStoreFileType().isEmpty());
assertTrue(newConfig.tls().keyStoreProvider().isEmpty());
assertTrue(newConfig.tls().keyStorePassword().isEmpty());
assertTrue(newConfig.tls().keyStoreKeyAlias().isEmpty());
assertTrue(newConfig.tls().keyStoreKeyPassword().isEmpty());
assertTrue(newConfig.tls().trustStoreFile().isEmpty());
assertTrue(newConfig.tls().trustStorePassword().isEmpty());
assertTrue(newConfig.tls().trustStoreCertAlias().isEmpty());
assertTrue(newConfig.tls().trustStoreFileType().isEmpty());
assertTrue(newConfig.tls().trustStoreProvider().isEmpty());
}
@Test
public void testCreateBuilderShortcuts() {
OidcClientRegistrationConfig config = OidcClientRegistrationConfig
.authServerUrl("auth-server-url")
.metadata("Dynamic Client", "http://localhost:8081/protected/new-oidc-client-reg")
.build();
assertEquals("http://localhost:8081/protected/new-oidc-client-reg", config.metadata().redirectUri().orElse(null));
assertEquals("Dynamic Client", config.metadata().clientName().orElse(null));
config = OidcClientRegistrationConfig
.registrationPath("registration-path")
.metadata("redirect-uri")
.build();
assertEquals("registration-path", config.registrationPath().orElse(null));
assertEquals("redirect-uri", config.metadata().redirectUri().orElse(null));
}
@Test
public void testMetadataBuilderDefaults() {
var metadata = new MetadataBuilder().build();
assertTrue(metadata.clientName().isEmpty());
assertTrue(metadata.postLogoutUri().isEmpty());
assertTrue(metadata.redirectUri().isEmpty());
assertTrue(metadata.extraProps().isEmpty());
}
}
| OidcClientRegistrationConfigBuilderTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/ConstructorErrorTest_private.java | {
"start": 189,
"end": 521
} | class ____ extends TestCase {
public void test_error() throws Exception {
Exception error = null;
try {
JSON.parseObject("{}", Model.class);
} catch (JSONException ex) {
error = ex;
}
Assert.assertNotNull(error);
}
private static | ConstructorErrorTest_private |
java | spring-projects__spring-framework | spring-jdbc/src/test/java/org/springframework/jdbc/object/SqlUpdateTests.java | {
"start": 1629,
"end": 4590
} | class ____ {
private static final String UPDATE =
"update seat_status set booking_id = null";
private static final String UPDATE_INT =
"update seat_status set booking_id = null where performance_id = ?";
private static final String UPDATE_INT_INT =
"update seat_status set booking_id = null where performance_id = ? and price_band_id = ?";
private static final String UPDATE_NAMED_PARAMETERS =
"update seat_status set booking_id = null where performance_id = :perfId and price_band_id = :priceId";
private static final String UPDATE_STRING =
"update seat_status set booking_id = null where name = ?";
private static final String UPDATE_OBJECTS =
"update seat_status set booking_id = null where performance_id = ? and price_band_id = ? and name = ? and confirmed = ?";
private static final String INSERT_GENERATE_KEYS =
"insert into show (name) values(?)";
private Connection connection = mock();
private DataSource dataSource = mock();
private PreparedStatement preparedStatement = mock();
private ResultSet resultSet = mock();
private ResultSetMetaData resultSetMetaData = mock();
@BeforeEach
void setUp() throws Exception {
given(dataSource.getConnection()).willReturn(connection);
}
@AfterEach
void verifyClosed() throws Exception {
verify(preparedStatement).close();
verify(connection).close();
}
@Test
void testUpdate() throws SQLException {
given(preparedStatement.executeUpdate()).willReturn(1);
given(connection.prepareStatement(UPDATE)).willReturn(preparedStatement);
Updater pc = new Updater();
int rowsAffected = pc.run();
assertThat(rowsAffected).isEqualTo(1);
}
@Test
void testUpdateInt() throws SQLException {
given(preparedStatement.executeUpdate()).willReturn(1);
given(connection.prepareStatement(UPDATE_INT)).willReturn(preparedStatement);
IntUpdater pc = new IntUpdater();
int rowsAffected = pc.run(1);
assertThat(rowsAffected).isEqualTo(1);
verify(preparedStatement).setObject(1, 1, Types.NUMERIC);
}
@Test
void testUpdateIntInt() throws SQLException {
given(preparedStatement.executeUpdate()).willReturn(1);
given(connection.prepareStatement(UPDATE_INT_INT)).willReturn(preparedStatement);
IntIntUpdater pc = new IntIntUpdater();
int rowsAffected = pc.run(1, 1);
assertThat(rowsAffected).isEqualTo(1);
verify(preparedStatement).setObject(1, 1, Types.NUMERIC);
verify(preparedStatement).setObject(2, 1, Types.NUMERIC);
}
@Test
void testNamedParameterUpdateWithUnnamedDeclarations() throws SQLException {
doTestNamedParameterUpdate(false);
}
@Test
void testNamedParameterUpdateWithNamedDeclarations() throws SQLException {
doTestNamedParameterUpdate(true);
}
private void doTestNamedParameterUpdate(final boolean namedDeclarations)
throws SQLException {
given(preparedStatement.executeUpdate()).willReturn(1);
given(connection.prepareStatement(UPDATE_INT_INT)).willReturn(preparedStatement);
| SqlUpdateTests |
java | quarkusio__quarkus | integration-tests/test-extension/extension/deployment/src/main/java/io/quarkus/extest/deployment/TestRecordProcessor.java | {
"start": 318,
"end": 517
} | class ____ {
@BuildStep
@Record(ExecutionTime.RUNTIME_INIT)
public void record(TestRecordRecorder recorder) {
recorder.record(new TestRecord("foo", 100));
}
}
| TestRecordProcessor |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/ListConnectorSyncJobsActionRequestBWCSerializingTests.java | {
"start": 968,
"end": 3589
} | class ____ extends AbstractBWCSerializationTestCase<
ListConnectorSyncJobsAction.Request> {
@Override
protected Writeable.Reader<ListConnectorSyncJobsAction.Request> instanceReader() {
return ListConnectorSyncJobsAction.Request::new;
}
@Override
protected ListConnectorSyncJobsAction.Request createTestInstance() {
PageParams pageParams = EnterpriseSearchModuleTestUtils.randomPageParams();
String connectorId = randomAlphaOfLength(10);
ConnectorSyncStatus syncStatus = ConnectorTestUtils.getRandomSyncStatus();
ConnectorSyncJobType syncJobType = ConnectorTestUtils.getRandomSyncJobType();
return new ListConnectorSyncJobsAction.Request(pageParams, connectorId, syncStatus, Collections.singletonList(syncJobType));
}
@Override
protected ListConnectorSyncJobsAction.Request mutateInstance(ListConnectorSyncJobsAction.Request instance) throws IOException {
PageParams pageParams = instance.getPageParams();
String connectorId = instance.getConnectorId();
ConnectorSyncStatus syncStatus = instance.getConnectorSyncStatus();
ConnectorSyncJobType syncJobType = instance.getConnectorSyncJobTypeList().get(0);
switch (randomIntBetween(0, 3)) {
case 0 -> pageParams = randomValueOtherThan(pageParams, EnterpriseSearchModuleTestUtils::randomPageParams);
case 1 -> connectorId = randomValueOtherThan(connectorId, () -> randomAlphaOfLength(10));
case 2 -> syncStatus = randomValueOtherThan(syncStatus, ConnectorTestUtils::getRandomSyncStatus);
case 3 -> syncJobType = randomValueOtherThan(syncJobType, ConnectorTestUtils::getRandomSyncJobType);
default -> throw new AssertionError("Illegal randomisation branch");
}
return new ListConnectorSyncJobsAction.Request(pageParams, connectorId, syncStatus, Collections.singletonList(syncJobType));
}
@Override
protected ListConnectorSyncJobsAction.Request doParseInstance(XContentParser parser) throws IOException {
return ListConnectorSyncJobsAction.Request.parse(parser);
}
@Override
protected ListConnectorSyncJobsAction.Request mutateInstanceForVersion(
ListConnectorSyncJobsAction.Request instance,
TransportVersion version
) {
return new ListConnectorSyncJobsAction.Request(
instance.getPageParams(),
instance.getConnectorId(),
instance.getConnectorSyncStatus(),
instance.getConnectorSyncJobTypeList()
);
}
}
| ListConnectorSyncJobsActionRequestBWCSerializingTests |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/controller/QuorumController.java | {
"start": 51659,
"end": 62318
} | class ____ implements ControllerWriteOperation<Void> {
@Override
public ControllerResult<Void> generateRecordsAndResult() {
try {
return ActivationRecordsGenerator.generate(
log::warn,
offsetControl.transactionStartOffset(),
bootstrapMetadata,
featureControl.metadataVersion(),
configurationControl.getStaticallyConfiguredMinInsyncReplicas());
} catch (Throwable t) {
throw fatalFaultHandler.handleFault("exception while completing controller " +
"activation", t);
}
}
@Override
public void processBatchEndOffset(long offset) {
// As part of completing our transition to active controller, we reschedule the
// periodic tasks here. At this point, all the records we generated in
// generateRecordsAndResult have been applied, so we have the correct value for
// metadata.version and other in-memory state.
periodicControl.activate();
}
}
void renounce() {
try {
if (curClaimEpoch == -1) {
throw new RuntimeException("Cannot renounce leadership because we are not the " +
"current leader.");
}
raftClient.resign(curClaimEpoch);
curClaimEpoch = -1;
deferredEventQueue.failAll(ControllerExceptions.
newWrongControllerException(OptionalInt.empty()));
offsetControl.deactivate();
clusterControl.deactivate();
periodicControl.deactivate();
} catch (Throwable e) {
fatalFaultHandler.handleFault("exception while renouncing leadership", e);
}
}
/**
* Apply the metadata record to its corresponding in-memory state(s)
*
* @param message The metadata record
* @param snapshotId The snapshotId if this record is from a snapshot
* @param offset The offset of the record
*/
private void replay(ApiMessage message, Optional<OffsetAndEpoch> snapshotId, long offset) {
if (log.isTraceEnabled()) {
if (snapshotId.isPresent()) {
log.trace("Replaying snapshot {} record {}",
Snapshots.filenameFromSnapshotId(snapshotId.get()),
recordRedactor.toLoggableString(message));
} else {
log.trace("Replaying log record {} with offset {}",
recordRedactor.toLoggableString(message), offset);
}
}
MetadataRecordType type = MetadataRecordType.fromId(message.apiKey());
switch (type) {
case REGISTER_BROKER_RECORD:
clusterControl.replay((RegisterBrokerRecord) message, offset);
break;
case UNREGISTER_BROKER_RECORD:
clusterControl.replay((UnregisterBrokerRecord) message);
break;
case TOPIC_RECORD:
replicationControl.replay((TopicRecord) message);
break;
case PARTITION_RECORD:
replicationControl.replay((PartitionRecord) message);
break;
case CONFIG_RECORD:
configurationControl.replay((ConfigRecord) message);
break;
case PARTITION_CHANGE_RECORD:
replicationControl.replay((PartitionChangeRecord) message);
break;
case FENCE_BROKER_RECORD:
clusterControl.replay((FenceBrokerRecord) message);
break;
case UNFENCE_BROKER_RECORD:
clusterControl.replay((UnfenceBrokerRecord) message);
break;
case REMOVE_TOPIC_RECORD:
replicationControl.replay((RemoveTopicRecord) message);
break;
case FEATURE_LEVEL_RECORD:
featureControl.replay((FeatureLevelRecord) message);
break;
case CLIENT_QUOTA_RECORD:
clientQuotaControlManager.replay((ClientQuotaRecord) message);
break;
case PRODUCER_IDS_RECORD:
producerIdControlManager.replay((ProducerIdsRecord) message);
break;
case BROKER_REGISTRATION_CHANGE_RECORD:
clusterControl.replay((BrokerRegistrationChangeRecord) message);
break;
case ACCESS_CONTROL_ENTRY_RECORD:
aclControlManager.replay((AccessControlEntryRecord) message);
break;
case REMOVE_ACCESS_CONTROL_ENTRY_RECORD:
aclControlManager.replay((RemoveAccessControlEntryRecord) message);
break;
case USER_SCRAM_CREDENTIAL_RECORD:
scramControlManager.replay((UserScramCredentialRecord) message);
break;
case REMOVE_USER_SCRAM_CREDENTIAL_RECORD:
scramControlManager.replay((RemoveUserScramCredentialRecord) message);
break;
case DELEGATION_TOKEN_RECORD:
delegationTokenControlManager.replay((DelegationTokenRecord) message);
break;
case REMOVE_DELEGATION_TOKEN_RECORD:
delegationTokenControlManager.replay((RemoveDelegationTokenRecord) message);
break;
case NO_OP_RECORD:
// NoOpRecord is an empty record and doesn't need to be replayed
break;
case ZK_MIGRATION_STATE_RECORD:
// In 4.0, although migration is no longer supported and ZK has been removed from Kafka,
// users might migrate from ZK to KRaft in version 3.x and then perform a rolling upgrade to 4.0.
// Therefore, this case needs to be retained but will be a no-op.
break;
case BEGIN_TRANSACTION_RECORD:
offsetControl.replay((BeginTransactionRecord) message, offset);
break;
case END_TRANSACTION_RECORD:
offsetControl.replay((EndTransactionRecord) message, offset);
break;
case ABORT_TRANSACTION_RECORD:
offsetControl.replay((AbortTransactionRecord) message, offset);
break;
case REGISTER_CONTROLLER_RECORD:
clusterControl.replay((RegisterControllerRecord) message);
break;
case CLEAR_ELR_RECORD:
replicationControl.replay((ClearElrRecord) message);
break;
default:
throw new RuntimeException("Unhandled record type " + type);
}
}
/**
* Handles faults that cause a controller failover, but which don't abort the process.
*/
private final FaultHandler nonFatalFaultHandler;
/**
* Handles faults that should normally be fatal to the process.
*/
private final FaultHandler fatalFaultHandler;
/**
* The slf4j logger.
*/
private final Logger log;
/**
* The ID of this controller node.
*/
private final int nodeId;
/**
* The ID of this cluster.
*/
private final String clusterId;
/**
* The single-threaded queue that processes all of our events.
* It also processes timeouts.
*/
private final KafkaEventQueue queue;
/**
* The Kafka clock object to use.
*/
private final Time time;
/**
* The controller metrics.
*/
private final QuorumControllerMetrics controllerMetrics;
/**
* A registry for snapshot data. This must be accessed only by the event queue thread.
*/
private final SnapshotRegistry snapshotRegistry;
/**
* The deferred event queue which holds deferred operations which are waiting for the metadata
* log's stable offset to advance. This must be accessed only by the event queue thread.
*/
private final DeferredEventQueue deferredEventQueue;
/**
* Manages read and write offsets, and in-memory snapshots.
*/
private final OffsetControlManager offsetControl;
/**
* A predicate that returns information about whether a ConfigResource exists.
*/
private final Consumer<ConfigResource> resourceExists;
/**
* An object which stores the controller's dynamic configuration.
* This must be accessed only by the event queue thread.
*/
private final ConfigurationControlManager configurationControl;
/**
* An object which stores the controller's dynamic client quotas.
* This must be accessed only by the event queue thread.
*/
private final ClientQuotaControlManager clientQuotaControlManager;
/**
* Describes the feature versions in the cluster.
*/
private final QuorumClusterFeatureSupportDescriber clusterSupportDescriber;
/**
* Handles changes to the event queue for PeriodicTaskControlManager.
*/
private final PeriodicTaskControlManagerQueueAccessor queueAccessor;
/**
* Controls periodic tasks.
*/
private final PeriodicTaskControlManager periodicControl;
/**
* An object which stores the controller's view of the cluster.
* This must be accessed only by the event queue thread.
*/
private final ClusterControlManager clusterControl;
/**
* An object which stores the controller's view of the cluster features.
* This must be accessed only by the event queue thread.
*/
private final FeatureControlManager featureControl;
/**
* An object which stores the controller's view of the latest producer ID
* that has been generated. This must be accessed only by the event queue thread.
*/
private final ProducerIdControlManager producerIdControlManager;
/**
* An object which stores the controller's view of topics and partitions.
* This must be accessed only by the event queue thread.
*/
private final ReplicationControlManager replicationControl;
/**
* Manages SCRAM credentials, if there are any.
*/
private final ScramControlManager scramControlManager;
/**
* Manages DelegationTokens, if there are any.
*/
private final DelegationTokenControlManager delegationTokenControlManager;
/**
* Manages the standard ACLs in the cluster.
* This must be accessed only by the event queue thread.
*/
private final AclControlManager aclControlManager;
/**
* The | CompleteActivationEvent |
java | apache__kafka | storage/src/test/java/org/apache/kafka/tiered/storage/actions/StartBrokerAction.java | {
"start": 1021,
"end": 1440
} | class ____ implements TieredStorageTestAction {
private final int brokerId;
public StartBrokerAction(int brokerId) {
this.brokerId = brokerId;
}
@Override
public void doExecute(TieredStorageTestContext context) {
context.start(brokerId);
}
@Override
public void describe(PrintStream output) {
output.println("start-broker: " + brokerId);
}
}
| StartBrokerAction |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/spi/MapStructProcessingEnvironment.java | {
"start": 645,
"end": 1244
} | interface ____ {
/**
* Returns an implementation of some utility methods for
* operating on elements
*
* @return element utilities
*/
Elements getElementUtils();
/**
* Returns an implementation of some utility methods for
* operating on types.
*
* @return type utilities
*/
Types getTypeUtils();
/**
* Returns the resolved options specified by the impl of
* {@link AdditionalSupportedOptionsProvider}.
*
* @return resolved options
*/
Map<String, String> getOptions();
}
| MapStructProcessingEnvironment |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java | {
"start": 161904,
"end": 165263
} | class ____ extends ParserRuleContext {
public TerminalNode LBRACE() {
return getToken(PainlessParser.LBRACE, 0);
}
public List<MaptokenContext> maptoken() {
return getRuleContexts(MaptokenContext.class);
}
public MaptokenContext maptoken(int i) {
return getRuleContext(MaptokenContext.class, i);
}
public TerminalNode RBRACE() {
return getToken(PainlessParser.RBRACE, 0);
}
public List<TerminalNode> COMMA() {
return getTokens(PainlessParser.COMMA);
}
public TerminalNode COMMA(int i) {
return getToken(PainlessParser.COMMA, i);
}
public TerminalNode COLON() {
return getToken(PainlessParser.COLON, 0);
}
public MapinitializerContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override
public int getRuleIndex() {
return RULE_mapinitializer;
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if (visitor instanceof PainlessParserVisitor) return ((PainlessParserVisitor<? extends T>) visitor).visitMapinitializer(this);
else return visitor.visitChildren(this);
}
}
public final MapinitializerContext mapinitializer() throws RecognitionException {
MapinitializerContext _localctx = new MapinitializerContext(_ctx, getState());
enterRule(_localctx, 64, RULE_mapinitializer);
int _la;
try {
setState(509);
_errHandler.sync(this);
switch (getInterpreter().adaptivePredict(_input, 50, _ctx)) {
case 1:
enterOuterAlt(_localctx, 1); {
setState(495);
match(LBRACE);
setState(496);
maptoken();
setState(501);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la == COMMA) {
{
{
setState(497);
match(COMMA);
setState(498);
maptoken();
}
}
setState(503);
_errHandler.sync(this);
_la = _input.LA(1);
}
setState(504);
match(RBRACE);
}
break;
case 2:
enterOuterAlt(_localctx, 2); {
setState(506);
match(LBRACE);
setState(507);
match(COLON);
setState(508);
match(RBRACE);
}
break;
}
} catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
} finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | MapinitializerContext |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/util/collections/binary/WindowBytesHashMap.java | {
"start": 1339,
"end": 1721
} | class ____ extends AbstractBytesHashMap<WindowKey> {
public WindowBytesHashMap(
final Object owner,
MemoryManager memoryManager,
long memorySize,
PagedTypeSerializer<RowData> keySer,
int valueArity) {
super(owner, memoryManager, memorySize, new WindowKeySerializer(keySer), valueArity);
}
}
| WindowBytesHashMap |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java | {
"start": 1740,
"end": 23448
} | class ____ extends AbstractBulkByScrollRequestTestCase<ReindexRequest> {
private final BytesReference matchAll = new BytesArray("{ \"foo\" : \"bar\" }");
@Override
protected NamedWriteableRegistry writableRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedWriteableRegistry(searchModule.getNamedWriteables());
}
@Override
protected NamedXContentRegistry xContentRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
@Override
protected boolean enableWarningsCheck() {
// There sometimes will be a warning about specifying types in reindex requests being deprecated.
return false;
}
@Override
protected ReindexRequest createTestInstance() {
ReindexRequest reindexRequest = new ReindexRequest();
reindexRequest.setSourceIndices("source");
reindexRequest.setDestIndex("destination");
if (randomBoolean()) {
try (XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint()) {
BytesReference query = BytesReference.bytes(matchAllQuery().toXContent(builder, ToXContent.EMPTY_PARAMS));
reindexRequest.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, Integer.MAX_VALUE),
null,
query,
"user",
new SecureString("pass".toCharArray()),
emptyMap(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
)
);
} catch (IOException e) {
throw new AssertionError(e);
}
}
if (randomBoolean()) {
reindexRequest.setSourceBatchSize(randomInt(100));
}
if (randomBoolean()) {
reindexRequest.setDestOpType("create");
}
if (randomBoolean()) {
reindexRequest.setDestPipeline("my_pipeline");
}
if (randomBoolean()) {
reindexRequest.setDestRouting("=cat");
}
if (randomBoolean()) {
reindexRequest.setMaxDocs(randomIntBetween(100, 1000));
}
if (randomBoolean()) {
reindexRequest.setAbortOnVersionConflict(false);
}
if (reindexRequest.getRemoteInfo() == null && randomBoolean()) {
reindexRequest.setSourceQuery(new TermQueryBuilder("foo", "fooval"));
}
return reindexRequest;
}
@Override
protected ReindexRequest doParseInstance(XContentParser parser) throws IOException {
return ReindexRequest.fromXContent(parser, Predicates.never());
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
@Override
protected void assertEqualInstances(ReindexRequest expectedInstance, ReindexRequest newInstance) {
assertNotSame(newInstance, expectedInstance);
assertArrayEquals(expectedInstance.getSearchRequest().indices(), newInstance.getSearchRequest().indices());
assertEquals(expectedInstance.getSearchRequest(), newInstance.getSearchRequest());
assertEquals(expectedInstance.getMaxDocs(), newInstance.getMaxDocs());
assertEquals(expectedInstance.getSlices(), newInstance.getSlices());
assertEquals(expectedInstance.isAbortOnVersionConflict(), newInstance.isAbortOnVersionConflict());
assertEquals(expectedInstance.getRemoteInfo(), newInstance.getRemoteInfo());
assertEquals(expectedInstance.getDestination().getPipeline(), newInstance.getDestination().getPipeline());
assertEquals(expectedInstance.getDestination().routing(), newInstance.getDestination().routing());
assertEquals(expectedInstance.getDestination().opType(), newInstance.getDestination().opType());
}
public void testReindexFromRemoteDoesNotSupportSearchQuery() {
ReindexRequest reindex = newRequest();
reindex.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, Integer.MAX_VALUE),
null,
matchAll,
null,
null,
emptyMap(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
)
);
reindex.getSearchRequest().source().query(matchAllQuery()); // Unsupported place to put query
ActionRequestValidationException e = reindex.validate();
assertEquals(
"Validation Failed: 1: reindex from remote sources should use RemoteInfo's query instead of source's query;",
e.getMessage()
);
}
public void testReindexFromRemoteDoesNotSupportSlicesParameterGreaterThan1() {
ReindexRequest reindex = newRequest();
reindex.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, Integer.MAX_VALUE),
null,
matchAll,
null,
null,
emptyMap(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
)
);
// Enable automatic slicing with a random number of slices greater than 1 (like setting the slices URL parameter):
reindex.setSlices(between(2, Integer.MAX_VALUE));
ActionRequestValidationException e = reindex.validate();
assertEquals(
"Validation Failed: 1: reindex from remote sources doesn't support slices > 1 but was [" + reindex.getSlices() + "];",
e.getMessage()
);
}
public void testReindexFromRemoteDoesNotSupportSlicesParameterSetToAuto() {
ReindexRequest reindex = newRequest();
reindex.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, Integer.MAX_VALUE),
null,
matchAll,
null,
null,
emptyMap(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
)
);
// Enable automatic slicing with an automatically chosen number of slices (like setting the slices URL parameter to "auto"):
reindex.setSlices(AbstractBulkByScrollRequest.AUTO_SLICES);
ActionRequestValidationException e = reindex.validate();
assertEquals(
"Validation Failed: 1: reindex from remote sources doesn't support slices > 1 but was [" + reindex.getSlices() + "];",
e.getMessage()
);
}
public void testReindexFromRemoteDoesNotSupportSlicesSourceField() {
ReindexRequest reindex = newRequest();
reindex.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, Integer.MAX_VALUE),
null,
matchAll,
null,
null,
emptyMap(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
)
);
// Enable manual slicing (like setting source.slice.max and source.slice.id in the request body):
int numSlices = randomIntBetween(2, Integer.MAX_VALUE);
int sliceId = randomIntBetween(0, numSlices - 1);
reindex.getSearchRequest().source().slice(new SliceBuilder(sliceId, numSlices));
ActionRequestValidationException e = reindex.validate();
assertEquals(
"Validation Failed: 1: reindex from remote sources doesn't support source.slice but was ["
+ reindex.getSearchRequest().source().slice()
+ "];",
e.getMessage()
);
}
public void testReindexFromRemoteRejectsUsernameWithNoPassword() {
ReindexRequest reindex = newRequest();
reindex.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, Integer.MAX_VALUE),
null,
matchAll,
"user",
null,
emptyMap(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
)
);
ActionRequestValidationException e = reindex.validate();
assertEquals("Validation Failed: 1: reindex from remote source included username but not password;", e.getMessage());
}
public void testReindexFromRemoteRejectsPasswordWithNoUsername() {
ReindexRequest reindex = newRequest();
reindex.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, Integer.MAX_VALUE),
null,
matchAll,
null,
new SecureString("password".toCharArray()),
emptyMap(),
RemoteInfo.DEFAULT_SOCKET_TIMEOUT,
RemoteInfo.DEFAULT_CONNECT_TIMEOUT
)
);
ActionRequestValidationException e = reindex.validate();
assertEquals("Validation Failed: 1: reindex from remote source included password but not username;", e.getMessage());
}
public void testNoSliceBuilderSetWithSlicedRequest() {
ReindexRequest reindex = newRequest();
reindex.getSearchRequest().source().slice(new SliceBuilder(0, 4));
reindex.setSlices(between(2, Integer.MAX_VALUE));
ActionRequestValidationException e = reindex.validate();
assertEquals("Validation Failed: 1: can't specify both manual and automatic slicing at the same time;", e.getMessage());
}
@Override
protected void extraRandomizationForSlice(ReindexRequest original) {
if (randomBoolean()) {
original.setScript(mockScript(randomAlphaOfLength(5)));
}
if (randomBoolean()) {
original.setRemoteInfo(
new RemoteInfo(
randomAlphaOfLength(5),
randomAlphaOfLength(5),
between(1, 10000),
null,
matchAll,
null,
null,
emptyMap(),
randomPositiveTimeValue(),
randomPositiveTimeValue()
)
);
}
}
@Override
protected void extraForSliceAssertions(ReindexRequest original, ReindexRequest forSliced) {
assertEquals(original.getScript(), forSliced.getScript());
assertEquals(original.getDestination(), forSliced.getDestination());
assertEquals(original.getRemoteInfo(), forSliced.getRemoteInfo());
}
@Override
protected ReindexRequest newRequest() {
ReindexRequest reindex = new ReindexRequest();
reindex.setSourceIndices("source");
reindex.setDestIndex("dest");
return reindex;
}
public void testBuildRemoteInfoNoRemote() throws IOException {
assertNull(ReindexRequest.buildRemoteInfo(new HashMap<>()));
}
public void testBuildRemoteInfoFullyLoaded() throws IOException {
Map<String, String> headers = new HashMap<>();
headers.put("first", "a");
headers.put("second", "b");
headers.put("third", "");
Map<String, Object> remote = new HashMap<>();
remote.put("host", "https://example.com:9200");
remote.put("username", "testuser");
remote.put("password", "testpass");
remote.put("headers", headers);
remote.put("socket_timeout", "90s");
remote.put("connect_timeout", "10s");
Map<String, Object> query = new HashMap<>();
query.put("a", "b");
Map<String, Object> source = new HashMap<>();
source.put("remote", remote);
source.put("query", query);
RemoteInfo remoteInfo = ReindexRequest.buildRemoteInfo(source);
assertEquals("https", remoteInfo.getScheme());
assertEquals("example.com", remoteInfo.getHost());
assertEquals(9200, remoteInfo.getPort());
assertEquals("""
{
"a" : "b"
}""", remoteInfo.getQuery().utf8ToString());
assertEquals("testuser", remoteInfo.getUsername());
assertEquals("testpass", remoteInfo.getPassword().toString());
assertEquals(headers, remoteInfo.getHeaders());
assertEquals(timeValueSeconds(90), remoteInfo.getSocketTimeout());
assertEquals(timeValueSeconds(10), remoteInfo.getConnectTimeout());
}
public void testBuildRemoteInfoWithoutAllParts() throws IOException {
expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com"));
expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase(":9200"));
expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://:9200"));
expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com:9200"));
expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://example.com"));
}
public void testBuildRemoteInfoWithAllHostParts() throws IOException {
RemoteInfo info = buildRemoteInfoHostTestCase("http://example.com:9200");
assertEquals("http", info.getScheme());
assertEquals("example.com", info.getHost());
assertEquals(9200, info.getPort());
assertNull(info.getPathPrefix());
assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout()); // Didn't set the timeout so we should get the default
assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout()); // Didn't set the timeout so we should get the default
info = buildRemoteInfoHostTestCase("https://other.example.com:9201");
assertEquals("https", info.getScheme());
assertEquals("other.example.com", info.getHost());
assertEquals(9201, info.getPort());
assertNull(info.getPathPrefix());
assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout());
assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout());
info = buildRemoteInfoHostTestCase("https://[::1]:9201");
assertEquals("https", info.getScheme());
assertEquals("[::1]", info.getHost());
assertEquals(9201, info.getPort());
assertNull(info.getPathPrefix());
assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout());
assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout());
info = buildRemoteInfoHostTestCase("https://other.example.com:9201/");
assertEquals("https", info.getScheme());
assertEquals("other.example.com", info.getHost());
assertEquals(9201, info.getPort());
assertEquals("/", info.getPathPrefix());
assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout());
assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout());
info = buildRemoteInfoHostTestCase("https://other.example.com:9201/proxy-path/");
assertEquals("https", info.getScheme());
assertEquals("other.example.com", info.getHost());
assertEquals(9201, info.getPort());
assertEquals("/proxy-path/", info.getPathPrefix());
assertEquals(RemoteInfo.DEFAULT_SOCKET_TIMEOUT, info.getSocketTimeout());
assertEquals(RemoteInfo.DEFAULT_CONNECT_TIMEOUT, info.getConnectTimeout());
final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("https"));
assertEquals("[host] must be of the form [scheme]://[host]:[port](/[pathPrefix])? but was [https]", exception.getMessage());
}
public void testBuildRemoteInfoWithApiKey() throws IOException {
Map<String, Object> remote = new HashMap<>();
remote.put("host", "https://example.com:9200");
remote.put("api_key", "l3t-m3-1n");
Map<String, Object> source = new HashMap<>();
source.put("remote", remote);
RemoteInfo remoteInfo = ReindexRequest.buildRemoteInfo(source);
assertEquals(remoteInfo.getHeaders(), Map.of("Authorization", "ApiKey l3t-m3-1n"));
}
public void testBuildRemoteInfoWithApiKeyAndOtherHeaders() throws IOException {
Map<String, Object> originalHeaders = new HashMap<>();
originalHeaders.put("X-Routing-Magic", "Abracadabra");
originalHeaders.put("X-Tracing-Magic", "12345");
Map<String, Object> remote = new HashMap<>();
remote.put("host", "https://example.com:9200");
remote.put("api_key", "l3t-m3-1n");
remote.put("headers", originalHeaders);
Map<String, Object> source = new HashMap<>();
source.put("remote", remote);
RemoteInfo remoteInfo = ReindexRequest.buildRemoteInfo(source);
assertEquals(
remoteInfo.getHeaders(),
Map.of("X-Routing-Magic", "Abracadabra", "X-Tracing-Magic", "12345", "Authorization", "ApiKey l3t-m3-1n")
);
}
public void testBuildRemoteInfoWithConflictingApiKeyAndAuthorizationHeader() throws IOException {
Map<String, Object> originalHeaders = new HashMap<>();
originalHeaders.put("aUtHoRiZaTiOn", "op3n-s3s4m3"); // non-standard capitalization, but HTTP headers are not case-sensitive
Map<String, Object> remote = new HashMap<>();
remote.put("host", "https://example.com:9200");
remote.put("api_key", "l3t-m3-1n");
remote.put("headers", originalHeaders);
Map<String, Object> source = new HashMap<>();
source.put("remote", remote);
assertThrows(IllegalArgumentException.class, () -> ReindexRequest.buildRemoteInfo(source));
}
public void testReindexFromRemoteRequestParsing() throws IOException {
BytesReference request;
try (XContentBuilder b = JsonXContent.contentBuilder()) {
b.startObject();
{
b.startObject("source");
{
b.startObject("remote");
{
b.field("host", "http://localhost:9200");
}
b.endObject();
b.field("index", "source");
}
b.endObject();
b.startObject("dest");
{
b.field("index", "dest");
}
b.endObject();
}
b.endObject();
request = BytesReference.bytes(b);
}
try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) {
ReindexRequest r = ReindexRequest.fromXContent(p, nf -> false);
assertEquals("localhost", r.getRemoteInfo().getHost());
assertArrayEquals(new String[] { "source" }, r.getSearchRequest().indices());
}
}
private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOException {
Map<String, Object> remote = new HashMap<>();
remote.put("host", hostInRest);
Map<String, Object> source = new HashMap<>();
source.put("remote", remote);
return ReindexRequest.buildRemoteInfo(source);
}
public void testCommaSeparatedSourceIndices() throws IOException {
ReindexRequest r = parseRequestWithSourceIndices("a,b");
assertArrayEquals(new String[] { "a", "b" }, r.getSearchRequest().indices());
}
public void testArraySourceIndices() throws IOException {
ReindexRequest r = parseRequestWithSourceIndices(new String[] { "a", "b" });
assertArrayEquals(new String[] { "a", "b" }, r.getSearchRequest().indices());
}
public void testEmptyStringSourceIndices() throws IOException {
ReindexRequest r = parseRequestWithSourceIndices("");
assertArrayEquals(new String[0], r.getSearchRequest().indices());
ActionRequestValidationException validationException = r.validate();
assertNotNull(validationException);
assertEquals(List.of("use _all if you really want to copy from all existing indexes"), validationException.validationErrors());
}
private ReindexRequest parseRequestWithSourceIndices(Object sourceIndices) throws IOException {
BytesReference request;
try (XContentBuilder b = JsonXContent.contentBuilder()) {
b.startObject();
{
b.startObject("source");
{
b.field("index", sourceIndices);
}
b.endObject();
b.startObject("dest");
{
b.field("index", "dest");
}
b.endObject();
}
b.endObject();
request = BytesReference.bytes(b);
}
try (XContentParser p = createParser(JsonXContent.jsonXContent, request)) {
return ReindexRequest.fromXContent(p, Predicates.never());
}
}
}
| ReindexRequestTests |
java | apache__camel | components/camel-huawei/camel-huaweicloud-obs/src/test/java/org/apache/camel/component/huaweicloud/obs/PutObjectStringTest.java | {
"start": 1632,
"end": 4491
} | class ____ extends CamelTestSupport {
private static final Logger LOGGER = LoggerFactory.getLogger(PutObjectTest.class);
TestConfiguration testConfiguration = new TestConfiguration();
ObjectMapper mapper = new ObjectMapper();
@BindToRegistry("obsClient")
ObsClient mockClient = Mockito.mock(ObsClient.class);
@BindToRegistry("serviceKeys")
ServiceKeys serviceKeys = new ServiceKeys(
testConfiguration.getProperty("accessKey"),
testConfiguration.getProperty("secretKey"));
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:put_object")
.setBody(constant("a test string"))
.setProperty(OBSProperties.OBJECT_NAME, constant("string_file.txt"))
.setProperty(OBSProperties.BUCKET_NAME, constant("reji-abc"))
.setProperty(OBSProperties.BUCKET_LOCATION, constant("cn-north-1"))
.to("hwcloud-obs:putObject?" +
"serviceKeys=#serviceKeys" +
"®ion=" + testConfiguration.getProperty("region") +
"&ignoreSslVerification=true" +
"&obsClient=#obsClient")
.log("Put object successful")
.to("log:LOG?showAll=true")
.to("mock:put_object_result");
}
};
}
@Test
public void putObjectStringTest() throws Exception {
PutObjectResult putObjectResult = new PutObjectResult(
"reji-abc", "string_file.txt",
"eb733a00c0c9d336e65691a37ab54293", "version-xxx",
StorageClassEnum.STANDARD, "https://reji-abc.obs.cn-north-1.myhuaweicloud.com/test_file.txt");
Mockito.when(mockClient.putObject(Mockito.any(String.class),
Mockito.any(String.class), Mockito.any(InputStream.class)))
.thenReturn(putObjectResult);
MockEndpoint mock = getMockEndpoint("mock:put_object_result");
mock.expectedMinimumMessageCount(1);
template.sendBody("direct:put_object", "sample file content");
Exchange responseExchange = mock.getExchanges().get(0);
mock.assertIsSatisfied();
assertEquals("{\"bucketName\":\"reji-abc\",\"objectKey\":\"string_file.txt\"," +
"\"etag\":\"eb733a00c0c9d336e65691a37ab54293\",\"versionId\":\"version-xxx\"," +
"\"storageClass\":\"STANDARD\"," +
"\"objectUrl\":\"https://reji-abc.obs.cn-north-1.myhuaweicloud.com/test_file.txt\"," +
"\"statusCode\":0}",
responseExchange.getIn().getBody());
}
}
| PutObjectStringTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/internal/util/ReflectHelper.java | {
"start": 4382,
"end": 4445
} | class ____ the given interface.
*
* @param clazz The | implements |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/XContentFilterKeysUtils.java | {
"start": 741,
"end": 4756
} | class ____ {
private XContentFilterKeysUtils() {}
public static Map<String, Object> filterMapOrdered(Set<String> keys, XContentParser parser) throws IOException {
try {
if (parser.currentToken() != null) {
throw new IllegalArgumentException("Parser already started");
}
if (parser.nextToken() != START_OBJECT) {
throw new IllegalArgumentException("Content should start with START_OBJECT");
}
State state = new State(new ArrayList<>(keys));
return parse(parser, state);
} catch (IOException e) {
throw new IOException("could not build a filtered payload out of xcontent", e);
}
}
private static Map<String, Object> parse(XContentParser parser, State state) throws IOException {
return parse(parser, state, true);
}
private static Map<String, Object> parse(XContentParser parser, State state, boolean isOutsideOfArray) throws IOException {
if (state.includeLeaf) {
return parser.map();
}
Map<String, Object> data = new HashMap<>();
for (XContentParser.Token token = parser.nextToken(); token != END_OBJECT; token = parser.nextToken()) {
switch (token) {
case FIELD_NAME -> state.nextField(parser.currentName());
case START_OBJECT -> {
if (state.includeKey) {
String fieldName = state.currentFieldName();
Map<String, Object> nestedData = parse(parser, state, isOutsideOfArray);
data.put(fieldName, nestedData);
} else {
parser.skipChildren();
}
if (isOutsideOfArray) {
state.previousField();
}
}
case START_ARRAY -> {
if (state.includeKey) {
String fieldName = state.currentFieldName();
List<Object> arrayData = arrayParsing(parser, state);
data.put(fieldName, arrayData);
} else {
parser.skipChildren();
}
state.previousField();
}
case VALUE_STRING -> {
if (state.includeKey) {
data.put(state.currentFieldName(), parser.text());
}
if (isOutsideOfArray) {
state.previousField();
}
}
case VALUE_NUMBER -> {
if (state.includeKey) {
data.put(state.currentFieldName(), parser.numberValue());
}
if (isOutsideOfArray) {
state.previousField();
}
}
case VALUE_BOOLEAN -> {
if (state.includeKey) {
data.put(state.currentFieldName(), parser.booleanValue());
}
if (isOutsideOfArray) {
state.previousField();
}
}
}
}
return data;
}
private static List<Object> arrayParsing(XContentParser parser, State state) throws IOException {
List<Object> values = new ArrayList<>();
for (XContentParser.Token token = parser.nextToken(); token != END_ARRAY; token = parser.nextToken()) {
switch (token) {
case START_OBJECT -> values.add(parse(parser, state, false));
case VALUE_STRING -> values.add(parser.text());
case VALUE_NUMBER -> values.add(parser.numberValue());
case VALUE_BOOLEAN -> values.add(parser.booleanValue());
}
}
return values;
}
private static final | XContentFilterKeysUtils |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/PojoTypeInfo.java | {
"start": 3069,
"end": 14667
} | class ____<T> extends CompositeType<T> {
private static final long serialVersionUID = 1L;
private static final String REGEX_FIELD = "[\\p{L}_\\$][\\p{L}\\p{Digit}_\\$]*";
private static final String REGEX_NESTED_FIELDS = "(" + REGEX_FIELD + ")(\\.(.+))?";
private static final String REGEX_NESTED_FIELDS_WILDCARD =
REGEX_NESTED_FIELDS
+ "|\\"
+ ExpressionKeys.SELECT_ALL_CHAR
+ "|\\"
+ ExpressionKeys.SELECT_ALL_CHAR_SCALA;
private static final Pattern PATTERN_NESTED_FIELDS = Pattern.compile(REGEX_NESTED_FIELDS);
private static final Pattern PATTERN_NESTED_FIELDS_WILDCARD =
Pattern.compile(REGEX_NESTED_FIELDS_WILDCARD);
private final PojoField[] fields;
private final int totalFields;
@PublicEvolving
public PojoTypeInfo(Class<T> typeClass, List<PojoField> fields) {
super(typeClass);
checkArgument(
Modifier.isPublic(typeClass.getModifiers()), "POJO %s is not public", typeClass);
this.fields = fields.toArray(new PojoField[fields.size()]);
Arrays.sort(
this.fields,
new Comparator<PojoField>() {
@Override
public int compare(PojoField o1, PojoField o2) {
return o1.getField().getName().compareTo(o2.getField().getName());
}
});
int counterFields = 0;
for (PojoField field : fields) {
counterFields += field.getTypeInformation().getTotalFields();
}
totalFields = counterFields;
}
@Override
@PublicEvolving
public boolean isBasicType() {
return false;
}
@Override
@PublicEvolving
public boolean isTupleType() {
return false;
}
@Override
@PublicEvolving
public int getArity() {
return fields.length;
}
@Override
@PublicEvolving
public int getTotalFields() {
return totalFields;
}
@Override
@PublicEvolving
public boolean isSortKeyType() {
// Support for sorting POJOs that implement Comparable is not implemented yet.
// Since the order of fields in a POJO type is not well defined, sorting on fields
// gives only some undefined order.
return false;
}
@Override
@PublicEvolving
public void getFlatFields(
String fieldExpression, int offset, List<FlatFieldDescriptor> result) {
Matcher matcher = PATTERN_NESTED_FIELDS_WILDCARD.matcher(fieldExpression);
if (!matcher.matches()) {
throw new InvalidFieldReferenceException(
"Invalid POJO field reference \"" + fieldExpression + "\".");
}
String field = matcher.group(0);
if (field.equals(ExpressionKeys.SELECT_ALL_CHAR)
|| field.equals(ExpressionKeys.SELECT_ALL_CHAR_SCALA)) {
// handle select all
int keyPosition = 0;
for (PojoField pField : fields) {
if (pField.getTypeInformation() instanceof CompositeType) {
CompositeType<?> cType = (CompositeType<?>) pField.getTypeInformation();
cType.getFlatFields(
String.valueOf(ExpressionKeys.SELECT_ALL_CHAR),
offset + keyPosition,
result);
keyPosition += cType.getTotalFields() - 1;
} else {
result.add(
new NamedFlatFieldDescriptor(
pField.getField().getName(),
offset + keyPosition,
pField.getTypeInformation()));
}
keyPosition++;
}
return;
} else {
field = matcher.group(1);
}
// get field
int fieldPos = -1;
TypeInformation<?> fieldType = null;
for (int i = 0; i < fields.length; i++) {
if (fields[i].getField().getName().equals(field)) {
fieldPos = i;
fieldType = fields[i].getTypeInformation();
break;
}
}
if (fieldPos == -1) {
throw new InvalidFieldReferenceException(
"Unable to find field \"" + field + "\" in type " + this + ".");
}
String tail = matcher.group(3);
if (tail == null) {
if (fieldType instanceof CompositeType) {
// forward offset
for (int i = 0; i < fieldPos; i++) {
offset += this.getTypeAt(i).getTotalFields();
}
// add all fields of composite type
((CompositeType<?>) fieldType).getFlatFields("*", offset, result);
} else {
// we found the field to add
// compute flat field position by adding skipped fields
int flatFieldPos = offset;
for (int i = 0; i < fieldPos; i++) {
flatFieldPos += this.getTypeAt(i).getTotalFields();
}
result.add(new FlatFieldDescriptor(flatFieldPos, fieldType));
}
} else {
if (fieldType instanceof CompositeType<?>) {
// forward offset
for (int i = 0; i < fieldPos; i++) {
offset += this.getTypeAt(i).getTotalFields();
}
((CompositeType<?>) fieldType).getFlatFields(tail, offset, result);
} else {
throw new InvalidFieldReferenceException(
"Nested field expression \""
+ tail
+ "\" not possible on atomic type "
+ fieldType
+ ".");
}
}
}
@SuppressWarnings("unchecked")
@Override
@PublicEvolving
public <X> TypeInformation<X> getTypeAt(String fieldExpression) {
Matcher matcher = PATTERN_NESTED_FIELDS.matcher(fieldExpression);
if (!matcher.matches()) {
if (fieldExpression.startsWith(ExpressionKeys.SELECT_ALL_CHAR)
|| fieldExpression.startsWith(ExpressionKeys.SELECT_ALL_CHAR_SCALA)) {
throw new InvalidFieldReferenceException(
"Wildcard expressions are not allowed here.");
} else {
throw new InvalidFieldReferenceException(
"Invalid format of POJO field expression \"" + fieldExpression + "\".");
}
}
String field = matcher.group(1);
// get field
int fieldPos = -1;
TypeInformation<?> fieldType = null;
for (int i = 0; i < fields.length; i++) {
if (fields[i].getField().getName().equals(field)) {
fieldPos = i;
fieldType = fields[i].getTypeInformation();
break;
}
}
if (fieldPos == -1) {
throw new InvalidFieldReferenceException(
"Unable to find field \"" + field + "\" in type " + this + ".");
}
String tail = matcher.group(3);
if (tail == null) {
// we found the type
return (TypeInformation<X>) fieldType;
} else {
if (fieldType instanceof CompositeType<?>) {
return ((CompositeType<?>) fieldType).getTypeAt(tail);
} else {
throw new InvalidFieldReferenceException(
"Nested field expression \""
+ tail
+ "\" not possible on atomic type "
+ fieldType
+ ".");
}
}
}
@Override
@PublicEvolving
public <X> TypeInformation<X> getTypeAt(int pos) {
if (pos < 0 || pos >= this.fields.length) {
throw new IndexOutOfBoundsException();
}
@SuppressWarnings("unchecked")
TypeInformation<X> typed = (TypeInformation<X>) fields[pos].getTypeInformation();
return typed;
}
@Override
@PublicEvolving
protected TypeComparatorBuilder<T> createTypeComparatorBuilder() {
return new PojoTypeComparatorBuilder();
}
@PublicEvolving
public PojoField getPojoFieldAt(int pos) {
if (pos < 0 || pos >= this.fields.length) {
throw new IndexOutOfBoundsException();
}
return this.fields[pos];
}
@PublicEvolving
public String[] getFieldNames() {
String[] result = new String[fields.length];
for (int i = 0; i < fields.length; i++) {
result[i] = fields[i].getField().getName();
}
return result;
}
@Override
@PublicEvolving
public int getFieldIndex(String fieldName) {
for (int i = 0; i < fields.length; i++) {
if (fields[i].getField().getName().equals(fieldName)) {
return i;
}
}
return -1;
}
@Override
@PublicEvolving
@SuppressWarnings("unchecked")
public TypeSerializer<T> createSerializer(SerializerConfig config) {
if (config.isForceKryoEnabled()) {
return new KryoSerializer<>(getTypeClass(), config);
}
if (config.isForceAvroEnabled()) {
return AvroUtils.getAvroUtils().createAvroSerializer(getTypeClass());
}
return createPojoSerializer(config);
}
public PojoSerializer<T> createPojoSerializer(SerializerConfig config) {
TypeSerializer<?>[] fieldSerializers = new TypeSerializer<?>[fields.length];
Field[] reflectiveFields = new Field[fields.length];
for (int i = 0; i < fields.length; i++) {
fieldSerializers[i] = fields[i].getTypeInformation().createSerializer(config);
reflectiveFields[i] = fields[i].getField();
}
return new PojoSerializer<T>(getTypeClass(), fieldSerializers, reflectiveFields, config);
}
@Override
public boolean equals(Object obj) {
if (obj instanceof PojoTypeInfo) {
@SuppressWarnings("unchecked")
PojoTypeInfo<T> pojoTypeInfo = (PojoTypeInfo<T>) obj;
return pojoTypeInfo.canEqual(this)
&& super.equals(pojoTypeInfo)
&& Arrays.equals(fields, pojoTypeInfo.fields)
&& totalFields == pojoTypeInfo.totalFields;
} else {
return false;
}
}
@Override
public int hashCode() {
return 31 * (31 * Arrays.hashCode(fields) + totalFields) + super.hashCode();
}
@Override
public boolean canEqual(Object obj) {
return obj instanceof PojoTypeInfo;
}
@Override
public String toString() {
List<String> fieldStrings = new ArrayList<String>();
for (PojoField field : fields) {
fieldStrings.add(
field.getField().getName() + ": " + field.getTypeInformation().toString());
}
return "PojoType<"
+ getTypeClass().getName()
+ ", fields = ["
+ StringUtils.join(fieldStrings, ", ")
+ "]"
+ ">";
}
// --------------------------------------------------------------------------------------------
private | PojoTypeInfo |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/junit/jupiter/SoftAssertionsExtension_Injection_Test.java | {
"start": 2165,
"end": 2402
} | class ____ {
@Test
void should_use_parent_SoftAssertions_initialized_field() {
assertThat(softly).isNotNull();
}
}
@Nested
@ExtendWith(SoftAssertionsExtension.class)
@DisplayName("nested test | NestedMethodLifecycle |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/io/checkpointing/AbstractAlternatingAlignedBarrierHandlerState.java | {
"start": 1226,
"end": 3588
} | class ____ implements BarrierHandlerState {
protected final ChannelState state;
protected AbstractAlternatingAlignedBarrierHandlerState(ChannelState state) {
this.state = state;
}
@Override
public final BarrierHandlerState announcementReceived(
Controller controller, InputChannelInfo channelInfo, int sequenceNumber) {
state.addSeenAnnouncement(channelInfo, sequenceNumber);
return this;
}
@Override
public final BarrierHandlerState barrierReceived(
Controller controller,
InputChannelInfo channelInfo,
CheckpointBarrier checkpointBarrier,
boolean markChannelBlocked)
throws IOException, CheckpointException {
if (checkpointBarrier.getCheckpointOptions().isUnalignedCheckpoint()) {
BarrierHandlerState unalignedState =
alignedCheckpointTimeout(controller, checkpointBarrier);
return unalignedState.barrierReceived(
controller, channelInfo, checkpointBarrier, markChannelBlocked);
}
state.removeSeenAnnouncement(channelInfo);
if (markChannelBlocked) {
state.blockChannel(channelInfo);
}
if (controller.allBarriersReceived()) {
controller.initInputsCheckpoint(checkpointBarrier);
controller.triggerGlobalCheckpoint(checkpointBarrier);
return finishCheckpoint();
} else if (controller.isTimedOut(checkpointBarrier)) {
return alignedCheckpointTimeout(controller, checkpointBarrier)
.barrierReceived(
controller,
channelInfo,
checkpointBarrier.asUnaligned(),
markChannelBlocked);
}
return transitionAfterBarrierReceived(state);
}
protected abstract BarrierHandlerState transitionAfterBarrierReceived(ChannelState state);
@Override
public final BarrierHandlerState abort(long cancelledId) throws IOException {
return finishCheckpoint();
}
protected BarrierHandlerState finishCheckpoint() throws IOException {
state.unblockAllChannels();
return new AlternatingWaitingForFirstBarrier(state.emptyState());
}
}
| AbstractAlternatingAlignedBarrierHandlerState |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/floats/Floats_assertLessThan_Test.java | {
"start": 1400,
"end": 3972
} | class ____ extends FloatsBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> floats.assertLessThan(someInfo(), null, 8f))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_actual_is_less_than_other() {
floats.assertLessThan(someInfo(), 6f, 8f);
}
@Test
void should_fail_if_actual_is_equal_to_other() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> floats.assertLessThan(info, 6f, 6f));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeLess(6f, 6f));
}
@Test
void should_fail_if_actual_is_greater_than_other() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> floats.assertLessThan(info, 8f, 6f));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeLess(8f, 6f));
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> floatsWithAbsValueComparisonStrategy.assertLessThan(someInfo(),
null,
8f))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_actual_is_less_than_other_according_to_custom_comparison_strategy() {
floatsWithAbsValueComparisonStrategy.assertLessThan(someInfo(), 6f, -8f);
}
@Test
void should_fail_if_actual_is_equal_to_other_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> floatsWithAbsValueComparisonStrategy.assertLessThan(info, 6f, -6f));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeLess(6f, -6f, absValueComparisonStrategy));
}
@Test
void should_fail_if_actual_is_greater_than_other_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> floatsWithAbsValueComparisonStrategy.assertLessThan(info, -8f, 6f));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeLess(-8f, 6f, absValueComparisonStrategy));
}
}
| Floats_assertLessThan_Test |
java | spring-projects__spring-boot | cli/spring-boot-cli/src/test/java/org/springframework/boot/cli/command/CommandRunnerIntegrationTests.java | {
"start": 1990,
"end": 2259
} | class ____ extends AbstractCommand {
private String @Nullable [] args;
ArgHandlingCommand() {
super("args", "");
}
@Override
public ExitStatus run(String... args) throws Exception {
this.args = args;
return ExitStatus.OK;
}
}
}
| ArgHandlingCommand |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/client/reactive/ClientHttpRequest.java | {
"start": 1048,
"end": 1667
} | interface ____ extends ReactiveHttpOutputMessage {
/**
* Return the HTTP method of the request.
*/
HttpMethod getMethod();
/**
* Return the URI of the request.
*/
URI getURI();
/**
* Return a mutable map of request cookies to send to the server.
*/
MultiValueMap<String, HttpCookie> getCookies();
/**
* Return a mutable map of the request attributes.
* @since 6.2
*/
Map<String, Object> getAttributes();
/**
* Return the request from the underlying HTTP library.
* @param <T> the expected type of the request to cast to
* @since 5.3
*/
<T> T getNativeRequest();
}
| ClientHttpRequest |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/concurrent/TimedSemaphore.java | {
"start": 2724,
"end": 4052
} | class ____ extends Thread {
* // The semaphore for limiting database load.
* private final TimedSemaphore semaphore;
* // Create an instance and set the semaphore
* public StatisticsThread(TimedSemaphore timedSemaphore) {
* semaphore = timedSemaphore;
* }
* // Gather statistics
* public void run() {
* try {
* while (true) {
* semaphore.acquire(); // limit database load
* performQuery(); // issue a query
* }
* } catch (InterruptedException) {
* // fall through
* }
* }
* ...
* }
* </pre>
*
* <p>
* The following code fragment shows how a {@link TimedSemaphore} is created that allows only 10 operations per second and passed to the statistics thread:
* </p>
*
* <pre>
* TimedSemaphore sem = new TimedSemaphore(1, TimeUnit.SECOND, 10);
* StatisticsThread thread = new StatisticsThread(sem);
* thread.start();
* </pre>
*
* <p>
* When creating an instance the time period for the semaphore must be specified. {@link TimedSemaphore} uses an executor service with a corresponding period to
* monitor this interval. The {@code
* ScheduledExecutorService} to be used for this purpose can be provided at construction time. Alternatively the | StatisticsThread |
java | google__dagger | javatests/dagger/internal/codegen/ComponentProcessorTest.java | {
"start": 73443,
"end": 73792
} | class ____ {",
" @Inject Bar() {}",
"}");
Source component =
CompilerTests.javaSource(
"test.TestComponent",
"package test;",
"",
"import dagger.Component;",
"import javax.inject.Provider;",
"",
"@Component",
"public | Bar |
java | apache__dubbo | dubbo-plugin/dubbo-rest-jaxrs/src/test/java/org/apache/dubbo/rpc/protocol/tri/rest/support/jaxrs/compatible/rest/RestDemoService.java | {
"start": 1230,
"end": 2036
} | interface ____ {
@GET
@Path("/hello")
Integer hello(@QueryParam("a") Integer a, @QueryParam("b") Integer b);
@GET
@Path("/findUserById")
Response findUserById(@QueryParam("id") Integer id);
@GET
@Path("/error")
String error();
@POST
@Path("/say")
@Consumes({MediaType.TEXT_PLAIN})
String sayHello(String name);
@POST
@Path("number")
@Produces({MediaType.APPLICATION_FORM_URLENCODED})
@Consumes({MediaType.APPLICATION_FORM_URLENCODED})
Long testFormBody(@FormParam("number") Long number);
boolean isCalled();
@DELETE
@Path("{uid}")
String deleteUserByUid(@PathParam("uid") String uid);
@DELETE
@Path("/deleteUserById/{uid}")
public Response deleteUserById(@PathParam("uid") String uid);
}
| RestDemoService |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java | {
"start": 955,
"end": 7295
} | class ____ {
protected static final Map<String, DateFormatter> FORMATTER_MAP;
static {
List<DateFormatter> formatters = List.of(
new Builder().pattern("%a").javaPattern("E").build(),
new Builder().pattern("%b").javaPattern("MMM").build(),
new Builder().pattern("%c").javaPattern("M").build(),
new Builder().pattern("%D")
.javaPattern("d")
.additionalMapper(s -> s + ordinalSuffixForDayOfTheMonth(Integer.parseInt(s)))
.build(),
new Builder().pattern("%d").javaPattern("dd").build(),
new Builder().pattern("%e").javaPattern("d").build(),
new Builder().pattern("%f")
.javaPattern("n")
.additionalMapper(s -> String.format(Locale.ENGLISH, "%06d", Math.round(Integer.parseInt(s) / 1000.0)))
.build(),
new Builder().pattern("%H").javaPattern("HH").build(),
new Builder().pattern("%h").javaPattern("hh").build(),
new Builder().pattern("%I").javaPattern("hh").build(),
new Builder().pattern("%i").javaPattern("mm").build(),
new Builder().pattern("%j").javaPattern("DDD").build(),
new Builder().pattern("%k").javaPattern("H").build(),
new Builder().pattern("%l").javaPattern("h").build(),
new Builder().pattern("%M").javaPattern("MMMM").build(),
new Builder().pattern("%m").javaPattern("MM").build(),
new Builder().pattern("%p").javaPattern("a").build(),
new Builder().pattern("%r").javaPattern("hh:mm:ss a").build(),
new Builder().pattern("%S").javaPattern("ss").build(),
new Builder().pattern("%s").javaPattern("ss").build(),
new Builder().pattern("%T").javaPattern("HH:mm:ss").build(),
new Builder().pattern("%U")
.javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfYear())))
.build(),
new Builder().pattern("%u")
.javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.MONDAY, 4).weekOfYear())))
.build(),
new Builder().pattern("%V")
.javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfWeekBasedYear())))
.build(),
new Builder().pattern("%v")
.javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.MONDAY, 4).weekOfWeekBasedYear())))
.build(),
new Builder().pattern("%W").javaPattern("EEEE").build(),
new Builder().pattern("%w")
.javaFormat(t -> String.format(Locale.ENGLISH, "%01d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).dayOfWeek()) - 1))
.build(),
new Builder().pattern("%X")
.javaFormat(t -> String.format(Locale.ENGLISH, "%04d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekBasedYear())))
.build(),
new Builder().pattern("%x")
.javaFormat(t -> String.format(Locale.ENGLISH, "%04d", t.get(WeekFields.of(DayOfWeek.MONDAY, 7).weekBasedYear())))
.build(),
new Builder().pattern("%Y").javaPattern("yyyy").build(),
new Builder().pattern("%y").javaPattern("yy").build()
);
Map<String, DateFormatter> formatterMap = new LinkedHashMap<>();
for (DateFormatter dateFormatter : formatters) {
formatterMap.put(dateFormatter.pattern, dateFormatter);
}
FORMATTER_MAP = formatterMap;
}
private static String ordinalSuffixForDayOfTheMonth(int i) {
if (i == 1 || i == 21 || i == 31) {
return "st";
} else if (i == 2 || i == 22) {
return "nd";
} else if (i == 3 || i == 23) {
return "rd";
} else {
return "th";
}
}
private String pattern;
private Function<TemporalAccessor, String> javaFormat;
private Function<String, String> additionalMapper;
static Function<TemporalAccessor, String> ofPattern(String dateFormatPattern) {
if (Strings.isEmpty(dateFormatPattern)) {
return timestamp -> "";
}
List<DateFormatter> dateFormatters = parsePattern(dateFormatPattern);
return timestamp -> dateFormatters.stream().map(p -> p.format(timestamp)).collect(Collectors.joining());
}
private String format(TemporalAccessor timestamp) {
String formatted = this.javaFormat.apply(timestamp);
return additionalMapper == null ? formatted : this.additionalMapper.apply(formatted);
}
private static List<DateFormatter> parsePattern(String dateFormatPattern) {
LinkedList<DateFormatter> formatters = new LinkedList<>();
DateFormatter dateFormatter;
while (dateFormatPattern.length() > 1) {
String potentialPattern = dateFormatPattern.substring(0, 2);
dateFormatter = FORMATTER_MAP.get(potentialPattern);
if (dateFormatter != null) {
dateFormatPattern = dateFormatPattern.substring(2);
} else if (potentialPattern.startsWith("%")) {
dateFormatter = literal(dateFormatPattern.substring(1, 2));
dateFormatPattern = dateFormatPattern.substring(2);
} else if (potentialPattern.endsWith("%")) {
dateFormatter = literal(dateFormatPattern.substring(0, 1));
dateFormatPattern = dateFormatPattern.substring(1);
} else {
dateFormatter = literal(dateFormatPattern.substring(0, 2));
dateFormatPattern = dateFormatPattern.substring(2);
}
formatters.addLast(dateFormatter);
}
if (dateFormatPattern.length() == 1) {
dateFormatter = literal(dateFormatPattern.substring(0, 1));
formatters.addLast(dateFormatter);
}
return formatters;
}
private static DateFormatter literal(String literal) {
DateFormatter dateFormatter = new DateFormatter();
dateFormatter.javaFormat = timestamp -> literal;
return dateFormatter;
}
private static | DateFormatter |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/introspect/POJOPropertiesCollectorTest.java | {
"start": 5804,
"end": 5993
} | interface ____ {}
@Target({ElementType.ANNOTATION_TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER})
@Retention(RetentionPolicy.RUNTIME)
@JacksonAnnotation
@ | A |
java | grpc__grpc-java | api/src/main/java/io/grpc/NameResolver.java | {
"start": 29422,
"end": 31770
} | class ____ {
private StatusOr<List<EquivalentAddressGroup>> addresses =
StatusOr.fromValue(Collections.emptyList());
private Attributes attributes = Attributes.EMPTY;
@Nullable
private ConfigOrError serviceConfig;
// Make sure to update #toBuilder above!
Builder() {}
/**
* Sets the addresses resolved by name resolution. This field is required.
*
* @since 1.21.0
* @deprecated Will be superseded by setAddressesOrError
*/
@Deprecated
public Builder setAddresses(List<EquivalentAddressGroup> addresses) {
setAddressesOrError(StatusOr.fromValue(addresses));
return this;
}
/**
* Sets the addresses resolved by name resolution or the error in doing so. This field is
* required.
* @param addresses Resolved addresses or an error in resolving addresses
*/
public Builder setAddressesOrError(StatusOr<List<EquivalentAddressGroup>> addresses) {
this.addresses = checkNotNull(addresses, "StatusOr addresses cannot be null.");
return this;
}
/**
* Sets the attributes for the addresses resolved by name resolution. If unset,
* {@link Attributes#EMPTY} will be used as a default.
*
* @since 1.21.0
*/
public Builder setAttributes(Attributes attributes) {
this.attributes = attributes;
return this;
}
/**
* Sets the Service Config parsed by {@link Args#getServiceConfigParser}.
* This field is optional.
*
* @since 1.21.0
*/
public Builder setServiceConfig(@Nullable ConfigOrError serviceConfig) {
this.serviceConfig = serviceConfig;
return this;
}
/**
* Constructs a new {@link ResolutionResult} from this builder.
*
* @since 1.21.0
*/
public ResolutionResult build() {
return new ResolutionResult(addresses, attributes, serviceConfig);
}
}
}
/**
* Represents either a successfully parsed service config, containing all necessary parts to be
* later applied by the channel, or a Status containing the error encountered while parsing.
*
* @since 1.20.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1770")
public static final | Builder |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/RequestLeakDetectionTest.java | {
"start": 2998,
"end": 4724
} | class ____ {
@Inject
MyRequestScopeBean bean;
@Inject
Barrier barrier;
@Query
public Uni<Foo> foo(int val) {
Assertions.assertTrue(VertxContext.isOnDuplicatedContext());
Vertx.currentContext().putLocal("count", val);
bean.setValue(val);
return Uni.createFrom().<Integer> emitter(e -> {
barrier.enqueue(Vertx.currentContext(), () -> {
Assertions.assertTrue(VertxContext.isOnDuplicatedContext());
int r = Vertx.currentContext().getLocal("count");
Assertions.assertEquals(r, val);
e.complete(bean.getValue());
});
}).map(i -> new Foo(Integer.toString(i)));
}
public Foo nested(@Source Foo foo) {
Assertions.assertTrue(VertxContext.isOnDuplicatedContext());
int r = Vertx.currentContext().getLocal("count");
String rAsString = Integer.toString(r);
Assertions.assertEquals(rAsString, foo.value);
Assertions.assertEquals(bean.getValue(), r);
return new Foo("source field on foo " + foo.value);
}
public Uni<Foo> nestedUni(@Source Foo foo) {
Assertions.assertTrue(VertxContext.isOnDuplicatedContext());
int r = Vertx.currentContext().getLocal("count");
String rAsString = Integer.toString(r);
Assertions.assertEquals(rAsString, foo.value);
Assertions.assertEquals(bean.getValue(), r);
return Uni.createFrom().item(new Foo("uni source field on foo " + foo.value));
}
}
@ApplicationScoped
public static | MyGraphQLApi |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/web/servlet/jersey/MyEndpoint.java | {
"start": 823,
"end": 900
} | class ____ {
@GET
public String message() {
return "Hello";
}
}
| MyEndpoint |
java | google__guice | extensions/servlet/test/com/google/inject/servlet/ContextPathTest.java | {
"start": 9184,
"end": 9527
} | class ____ extends HttpServlet {
private boolean triggered = false;
@Override
public void doGet(HttpServletRequest req, HttpServletResponse resp) {
triggered = true;
}
public boolean isTriggered() {
return triggered;
}
public void clear() {
triggered = false;
}
}
public static | TestServlet |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/jta/JtaExceptionListener.java | {
"start": 1207,
"end": 2456
} | class ____ {
@Test
public void testTransactionRollback(EntityManagerFactoryScope scope) {
final var emf = scope.getEntityManagerFactory();
assertThrows(
RollbackException.class, () -> {
TestingJtaPlatformImpl.INSTANCE.getTransactionManager().begin();
var entityManager = emf.createEntityManager();
try {
// Trying to persist an entity - however the listener should throw an exception, so the entity
// shouldn't be persisted
StrTestEntity te = new StrTestEntity( "x" );
entityManager.persist( te );
}
finally {
entityManager.close();
TestingJtaPlatformImpl.tryCommit();
}
}
);
}
@Test
public void testDataNotPersisted(EntityManagerFactoryScope scope) throws Exception {
final var emf = scope.getEntityManagerFactory();
TestingJtaPlatformImpl.INSTANCE.getTransactionManager().begin();
// Checking if the entity became persisted
var entityManager = emf.createEntityManager();
try {
long count = entityManager.createQuery( "from StrTestEntity s where s.str = 'x'", StrTestEntity.class )
.getResultList().size();
assertEquals( 0, count );
}
finally {
entityManager.close();
TestingJtaPlatformImpl.tryCommit();
}
}
}
| JtaExceptionListener |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/derivedidentities/e4/c/DerivedIdentitySimpleParentSimpleDepSecondPassOrderingTest.java | {
"start": 5927,
"end": 6594
} | class ____ implements Serializable {
@Id
private Integer id;
@ManyToOne
private EntityWithOneToOneDerivedId ref;
public EntityReferencingEntityWithOneToOneDerivedId() {
}
public EntityReferencingEntityWithOneToOneDerivedId(Integer id, EntityWithOneToOneDerivedId ref) {
this.id = id;
this.ref = ref;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public EntityWithOneToOneDerivedId getRef() {
return ref;
}
public void setRef(EntityWithOneToOneDerivedId ref) {
this.ref = ref;
}
}
@Entity(name = "mto_derived")
public static | EntityReferencingEntityWithOneToOneDerivedId |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2221/RestSiteDto.java | {
"start": 232,
"end": 701
} | class ____ {
private final String tenantId;
private final String siteId;
private final String cti;
public RestSiteDto(String tenantId, String siteId, String cti) {
this.tenantId = tenantId;
this.siteId = siteId;
this.cti = cti;
}
public String getTenantId() {
return tenantId;
}
public String getSiteId() {
return siteId;
}
public String getCti() {
return cti;
}
}
| RestSiteDto |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/UnexpectedRollbackException.java | {
"start": 847,
"end": 1325
} | class ____ extends TransactionException {
/**
* Constructor for UnexpectedRollbackException.
* @param msg the detail message
*/
public UnexpectedRollbackException(String msg) {
super(msg);
}
/**
* Constructor for UnexpectedRollbackException.
* @param msg the detail message
* @param cause the root cause from the transaction API in use
*/
public UnexpectedRollbackException(String msg, Throwable cause) {
super(msg, cause);
}
}
| UnexpectedRollbackException |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/provider/FieldArgumentsProviderTests.java | {
"start": 15526,
"end": 17193
} | class ____ a "void test()" method.
Method testMethod = ReflectionSupport.findMethod(testClass, "test").get();
return provideArguments(testClass, testMethod, allowNonStaticMethod, fieldNames);
}
private static Stream<Object[]> provideArguments(Class<?> testClass, Method testMethod,
boolean allowNonStaticMethod, String... fieldNames) {
var extensionRegistry = createRegistryWithDefaultExtensions(mock());
var fieldSource = mock(FieldSource.class);
when(fieldSource.value()).thenReturn(fieldNames);
var parameters = mock(ParameterDeclarations.class);
var extensionContext = mock(ExtensionContext.class);
when(extensionContext.getTestClass()).thenReturn(Optional.of(testClass));
when(extensionContext.getTestMethod()).thenReturn(Optional.of(testMethod));
when(extensionContext.getExecutableInvoker()).thenReturn(
new DefaultExecutableInvoker(extensionContext, extensionRegistry));
doCallRealMethod().when(extensionContext).getRequiredTestMethod();
doCallRealMethod().when(extensionContext).getRequiredTestClass();
var testInstance = allowNonStaticMethod ? ReflectionSupport.newInstance(testClass) : null;
when(extensionContext.getTestInstance()).thenReturn(Optional.ofNullable(testInstance));
var lifeCycle = allowNonStaticMethod ? Lifecycle.PER_CLASS : Lifecycle.PER_METHOD;
when(extensionContext.getTestInstanceLifecycle()).thenReturn(Optional.of(lifeCycle));
var provider = new FieldArgumentsProvider();
provider.accept(fieldSource);
return provider.provideArguments(parameters, extensionContext).map(Arguments::get);
}
// -------------------------------------------------------------------------
static | defines |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/JSONScannerTest_new.java | {
"start": 196,
"end": 3546
} | class ____ extends TestCase {
public void test_scan_new_0() throws Exception {
JSONScanner lexer = new JSONScanner("new");
lexer.scanNullOrNew();
}
public void test_scan_new_1() throws Exception {
JSONException error = null;
try {
JSONScanner lexer = new JSONScanner("nww");
lexer.scanNullOrNew();
} catch (JSONException e) {
error = e;
}
Assert.assertNotNull(error);
}
public void test_scan_new_2() throws Exception {
JSONException error = null;
try {
JSONScanner lexer = new JSONScanner("nee");
lexer.scanNullOrNew();
} catch (JSONException e) {
error = e;
}
Assert.assertNotNull(error);
}
public void test_scan_new_3() throws Exception {
JSONException error = null;
try {
JSONScanner lexer = new JSONScanner("neel");
lexer.scanNullOrNew();
} catch (JSONException e) {
error = e;
}
Assert.assertNotNull(error);
}
public void test_scan_new_4() throws Exception {
JSONException error = null;
try {
JSONScanner lexer = new JSONScanner("neww");
lexer.scanNullOrNew();
} catch (JSONException e) {
error = e;
}
Assert.assertNotNull(error);
}
public void test_scan_new_5() throws Exception {
JSONException error = null;
try {
JSONScanner lexer = new JSONScanner("newe");
lexer.scanNullOrNew();
} catch (JSONException e) {
error = e;
}
Assert.assertNotNull(error);
}
public void test_scan_new_6() throws Exception {
JSONException error = null;
try {
JSONScanner lexer = new JSONScanner("new\"");
lexer.scanNullOrNew();
} catch (JSONException e) {
error = e;
}
Assert.assertNotNull(error);
}
public void test_scan_new_7() throws Exception {
JSONScanner lexer = new JSONScanner("new a");
lexer.scanNullOrNew();
}
public void test_scan_new_8() throws Exception {
JSONScanner lexer = new JSONScanner("new,");
lexer.scanNullOrNew();
}
public void test_scan_new_9() throws Exception {
JSONScanner lexer = new JSONScanner("new\na");
lexer.scanNullOrNew();
}
public void test_scan_new_10() throws Exception {
JSONScanner lexer = new JSONScanner("new\ra");
lexer.scanNullOrNew();
}
public void test_scan_new_11() throws Exception {
JSONScanner lexer = new JSONScanner("new\ta");
lexer.scanNullOrNew();
}
public void test_scan_new_12() throws Exception {
JSONScanner lexer = new JSONScanner("new\fa");
lexer.scanNullOrNew();
}
public void test_scan_new_13() throws Exception {
JSONScanner lexer = new JSONScanner("new\ba");
lexer.scanNullOrNew();
}
public void test_scan_new_14() throws Exception {
JSONScanner lexer = new JSONScanner("new}");
lexer.scanNullOrNew();
}
public void test_scan_new_15() throws Exception {
JSONScanner lexer = new JSONScanner("new]");
lexer.scanNullOrNew();
}
}
| JSONScannerTest_new |
java | apache__avro | lang/java/ipc/src/test/java/org/apache/avro/TestProtocolReflect.java | {
"start": 2066,
"end": 5251
} | class ____ implements Simple {
public String hello(String greeting) {
return "goodbye";
}
public int add(int arg1, int arg2) {
return arg1 + arg2;
}
public TestRecord echo(TestRecord record) {
return record;
}
public byte[] echoBytes(byte[] data) {
return data;
}
public void error() throws SimpleException {
if (throwUndeclaredError)
throw new RuntimeException("foo");
throw new SimpleException("foo");
}
}
protected static Server server;
protected static Transceiver client;
protected static Simple proxy;
@BeforeEach
public void testStartServer() throws Exception {
if (server != null)
return;
server = new SocketServer(new ReflectResponder(Simple.class, new TestImpl()), new InetSocketAddress(0));
server.start();
client = new SocketTransceiver(new InetSocketAddress(server.getPort()));
proxy = ReflectRequestor.getClient(Simple.class, client);
}
@Test
void classLoader() throws Exception {
ClassLoader loader = new ClassLoader() {
};
ReflectResponder responder = new ReflectResponder(Simple.class, new TestImpl(), new ReflectData(loader));
assertEquals(responder.getReflectData().getClassLoader(), loader);
ReflectRequestor requestor = new ReflectRequestor(Simple.class, client, new ReflectData(loader));
assertEquals(requestor.getReflectData().getClassLoader(), loader);
}
@Test
void hello() throws IOException {
String response = proxy.hello("bob");
assertEquals("goodbye", response);
}
@Test
void echo() throws IOException {
TestRecord record = new TestRecord();
record.name = "foo";
TestRecord echoed = proxy.echo(record);
assertEquals(record, echoed);
}
@Test
void add() throws IOException {
int result = proxy.add(1, 2);
assertEquals(3, result);
}
@Test
void echoBytes() throws IOException {
Random random = new Random();
int length = random.nextInt(1024 * 16);
byte[] data = new byte[length];
random.nextBytes(data);
byte[] echoed = proxy.echoBytes(data);
assertArrayEquals(data, echoed);
}
@Test
// The JRE range is because reflection based protocols try to (among others)
// make the field Throwable.cause accessible, and are not allowed to.
@EnabledForJreRange(min = JRE.JAVA_8, max = JRE.JAVA_11, disabledReason = "Java 11 announced: All illegal access operations will be denied in a future release")
void error() throws IOException {
SimpleException error = null;
try {
proxy.error();
} catch (SimpleException e) {
error = e;
}
assertNotNull(error);
assertEquals("foo", error.getMessage());
}
@Test
void undeclaredError() throws Exception {
this.throwUndeclaredError = true;
RuntimeException error = null;
try {
proxy.error();
} catch (AvroRuntimeException e) {
error = e;
} finally {
this.throwUndeclaredError = false;
}
assertNotNull(error);
assertTrue(error.toString().contains("foo"));
}
@AfterAll
public static void testStopServer() throws IOException {
client.close();
server.close();
}
}
| TestImpl |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/OnDelete.java | {
"start": 3435,
"end": 3604
} | interface ____ {
/**
* The action to taken by the database when deletion of a row
* would cause the constraint to be violated.
*/
OnDeleteAction action();
}
| OnDelete |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inlineme/InlinerTest.java | {
"start": 45029,
"end": 45380
} | class ____ {
@InlineMe(replacement = "x * 2")
public static int timesTwo(int x) {
return x * 2;
}
}
""")
.expectUnchanged()
.addInputLines(
"Caller.java",
"""
import com.google.foo.Client;
public final | Client |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/bean/override/convention/TestBeanTests.java | {
"start": 7130,
"end": 7268
} | class ____ {
static String example() {
throw new IllegalStateException("Should not be called");
}
}
static | AbstractCompetingMethods |
java | elastic__elasticsearch | x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/ProfilingLicenseChecker.java | {
"start": 547,
"end": 1386
} | class ____ {
private static final LicensedFeature.Momentary UNIVERSAL_PROFILING_FEATURE = LicensedFeature.momentary(
null,
"universal_profiling",
License.OperationMode.ENTERPRISE
);
private final Supplier<XPackLicenseState> licenseStateResolver;
public ProfilingLicenseChecker(Supplier<XPackLicenseState> licenseStateResolver) {
this.licenseStateResolver = licenseStateResolver;
}
public boolean isSupportedLicense() {
return UNIVERSAL_PROFILING_FEATURE.checkWithoutTracking(licenseStateResolver.get());
}
public void requireSupportedLicense() {
if (UNIVERSAL_PROFILING_FEATURE.check(licenseStateResolver.get()) == false) {
throw LicenseUtils.newComplianceException(UNIVERSAL_PROFILING_FEATURE.getName());
}
}
}
| ProfilingLicenseChecker |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-guava-tests/src/test/java/org/assertj/tests/guava/api/MultisetAssert_containsAtMost_Test.java | {
"start": 1073,
"end": 3110
} | class ____ {
@Test
public void should_fail_if_actual_is_null() {
// GIVEN
Multiset<String> actual = null;
// WHEN
Throwable throwable = catchThrowable(() -> assertThat(actual).containsAtMost(1, "test"));
// THEN
assertThat(throwable).isInstanceOf(AssertionError.class)
.hasMessage(actualIsNull());
}
@Test
public void should_fail_if_expected_is_negative() {
// GIVEN
Multiset<String> actual = HashMultiset.create();
// WHEN
Throwable throwable = catchThrowable(() -> assertThat(actual).containsAtMost(-1, "test"));
// THEN
assertThat(throwable).isInstanceOf(IllegalArgumentException.class)
.hasMessage("The maximum count should not be negative.");
}
@Test
public void should_pass_if_actual_contains_value_fewer_times_than_expected() {
// GIVEN
Multiset<String> actual = HashMultiset.create();
actual.add("test", 2);
// when
assertThat(actual).containsAtMost(3, "test");
// THEN pass
}
@Test
public void should_pass_if_actual_contains_value_number_of_times_expected() {
// GIVEN
Multiset<String> actual = HashMultiset.create();
actual.add("test", 2);
// when
assertThat(actual).containsAtMost(2, "test");
// THEN pass
}
@Test
public void should_fail_if_actual_contains_value_more_times_than_expected() {
// GIVEN
Multiset<String> actual = HashMultiset.create();
actual.add("test", 2);
// WHEN
Throwable throwable = catchThrowable(() -> assertThat(actual).containsAtMost(1, "test"));
// THEN
assertThat(throwable).isInstanceOf(AssertionError.class)
.hasMessage(format("%nExpecting:%n" +
" [\"test\", \"test\"]%n" +
"to contain:%n" +
" \"test\"%n" +
"at most 1 times but was found 2 times."));
}
}
| MultisetAssert_containsAtMost_Test |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java | {
"start": 29001,
"end": 133177
} | class ____ extends SubjectInheritingThread {
private final CapacityScheduler cs;
private BlockingQueue<ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>>
backlogs = new LinkedBlockingQueue<>();
public ResourceCommitterService(CapacityScheduler cs) {
this.cs = cs;
setDaemon(true);
}
@Override
public void work() {
while (!Thread.currentThread().isInterrupted()) {
try {
ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
backlogs.take();
cs.writeLock.lock();
try {
cs.tryCommit(cs.getClusterResource(), request, true);
} finally {
cs.writeLock.unlock();
}
} catch (InterruptedException e) {
LOG.error(e.toString());
Thread.currentThread().interrupt();
}
}
LOG.info("ResourceCommitterService exited!");
}
public void addNewCommitRequest(
ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> proposal) {
backlogs.add(proposal);
}
public int getPendingBacklogs() {
return backlogs.size();
}
}
@VisibleForTesting
public PlacementRule getCSMappingPlacementRule() throws IOException {
readLock.lock();
try {
CSMappingPlacementRule mappingRule = new CSMappingPlacementRule();
mappingRule.initialize(this);
return mappingRule;
} finally {
readLock.unlock();
}
}
  /**
   * Re-reads {@code yarn.scheduler.queue-placement-rules} and rebuilds the
   * ordered list of {@link PlacementRule}s, installing it into the RM's
   * placement manager. The user-group and app-name rules are both served by
   * a single shared {@link CSMappingPlacementRule}, which is added at most
   * once; any other rule name is loaded reflectively via PlacementFactory.
   *
   * @throws IOException if a custom rule class cannot be found or fails to
   *         initialize
   */
  @VisibleForTesting
  public void updatePlacementRules() throws IOException {
    // Initialize placement rules
    Collection<String> placementRuleStrs = conf.getStringCollection(
        YarnConfiguration.QUEUE_PLACEMENT_RULES);
    List<PlacementRule> placementRules = new ArrayList<>();
    Set<String> distinguishRuleSet = CapacitySchedulerConfigValidator
        .validatePlacementRules(placementRuleStrs);
    // add UserGroupMappingPlacementRule if empty,default value of
    // yarn.scheduler.queue-placement-rules is user-group
    if (distinguishRuleSet.isEmpty()) {
      distinguishRuleSet.add(YarnConfiguration.USER_GROUP_PLACEMENT_RULE);
    }
    placementRuleStrs = new ArrayList<>(distinguishRuleSet);
    boolean csMappingAdded = false;
    for (String placementRuleStr : placementRuleStrs) {
      switch (placementRuleStr) {
      case YarnConfiguration.USER_GROUP_PLACEMENT_RULE:
      case YarnConfiguration.APP_NAME_PLACEMENT_RULE:
        // Both built-in rule names map onto one shared CSMappingPlacementRule.
        if (!csMappingAdded) {
          PlacementRule csMappingRule = getCSMappingPlacementRule();
          if (null != csMappingRule) {
            placementRules.add(csMappingRule);
            csMappingAdded = true;
          }
        }
        break;
      default:
        boolean isMappingNotEmpty;
        try {
          PlacementRule rule = PlacementFactory.getPlacementRule(
              placementRuleStr, conf);
          if (null != rule) {
            try {
              isMappingNotEmpty = rule.initialize(this);
            } catch (IOException ie) {
              throw new IOException(ie);
            }
            // Only install custom rules that actually produced mappings.
            if (isMappingNotEmpty) {
              placementRules.add(rule);
            }
          }
        } catch (ClassNotFoundException cnfe) {
          throw new IOException(cnfe);
        }
      }
    }
    rmContext.getQueuePlacementManager().updateRules(placementRules);
  }
  /**
   * One-time queue hierarchy setup at scheduler initialization: builds the
   * queues, installs placement rules and workflow priority mappings, then
   * notifies the preemption manager of the new root. Any failure is wrapped
   * in a {@link YarnException}.
   */
  @Lock(CapacityScheduler.class)
  private void initializeQueues(CapacitySchedulerConfiguration conf)
    throws YarnException {
    try {
      this.queueManager.initializeQueues(conf);
      updatePlacementRules();
      this.workflowPriorityMappingsMgr.initialize(this);
      // Notify Preemption Manager
      preemptionManager.refreshQueues(null, this.getRootQueue());
    } catch (Exception e) {
      throw new YarnException("Failed to initialize queues", e);
    }
  }
  /**
   * Applies a refreshed configuration to the existing queue hierarchy:
   * resets the shared queue context, reinitializes queues, placement rules
   * and workflow priority mappings, and informs the preemption manager.
   *
   * @param newConf the freshly loaded scheduler configuration
   * @throws IOException if queue or placement-rule reinitialization fails
   */
  @Lock(CapacityScheduler.class)
  private void reinitializeQueues(CapacitySchedulerConfiguration newConf)
      throws IOException {
    queueContext.reinitialize();
    this.queueManager.reinitializeQueues(newConf);
    updatePlacementRules();
    this.workflowPriorityMappingsMgr.initialize(this);
    // Notify Preemption Manager
    preemptionManager.refreshQueues(null, this.getRootQueue());
  }
@Override
public CSQueue getQueue(String queueName) {
if (queueName == null) {
return null;
}
return this.queueManager.getQueue(queueName);
}
/**
* Returns the normalized queue name, which should be used for internal
* queue references. Currently this is the fullQueuename which disambiguously
* identifies a queue.
* @param name Name of the queue to be normalized
* @return The normalized (full name) of the queue
*/
public String normalizeQueueName(String name) {
if (this.queueManager == null) {
return name;
}
return this.queueManager.normalizeQueueName(name);
}
  /**
   * Determines if a short queue name reference is ambiguous: if at least two
   * queues share the same short name, it is considered ambiguous; otherwise
   * it is not. Delegates to the queue manager's bookkeeping.
   *
   * @param queueName the name of the queue to check for ambiguity
   * @return true if there are at least 2 queues with the same name
   */
  public boolean isAmbiguous(String queueName) {
    return this.queueManager.isAmbiguous(queueName);
  }
  /**
   * Re-admits an application during RM recovery. The target queue may be
   * auto-created from the placement context; if the queue is missing or is
   * no longer a leaf queue, the app is either killed or the RM fails fast,
   * depending on {@code appShouldFailFast}. ACL failures are tolerated here
   * because the app was already accepted before the restart.
   */
  private void addApplicationOnRecovery(ApplicationId applicationId,
      String queueName, String user,
      Priority priority, ApplicationPlacementContext placementContext,
      boolean unmanagedAM) {
    writeLock.lock();
    try {
      //check if the queue needs to be auto-created during recovery
      CSQueue queue = getOrCreateQueueFromPlacementContext(applicationId, user,
          queueName, placementContext, true);
      if (queue == null) {
        //During a restart, this indicates a queue was removed, which is
        //not presently supported
        if (!appShouldFailFast) {
          this.rmContext.getDispatcher().getEventHandler().handle(
              new RMAppEvent(applicationId, RMAppEventType.KILL,
                  "Application killed on recovery as it"
                      + " was submitted to queue " + queueName
                      + " which no longer exists after restart."));
          return;
        } else{
          String queueErrorMsg = "Queue named " + queueName + " missing "
              + "during application recovery."
              + " Queue removal during recovery is not presently "
              + "supported by the capacity scheduler, please "
              + "restart with all queues configured"
              + " which were present before shutdown/restart.";
          LOG.error(FATAL, queueErrorMsg);
          throw new QueueInvalidException(queueErrorMsg);
        }
      }
      if (!(queue instanceof AbstractLeafQueue)) {
        // During RM restart, this means leaf queue was converted to a parent
        // queue, which is not supported for running apps.
        if (!appShouldFailFast) {
          this.rmContext.getDispatcher().getEventHandler().handle(
              new RMAppEvent(applicationId, RMAppEventType.KILL,
                  "Application killed on recovery as it was "
                      + "submitted to queue " + queueName
                      + " which is no longer a leaf queue after restart."));
          return;
        } else{
          String queueErrorMsg = "Queue named " + queueName
              + " is no longer a leaf queue during application recovery."
              + " Changing a leaf queue to a parent queue during recovery is"
              + " not presently supported by the capacity scheduler. Please"
              + " restart with leaf queues before shutdown/restart continuing"
              + " as leaf queues.";
          LOG.error(FATAL, queueErrorMsg);
          throw new QueueInvalidException(queueErrorMsg);
        }
      }
      // When recovering apps in this queue but queue is in STOPPED state,
      // that means its previous state was DRAINING. So we auto transit
      // the state to DRAINING for recovery.
      if (queue.getState() == QueueState.STOPPED) {
        ((AbstractLeafQueue) queue).recoverDrainingState();
      }
      // Submit to the queue
      try {
        queue.submitApplication(applicationId, user, queueName);
      } catch (AccessControlException ace) {
        // Ignore the exception for recovered app as the app was previously
        // accepted.
        LOG.warn("AccessControlException received when trying to recover "
            + applicationId + " in queue " + queueName + " for user " + user
            + ". Since the app was in the queue prior to recovery, the Capacity"
            + " Scheduler will recover the app anyway.", ace);
      }
      queue.getMetrics().submitApp(user, unmanagedAM);
      SchedulerApplication<FiCaSchedulerApp> application =
          new SchedulerApplication<FiCaSchedulerApp>(queue, user, priority,
              unmanagedAM);
      applications.put(applicationId, application);
      LOG.info("Accepted application " + applicationId + " from user: " + user
          + ", in queue: " + queueName);
      LOG.debug(
          applicationId + " is recovering. Skip notifying APP_ACCEPTED");
    } finally {
      writeLock.unlock();
    }
  }
private CSQueue getOrCreateQueueFromPlacementContext(ApplicationId
applicationId, String user, String queueName,
ApplicationPlacementContext placementContext,
boolean isRecovery) {
CSQueue queue = getQueue(queueName);
QueuePath queuePath = new QueuePath(queueName);
if (queue != null) {
return queue;
}
if (isAmbiguous(queueName)) {
return null;
}
if (placementContext != null) {
queuePath = new QueuePath(placementContext.getFullQueuePath());
}
//we need to make sure there are no empty path parts present
if (queuePath.hasEmptyPart()) {
LOG.error("Application submitted to invalid path due to empty parts: " +
"'{}'", queuePath);
return null;
}
if (!queuePath.hasParent()) {
LOG.error("Application submitted to a queue without parent" +
" '{}'", queuePath);
return null;
}
try {
writeLock.lock();
return queueManager.createQueue(queuePath);
} catch (YarnException | IOException e) {
// A null queue is expected if the placementContext is null. In order
// not to disrupt the control flow, if we fail to auto create a queue,
// we fall back to the original logic.
if (placementContext == null) {
LOG.error("Could not auto-create leaf queue " + queueName +
" due to : ", e);
return null;
}
handleQueueCreationError(applicationId, user, queueName, isRecovery, e);
} finally {
writeLock.unlock();
}
return null;
}
  /**
   * Handles a failure to auto-create a queue. During recovery the app is
   * either killed or the RM fails fast (per {@code appShouldFailFast}); for
   * new submissions the application is rejected with an APP_REJECTED event.
   */
  private void handleQueueCreationError(
      ApplicationId applicationId, String user, String queueName,
      boolean isRecovery, Exception e) {
    if (isRecovery) {
      if (!appShouldFailFast) {
        LOG.error("Could not auto-create leaf queue " + queueName +
            " due to : ", e);
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMAppEvent(applicationId, RMAppEventType.KILL,
                "Application killed on recovery"
                    + " as it was submitted to queue " + queueName
                    + " which did not exist and could not be auto-created"));
      } else {
        String queueErrorMsg =
            "Queue named " + queueName + " could not be "
                + "auto-created during application recovery.";
        LOG.error(FATAL, queueErrorMsg, e);
        throw new QueueInvalidException(queueErrorMsg);
      }
    } else {
      LOG.error("Could not auto-create leaf queue due to : ", e);
      final String message =
          "Application " + applicationId + " submission by user : "
              + user
              + " to queue : " + queueName + " failed : " + e
              .getMessage();
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
              message));
    }
  }
  /**
   * Admits a newly submitted application: enforces the system-wide app
   * limit, resolves (possibly auto-creating) the target queue, validates
   * managed-parent/queue-mapping constraints, applies workflow priority
   * mapping, and finally submits to the leaf queue. Every rejection path
   * notifies the RMApp via an APP_REJECTED event; success fires APP_ACCEPTED.
   */
  private void addApplication(ApplicationId applicationId, String queueName,
      String user, Priority priority,
      ApplicationPlacementContext placementContext, boolean unmanagedAM) {
    writeLock.lock();
    try {
      if (isSystemAppsLimitReached()) {
        String message = "Maximum system application limit reached,"
            + "cannot accept submission of application: " + applicationId;
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                message));
        return;
      }
      //Could be a potential auto-created leaf queue
      CSQueue queue = getOrCreateQueueFromPlacementContext(
          applicationId, user, queueName, placementContext, false);
      if (queue == null) {
        String message;
        if (isAmbiguous(queueName)) {
          message = "Application " + applicationId
              + " submitted by user " + user
              + " to ambiguous queue: " + queueName
              + " please use full queue path instead.";
        } else {
          message =
              "Application " + applicationId + " submitted by user " + user
                  + " to unknown queue: " + queueName;
        }
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                message));
        return;
      }
      if (!(queue instanceof AbstractLeafQueue)) {
        String message =
            "Application " + applicationId + " submitted by user : " + user
                + " to non-leaf queue : " + queueName;
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                message));
        return;
      } else if (queue instanceof AutoCreatedLeafQueue && queue
          .getParent() instanceof ManagedParentQueue) {
        //If queue already exists and auto-queue creation was not required,
        //placement context should not be null
        if (placementContext == null) {
          String message =
              "Application " + applicationId + " submission by user : " + user
                  + " to specified queue : " + queueName + "  is prohibited. "
                  + "Verify automatic queue mapping for user exists in " +
                  QUEUE_MAPPING;
          this.rmContext.getDispatcher().getEventHandler().handle(
              new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                  message));
          return;
          // For a queue which exists already and
          // not auto-created above, then its parent queue should match
          // the parent queue specified in queue mapping
        } else if (!queue.getParent().getQueueShortName().equals(
            placementContext.getParentQueue())
            && !queue.getParent().getQueuePath().equals(
                placementContext.getParentQueue())) {
          String message =
              "Auto created Leaf queue " + placementContext.getQueue() + " "
                  + "already exists under queue : " + queue
                  .getParent().getQueueShortName()
                  + ". But Queue mapping configuration " +
                   CapacitySchedulerConfiguration.QUEUE_MAPPING + " has been "
                  + "updated to a different parent queue : "
                  + placementContext.getParentQueue()
                  + " for the specified user : " + user;
          this.rmContext.getDispatcher().getEventHandler().handle(
              new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                  message));
          return;
        }
      }
      try {
        // Workflow priority mappings may override the requested priority.
        priority = workflowPriorityMappingsMgr.mapWorkflowPriorityForApp(
            applicationId, queue, user, priority);
      } catch (YarnException e) {
        String message = "Failed to submit application " + applicationId +
            " submitted by user " + user + " reason: " + e.getMessage();
        this.rmContext.getDispatcher().getEventHandler().handle(new RMAppEvent(
            applicationId, RMAppEventType.APP_REJECTED, message));
        return;
      }
      // Submit to the queue
      try {
        queue.submitApplication(applicationId, user, queueName);
      } catch (AccessControlException ace) {
        LOG.info("Failed to submit application " + applicationId + " to queue "
            + queueName + " from user " + user, ace);
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                ace.toString()));
        return;
      }
      // update the metrics
      queue.getMetrics().submitApp(user, unmanagedAM);
      SchedulerApplication<FiCaSchedulerApp> application =
          new SchedulerApplication<FiCaSchedulerApp>(queue, user, priority,
              unmanagedAM);
      applications.put(applicationId, application);
      LOG.info("Accepted application " + applicationId + " from user: " + user
          + ", in queue: " + queueName);
      rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
    } finally {
      writeLock.unlock();
    }
  }
  /**
   * Creates and registers a new scheduler attempt for an already-added
   * application, optionally transferring state (e.g. live containers) from
   * the previous attempt for work-preserving AM restart, and submits the
   * attempt to its queue.
   */
  private void addApplicationAttempt(
      ApplicationAttemptId applicationAttemptId,
      boolean transferStateFromPreviousAttempt,
      boolean isAttemptRecovering) {
    writeLock.lock();
    try {
      SchedulerApplication<FiCaSchedulerApp> application = applications.get(
          applicationAttemptId.getApplicationId());
      if (application == null) {
        LOG.warn("Application " + applicationAttemptId.getApplicationId()
            + " cannot be found in scheduler.");
        return;
      }
      CSQueue queue = (CSQueue) application.getQueue();
      FiCaSchedulerApp attempt = new FiCaSchedulerApp(applicationAttemptId,
          application.getUser(), queue, queue.getAbstractUsersManager(),
          rmContext, application.getPriority(), isAttemptRecovering,
          activitiesManager);
      if (transferStateFromPreviousAttempt) {
        attempt.transferStateFromPreviousAttempt(
            application.getCurrentAppAttempt());
      }
      application.setCurrentAppAttempt(attempt);
      // Update attempt priority to the latest to avoid race condition i.e
      // SchedulerApplicationAttempt is created with old priority but it is not
      // set to SchedulerApplication#setCurrentAppAttempt.
      // Scenario would occur is
      // 1. SchdulerApplicationAttempt is created with old priority.
      // 2. updateApplicationPriority() updates SchedulerApplication. Since
      // currentAttempt is null, it just return.
      // 3. ScheduelerApplcationAttempt is set in
      // SchedulerApplication#setCurrentAppAttempt.
      attempt.setPriority(application.getPriority());
      maxRunningEnforcer.checkRunnabilityWithUpdate(attempt);
      maxRunningEnforcer.trackApp(attempt);
      queue.submitApplicationAttempt(attempt, application.getUser());
      LOG.info("Added Application Attempt " + applicationAttemptId
          + " to scheduler from user " + application.getUser() + " in queue "
          + queue.getQueuePath());
      if (isAttemptRecovering) {
        LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
            applicationAttemptId);
      } else{
        rmContext.getDispatcher().getEventHandler().handle(
            new RMAppAttemptEvent(applicationAttemptId,
                RMAppAttemptEventType.ATTEMPT_ADDED));
      }
    } finally {
      writeLock.unlock();
    }
  }
private void doneApplication(ApplicationId applicationId,
RMAppState finalState) {
writeLock.lock();
try {
SchedulerApplication<FiCaSchedulerApp> application = applications.get(
applicationId);
if (application == null) {
// The AppRemovedSchedulerEvent maybe sent on recovery for completed
// apps, ignore it.
LOG.warn("Couldn't find application " + applicationId);
return;
}
CSQueue queue = (CSQueue) application.getQueue();
if (!(queue instanceof AbstractLeafQueue)) {
LOG.error("Cannot finish application " + "from non-leaf queue: " + queue
.getQueuePath());
} else{
queue.finishApplication(applicationId, application.getUser());
}
application.stop(finalState);
applications.remove(applicationId);
} finally {
writeLock.unlock();
}
}
  /**
   * Cleans up a finished application attempt: kills live containers (unless
   * kept for work-preserving AM restart), releases all reserved containers,
   * stops the attempt's bookkeeping, and informs the leaf queue and the
   * max-running-apps enforcer.
   */
  private void doneApplicationAttempt(
      ApplicationAttemptId applicationAttemptId,
      RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers) {
    writeLock.lock();
    try {
      LOG.info("Application Attempt " + applicationAttemptId + " is done."
          + " finalState=" + rmAppAttemptFinalState);
      FiCaSchedulerApp attempt = getApplicationAttempt(applicationAttemptId);
      SchedulerApplication<FiCaSchedulerApp> application = applications.get(
          applicationAttemptId.getApplicationId());
      if (application == null || attempt == null) {
        LOG.info(
            "Unknown application " + applicationAttemptId + " has completed!");
        return;
      }
      // Release all the allocated, acquired, running containers
      for (RMContainer rmContainer : attempt.getLiveContainers()) {
        if (keepContainers && rmContainer.getState().equals(
            RMContainerState.RUNNING)) {
          // do not kill the running container in the case of work-preserving AM
          // restart.
          LOG.info("Skip killing " + rmContainer.getContainerId());
          continue;
        }
        super.completedContainer(rmContainer, SchedulerUtils
            .createAbnormalContainerStatus(rmContainer.getContainerId(),
                SchedulerUtils.COMPLETED_APPLICATION),
            RMContainerEventType.KILL);
      }
      // Release all reserved containers
      for (RMContainer rmContainer : attempt.getReservedContainers()) {
        super.completedContainer(rmContainer, SchedulerUtils
            .createAbnormalContainerStatus(rmContainer.getContainerId(),
                "Application Complete"), RMContainerEventType.KILL);
      }
      // Clean up pending requests, metrics etc.
      attempt.stop(rmAppAttemptFinalState);
      // Inform the queue
      Queue queue = attempt.getQueue();
      CSQueue csQueue = (CSQueue) queue;
      if (!(csQueue instanceof AbstractLeafQueue)) {
        LOG.error(
            "Cannot finish application " + "from non-leaf queue: "
                + csQueue.getQueuePath());
      } else {
        csQueue.finishApplicationAttempt(attempt, csQueue.getQueuePath());
        maxRunningEnforcer.untrackApp(attempt);
        // A runnable attempt leaving may allow a queued one to start.
        if (attempt.isRunnable()) {
          maxRunningEnforcer.updateRunnabilityOnAppRemoval(attempt);
        }
      }
    } finally {
      writeLock.unlock();
    }
  }
/**
* Normalize a list of SchedulingRequest.
*
* @param asks scheduling request
*/
private void normalizeSchedulingRequests(List<SchedulingRequest> asks) {
if (asks == null) {
return;
}
Resource maxAllocation = getMaximumResourceCapability();
for (SchedulingRequest ask: asks) {
ResourceSizing sizing = ask.getResourceSizing();
if (sizing != null && sizing.getResources() != null) {
sizing.setResources(
getNormalizedResource(sizing.getResources(), maxAllocation));
}
}
}
  /**
   * AM heartbeat entry point: applies container updates and releases,
   * normalizes and records new resource/scheduling requests, updates the
   * blacklist, and returns the containers allocated since the last call.
   * Demand changes are propagated to the queue's ordering policy only after
   * the application lock has been released.
   */
  @Override
  @Lock(Lock.NoLock.class)
  public Allocation allocate(ApplicationAttemptId applicationAttemptId,
      List<ResourceRequest> ask, List<SchedulingRequest> schedulingRequests,
      List<ContainerId> release, List<String> blacklistAdditions,
      List<String> blacklistRemovals, ContainerUpdates updateRequests) {
    FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
    if (application == null) {
      LOG.error("Calling allocate on removed or non existent application " +
          applicationAttemptId.getApplicationId());
      return EMPTY_ALLOCATION;
    }
    // The allocate may be the leftover from previous attempt, and it will
    // impact current attempt, such as confuse the request and allocation for
    // current attempt's AM container.
    // Note outside precondition check for the attempt id may be
    // outdated here, so double check it here is necessary.
    if (!application.getApplicationAttemptId().equals(applicationAttemptId)) {
      LOG.error("Calling allocate on previous or removed " +
          "or non existent application attempt " + applicationAttemptId);
      return EMPTY_ALLOCATION;
    }
    // Handle all container updates
    handleContainerUpdates(application, updateRequests);
    // Release containers
    releaseContainers(release, application);
    AbstractLeafQueue updateDemandForQueue = null;
    // Sanity check for new allocation requests
    normalizeResourceRequests(ask);
    // Normalize scheduling requests
    normalizeSchedulingRequests(schedulingRequests);
    Allocation allocation;
    // make sure we aren't stopping/removing the application
    // when the allocate comes in
    application.getWriteLock().lock();
    try {
      if (application.isStopped()) {
        return EMPTY_ALLOCATION;
      }
      // Process resource requests
      if (!ask.isEmpty() || (schedulingRequests != null && !schedulingRequests
          .isEmpty())) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "allocate: pre-update " + applicationAttemptId + " ask size ="
                  + ask.size());
          application.showRequests();
        }
        // update the current container ask by considering the already allocated
        // containers from previous allocation request and return updatedNewlyAllocatedContainers.
        autoCorrectContainerAllocation(ask, application);
        // Update application requests
        if (application.updateResourceRequests(ask) || application
            .updateSchedulingRequests(schedulingRequests)) {
          updateDemandForQueue = (AbstractLeafQueue) application.getQueue();
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("allocate: post-update");
          application.showRequests();
        }
      }
      application.updateBlacklist(blacklistAdditions, blacklistRemovals);
      allocation = application.getAllocation(getResourceCalculator(),
          getClusterResource(), getMinimumResourceCapability());
    } finally {
      application.getWriteLock().unlock();
    }
    // Skip demand notification while the AM container itself is pending.
    if (updateDemandForQueue != null && !application
        .isWaitingForAMContainer()) {
      updateDemandForQueue.getOrderingPolicy().demandUpdated(application);
    }
    LOG.debug("Allocation for application {} : {} with cluster resource : {}",
        applicationAttemptId, allocation, getClusterResource());
    return allocation;
  }
@Override
@Lock(Lock.NoLock.class)
public QueueInfo getQueueInfo(String queueName,
boolean includeChildQueues, boolean recursive)
throws IOException {
CSQueue queue = null;
queue = this.getQueue(queueName);
if (queue == null) {
if (isAmbiguous(queueName)) {
throw new IOException("Ambiguous queue reference: " + queueName
+ " please use full queue path instead.");
} else {
throw new IOException("Unknown queue: " + queueName);
}
}
return queue.getQueueInfo(includeChildQueues, recursive);
}
@Override
@Lock(Lock.NoLock.class)
public List<QueueUserACLInfo> getQueueUserAclInfo() {
UserGroupInformation user = null;
try {
user = UserGroupInformation.getCurrentUser();
} catch (IOException ioe) {
// should never happen
return new ArrayList<QueueUserACLInfo>();
}
return getRootQueue().getQueueUserAclInfo(user);
}
  /**
   * Handles a node heartbeat: records the heartbeat under the read lock,
   * then — only when asynchronous scheduling is disabled — runs a
   * synchronous allocation pass for that node under the write lock.
   * Latency of the whole update is reported to scheduler metrics.
   */
  @Override
  protected void nodeUpdate(RMNode rmNode) {
    long begin = System.nanoTime();
    readLock.lock();
    try {
      setLastNodeUpdateTime(Time.now());
      super.nodeUpdate(rmNode);
    } finally {
      readLock.unlock();
    }
    // Try to do scheduling
    if (!asyncSchedulingConf.isScheduleAsynchronously()) {
      writeLock.lock();
      try {
        // reset allocation and reservation stats before we start doing any
        // work
        updateSchedulerHealth(lastNodeUpdateTime, rmNode.getNodeID(),
            CSAssignment.NULL_ASSIGNMENT);
        allocateContainersToNode(rmNode.getNodeID(), true);
      } finally {
        writeLock.unlock();
      }
    }
    long latency = System.nanoTime() - begin;
    CapacitySchedulerMetrics.getMetrics().addNodeUpdate(latency);
  }
/**
* Process resource update on a node.
*/
private void updateNodeAndQueueResource(RMNode nm,
ResourceOption resourceOption) {
writeLock.lock();
try {
updateNodeResource(nm, resourceOption);
Resource clusterResource = getClusterResource();
getRootQueue().updateClusterResource(clusterResource,
new ResourceLimits(clusterResource));
} finally {
writeLock.unlock();
}
}
  /**
   * Process node labels update on a node: moves the node to its new
   * partition (at most one label per node), re-attributes the resources of
   * its running containers from the old partition to the new one, and kills
   * any reservation held on the node before the labels are swapped.
   */
  private void updateLabelsOnNode(NodeId nodeId,
      Set<String> newLabels) {
    FiCaSchedulerNode node = nodeTracker.getNode(nodeId);
    if (null == node) {
      return;
    }
    // Get new partition, we have only one partition per node
    String newPartition;
    if (newLabels.isEmpty()) {
      newPartition = RMNodeLabelsManager.NO_LABEL;
    } else{
      newPartition = newLabels.iterator().next();
    }
    // old partition as well
    String oldPartition = node.getPartition();
    // Update resources of these containers
    for (RMContainer rmContainer : node.getCopiedListOfRunningContainers()) {
      FiCaSchedulerApp application = getApplicationAttempt(
          rmContainer.getApplicationAttemptId());
      if (null != application) {
        application.nodePartitionUpdated(rmContainer, oldPartition,
            newPartition);
      } else{
        LOG.warn("There's something wrong, some RMContainers running on"
            + " a node, but we cannot find SchedulerApplicationAttempt "
            + "for it. Node=" + node.getNodeID() + " applicationAttemptId="
            + rmContainer.getApplicationAttemptId());
        continue;
      }
    }
    // Unreserve container on this node
    RMContainer reservedContainer = node.getReservedContainer();
    if (null != reservedContainer) {
      killReservedContainer(reservedContainer);
    }
    // Update node labels after we've done this
    node.updateLabels(newLabels);
  }
private void updateSchedulerHealth(long now, NodeId nodeId,
CSAssignment assignment) {
List<AssignmentInformation.AssignmentDetails> allocations =
assignment.getAssignmentInformation().getAllocationDetails();
List<AssignmentInformation.AssignmentDetails> reservations =
assignment.getAssignmentInformation().getReservationDetails();
// Get nodeId from allocated container if incoming argument is null.
NodeId updatedNodeid = (nodeId == null)
? allocations.get(allocations.size() - 1).rmContainer.getNodeId()
: nodeId;
if (!allocations.isEmpty()) {
ContainerId allocatedContainerId =
allocations.get(allocations.size() - 1).containerId;
String allocatedQueue = allocations.get(allocations.size() - 1).queue;
schedulerHealth.updateAllocation(now, updatedNodeid, allocatedContainerId,
allocatedQueue);
}
if (!reservations.isEmpty()) {
ContainerId reservedContainerId =
reservations.get(reservations.size() - 1).containerId;
String reservedQueue = reservations.get(reservations.size() - 1).queue;
schedulerHealth.updateReservation(now, updatedNodeid, reservedContainerId,
reservedQueue);
}
schedulerHealth.updateSchedulerReservationCounts(assignment
.getAssignmentInformation().getNumReservations());
schedulerHealth.updateSchedulerAllocationCounts(assignment
.getAssignmentInformation().getNumAllocations());
schedulerHealth.updateSchedulerRunDetails(now, assignment
.getAssignmentInformation().getAllocated(), assignment
.getAssignmentInformation().getReserved());
}
private boolean canAllocateMore(CSAssignment assignment, int offswitchCount,
int assignedContainers) {
// Current assignment shouldn't be empty
if (assignment == null
|| Resources.equals(assignment.getResource(), Resources.none())) {
return false;
}
// offswitch assignment should be under threshold
if (offswitchCount >= offswitchPerHeartbeatLimit) {
return false;
}
// And it should not be a reserved container
if (assignment.getAssignmentInformation().getNumReservations() > 0) {
return false;
}
// assignMultipleEnabled should be ON,
// and assignedContainers should be under threshold
return assignMultipleEnabled
&& (maxAssignPerHeartbeat == -1
|| assignedContainers < maxAssignPerHeartbeat);
}
private Map<NodeId, FiCaSchedulerNode> getNodesHeartbeated(String partition) {
Map<NodeId, FiCaSchedulerNode> nodesByPartition = new HashMap<>();
boolean printSkippedNodeLogging = isPrintSkippedNodeLogging(this);
List<FiCaSchedulerNode> nodes = nodeTracker
.getNodesPerPartition(partition);
if (nodes != null && !nodes.isEmpty()) {
//Filter for node heartbeat too long
nodes.stream()
.filter(node ->
!shouldSkipNodeSchedule(node, this, printSkippedNodeLogging))
.forEach(n -> nodesByPartition.put(n.getNodeID(), n));
}
if (printSkippedNodeLogging) {
printedVerboseLoggingForAsyncScheduling = true;
}
return nodesByPartition;
}
private CandidateNodeSet<FiCaSchedulerNode> getCandidateNodeSet(
String partition) {
CandidateNodeSet<FiCaSchedulerNode> candidates = null;
Map<NodeId, FiCaSchedulerNode> nodesByPartition
= getNodesHeartbeated(partition);
if (!nodesByPartition.isEmpty()) {
candidates = new SimpleCandidateNodeSet<FiCaSchedulerNode>(
nodesByPartition, partition);
}
return candidates;
}
private CandidateNodeSet<FiCaSchedulerNode> getCandidateNodeSet(
FiCaSchedulerNode node) {
CandidateNodeSet<FiCaSchedulerNode> candidates = null;
candidates = new SimpleCandidateNodeSet<>(node);
if (multiNodePlacementEnabled) {
Map<NodeId, FiCaSchedulerNode> nodesByPartition =
getNodesHeartbeated(node.getPartition());
if (!nodesByPartition.isEmpty()) {
candidates = new SimpleCandidateNodeSet<FiCaSchedulerNode>(
nodesByPartition, node.getPartition());
}
}
return candidates;
}
  /**
   * We need to make sure when doing allocation, Node should be existed.
   * Constructs a {@link CandidateNodeSet} for the node (or its partition)
   * and runs one or more allocation rounds. When triggered by a node
   * heartbeat, keeps assigning on the same heartbeat while
   * {@link #canAllocateMore} permits, tracking off-switch and total
   * assignment counts against their per-heartbeat limits.
   */
  private void allocateContainersToNode(NodeId nodeId,
      boolean withNodeHeartbeat) {
    FiCaSchedulerNode node = getNode(nodeId);
    if (null != node) {
      int offswitchCount = 0;
      int assignedContainers = 0;
      CandidateNodeSet<FiCaSchedulerNode> candidates =
          getCandidateNodeSet(node);
      CSAssignment assignment = allocateContainersToNode(candidates,
          withNodeHeartbeat);
      // Only check if we can allocate more container on the same node when
      // scheduling is triggered by node heartbeat
      if (null != assignment && withNodeHeartbeat) {
        if (assignment.getType() == NodeType.OFF_SWITCH) {
          offswitchCount++;
        }
        if (Resources.greaterThan(calculator, getClusterResource(),
            assignment.getResource(), Resources.none())) {
          assignedContainers++;
        }
        while (canAllocateMore(assignment, offswitchCount,
            assignedContainers)) {
          // Try to see if it is possible to allocate multiple container for
          // the same node heartbeat
          assignment = allocateContainersToNode(candidates, true);
          if (null != assignment
              && assignment.getType() == NodeType.OFF_SWITCH) {
            offswitchCount++;
          }
          if (null != assignment
              && Resources.greaterThan(calculator, getClusterResource(),
              assignment.getResource(), Resources.none())) {
            assignedContainers++;
          }
        }
        if (offswitchCount >= offswitchPerHeartbeatLimit) {
          LOG.debug("Assigned maximum number of off-switch containers: {},"
              + " assignments so far: {}", offswitchCount, assignment);
        }
      }
    }
  }
  /*
   * Logics of allocate container on a single node (Old behavior).
   * Rejects the pass when the node was removed, defers to reservation
   * fulfillment when the node holds a reservation, and skips nodes without
   * enough available-or-preemptible resource for even a minimum allocation.
   * Returns null whenever no new allocation attempt is made.
   */
  private CSAssignment allocateContainerOnSingleNode(
      CandidateNodeSet<FiCaSchedulerNode> candidates, FiCaSchedulerNode node,
      boolean withNodeHeartbeat) {
    LOG.debug("Trying to schedule on node: {}, available: {}",
        node.getNodeName(), node.getUnallocatedResource());
    // Backward compatible way to make sure previous behavior which allocation
    // driven by node heartbeat works.
    if (getNode(node.getNodeID()) != node) {
      LOG.error("Trying to schedule on a removed node, please double check, "
          + "nodeId=" + node.getNodeID());
      ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
          "", getRootQueue().getQueuePath(), ActivityState.REJECTED,
          ActivityDiagnosticConstant.INIT_CHECK_SINGLE_NODE_REMOVED);
      ActivitiesLogger.NODE.finishSkippedNodeAllocation(activitiesManager,
          node);
      return null;
    }
    // Assign new containers...
    // 1. Check for reserved applications
    // 2. Schedule if there are no reservations
    RMContainer reservedContainer = node.getReservedContainer();
    if (reservedContainer != null) {
      allocateFromReservedContainer(node, withNodeHeartbeat, reservedContainer);
      // Do not schedule if there are any reservations to fulfill on the node
      LOG.debug("Skipping scheduling since node {} is reserved by"
          + " application {}", node.getNodeID(), reservedContainer.
          getContainerId().getApplicationAttemptId());
      return null;
    }
    // First check if we can schedule
    // When this time look at one node only, try schedule if the node
    // has any available or killable resource
    if (calculator.computeAvailableContainers(Resources
        .add(node.getUnallocatedResource(), node.getTotalKillableResources()),
        minimumAllocation) <= 0) {
      LOG.debug("This node " + node.getNodeID() + " doesn't have sufficient "
          + "available or preemptible resource for minimum allocation");
      ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
          "", getRootQueue().getQueuePath(), ActivityState.REJECTED,
          ActivityDiagnosticConstant.
              INIT_CHECK_SINGLE_NODE_RESOURCE_INSUFFICIENT);
      ActivitiesLogger.NODE.finishSkippedNodeAllocation(activitiesManager,
          node);
      return null;
    }
    return allocateOrReserveNewContainers(candidates, withNodeHeartbeat);
  }
/**
 * Tries to fulfill an existing reservation on the given node by asking the
 * reserving application's leaf queue to assign containers there, then submits
 * the resulting assignment as a resource-commit request.
 *
 * @param node              the node holding the reservation
 * @param withNodeHeartbeat true when invoked from synchronous (heartbeat
 *                          driven) scheduling; scheduler-health stats are
 *                          only updated in that mode
 * @param reservedContainer the reserved container on the node; may be null
 *                          if unreserved concurrently
 */
private void allocateFromReservedContainer(FiCaSchedulerNode node,
    boolean withNodeHeartbeat, RMContainer reservedContainer) {
  // The reservation may have been released concurrently by the proposal
  // judgment thread; nothing to do in that case.
  if (reservedContainer == null) {
    LOG.warn("reservedContainer is null, that may be unreserved by the proposal judgment thread");
    return;
  }
  FiCaSchedulerApp reservedApplication = getCurrentAttemptForContainer(
      reservedContainer.getContainerId());
  // The reserving app may have finished; the stale reservation is not
  // schedulable, so just log and return.
  if (reservedApplication == null) {
    LOG.error(
        "Trying to schedule for a finished app, please double check. nodeId="
            + node.getNodeID() + " container=" + reservedContainer
            .getContainerId());
    return;
  }

  // Try to fulfill the reservation
  LOG.debug("Trying to fulfill reservation for application {} on node: {}",
      reservedApplication.getApplicationId(), node.getNodeID());

  AbstractLeafQueue queue = ((AbstractLeafQueue) reservedApplication.getQueue());
  // Restrict the candidate set to this single node so the queue can only
  // satisfy (or re-reserve) the reservation here.
  CSAssignment assignment = queue.assignContainers(getClusterResource(),
      new SimpleCandidateNodeSet<>(node),
      // TODO, now we only consider limits for parent for non-labeled
      // resources, should consider labeled resources as well.
      new ResourceLimits(labelManager
          .getResourceByLabel(RMNodeLabelsManager.NO_LABEL,
              getClusterResource())),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);

  if (assignment.isFulfilledReservation()) {
    if (withNodeHeartbeat) {
      // Only update SchedulerHealth in sync scheduling, existing
      // Data structure of SchedulerHealth need to be updated for
      // Async mode
      updateSchedulerHealth(lastNodeUpdateTime, node.getNodeID(),
          assignment);
    }

    schedulerHealth.updateSchedulerFulfilledReservationCounts(1);

    ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
        queue.getParent().getQueuePath(), queue.getQueuePath(),
        ActivityState.ACCEPTED, ActivityDiagnosticConstant.EMPTY);
    ActivitiesLogger.NODE.finishAllocatedNodeAllocation(activitiesManager,
        node, reservedContainer.getContainerId(),
        AllocationState.ALLOCATED_FROM_RESERVED);
  } else if (assignment.getAssignmentInformation().getNumReservations() > 0) {
    // The queue re-reserved instead of allocating; record that outcome.
    ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
        queue.getParent().getQueuePath(), queue.getQueuePath(),
        ActivityState.RE_RESERVED, ActivityDiagnosticConstant.EMPTY);
    ActivitiesLogger.NODE.finishAllocatedNodeAllocation(activitiesManager,
        node, reservedContainer.getContainerId(), AllocationState.RESERVED);
  }

  assignment.setSchedulingMode(
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  submitResourceCommitRequest(getClusterResource(), assignment);
}
/**
 * Asks the root queue to allocate or reserve new containers on the candidate
 * nodes. First attempts partition-exclusive scheduling; if nothing was
 * assigned and the candidates' partition is a non-exclusive label, retries in
 * IGNORE_PARTITION_EXCLUSIVITY mode.
 *
 * @param candidates        candidate nodes (single node or multi-node set)
 * @param withNodeHeartbeat true when driven by a node heartbeat; controls
 *                          scheduler-health accounting
 * @return the exclusive assignment if it allocated anything, the
 *         non-exclusive assignment otherwise, or null when no non-exclusive
 *         retry is applicable
 */
private CSAssignment allocateOrReserveNewContainers(
    CandidateNodeSet<FiCaSchedulerNode> candidates,
    boolean withNodeHeartbeat) {
  // Pass 1: respect partition exclusivity for the candidates' partition.
  CSAssignment assignment = getRootQueue().assignContainers(
      getClusterResource(), candidates, new ResourceLimits(labelManager
          .getResourceByLabel(candidates.getPartition(),
              getClusterResource())),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);

  assignment.setSchedulingMode(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  submitResourceCommitRequest(getClusterResource(), assignment);

  // If anything was assigned, report health (sync mode only) and stop here.
  if (Resources.greaterThan(calculator, getClusterResource(),
      assignment.getResource(), Resources.none())) {
    FiCaSchedulerNode node = CandidateNodeSetUtils.getSingleNode(candidates);
    NodeId nodeId = null;
    if (node != null) {
      nodeId = node.getNodeID();
    }
    if (withNodeHeartbeat) {
      updateSchedulerHealth(lastNodeUpdateTime, nodeId, assignment);
    }
    return assignment;
  }

  // Only do non-exclusive allocation when node has node-labels.
  if (StringUtils.equals(candidates.getPartition(),
      RMNodeLabelsManager.NO_LABEL)) {
    return null;
  }

  // Only do non-exclusive allocation when the node-label supports that
  try {
    if (rmContext.getNodeLabelManager().isExclusiveNodeLabel(
        candidates.getPartition())) {
      return null;
    }
  } catch (IOException e) {
    // Treat a label-lookup failure as "cannot share": skip the retry.
    LOG.warn(
        "Exception when trying to get exclusivity of node label=" + candidates
            .getPartition(), e);
    return null;
  }

  // Pass 2: retry, ignoring partition exclusivity (shared label).
  assignment = getRootQueue().assignContainers(getClusterResource(),
      candidates,
      // TODO, now we only consider limits for parent for non-labeled
      // resources, should consider labeled resources as well.
      new ResourceLimits(labelManager
          .getResourceByLabel(RMNodeLabelsManager.NO_LABEL,
              getClusterResource())),
      SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY);
  assignment.setSchedulingMode(SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY);
  submitResourceCommitRequest(getClusterResource(), assignment);
  return assignment;
}
/**
 * Multi-node allocation path: first tries to satisfy any reservation held on
 * each candidate node, then falls through to regular allocate-or-reserve
 * scheduling over the whole candidate set.
 */
private CSAssignment allocateContainersOnMultiNodes(
    CandidateNodeSet<FiCaSchedulerNode> candidates) {
  // Fulfill outstanding reservations before attempting new allocations.
  for (FiCaSchedulerNode candidateNode : candidates.getAllNodes().values()) {
    RMContainer reserved = candidateNode.getReservedContainer();
    if (reserved == null) {
      continue;
    }
    allocateFromReservedContainer(candidateNode, false, reserved);
  }
  return allocateOrReserveNewContainers(candidates, false);
}
/**
 * Entry point for container allocation on a candidate node set. Dispatches
 * to the single-node path or the multi-node-placement path depending on
 * configuration, records activity traces around the attempt, and reports
 * allocation latency to the scheduler metrics.
 *
 * @param candidates        the candidate node set to schedule on
 * @param withNodeHeartbeat true when driven by a node heartbeat
 * @return the resulting assignment, or null when scheduling is not yet
 *         allowed (work-preserving recovery in progress) or nothing could
 *         be scheduled on the single node
 */
@VisibleForTesting
CSAssignment allocateContainersToNode(
    CandidateNodeSet<FiCaSchedulerNode> candidates,
    boolean withNodeHeartbeat) {
  // During work-preserving recovery, hold off allocations until the
  // scheduler is ready.
  if (rmContext.isWorkPreservingRecoveryEnabled() && !rmContext
      .isSchedulerReadyForAllocatingContainers()) {
    return null;
  }

  long startTime = System.nanoTime();

  // Backward compatible way to make sure previous behavior which allocation
  // driven by node heartbeat works.
  FiCaSchedulerNode node = CandidateNodeSetUtils.getSingleNode(candidates);

  // We have two different logics to handle allocation on single node / multi
  // nodes.
  CSAssignment assignment;
  if (!multiNodePlacementEnabled) {
    ActivitiesLogger.NODE.startNodeUpdateRecording(activitiesManager,
        node.getNodeID());
    assignment = allocateContainerOnSingleNode(candidates,
        node, withNodeHeartbeat);
    ActivitiesLogger.NODE.finishNodeUpdateRecording(activitiesManager,
        node.getNodeID(), candidates.getPartition());
  } else{
    // Multi-node placement records activities under a placeholder node id.
    ActivitiesLogger.NODE.startNodeUpdateRecording(activitiesManager,
        ActivitiesManager.EMPTY_NODE_ID);
    assignment = allocateContainersOnMultiNodes(candidates);
    ActivitiesLogger.NODE.finishNodeUpdateRecording(activitiesManager,
        ActivitiesManager.EMPTY_NODE_ID, candidates.getPartition());
  }

  // Only record latency when something was actually allocated.
  if (assignment != null && assignment.getAssignmentInformation() != null
      && assignment.getAssignmentInformation().getNumAllocations() > 0) {
    long allocateTime = System.nanoTime() - startTime;
    CapacitySchedulerMetrics.getMetrics().addAllocate(allocateTime);
  }
  return assignment;
}
/**
 * Extracts the queue name an added application should be placed into.
 * ApplicationSubmissionContext (and hence the event's queue field) may carry
 * only a leaf queue name for legacy reasons; when a placement context is
 * present (a mapping rule decided the placement) the full path is rebuilt
 * from its parent and leaf parts instead.
 *
 * @param appAddedEvent the application-added event with placement details
 * @return the queue name (full path when derivable from the placement
 *         context, otherwise the event's own queue field)
 */
private String getAddedAppQueueName(AppAddedSchedulerEvent appAddedEvent) {
  ApplicationPlacementContext placementContext =
      appAddedEvent.getPlacementContext();
  // No placement decision, or one without a leaf queue: fall back to the
  // queue carried by the event itself.
  if (placementContext == null || placementContext.getQueue() == null) {
    return appAddedEvent.getQueue();
  }
  // Rebuild the proper path from the placement context's parent + leaf.
  String leaf = placementContext.getQueue();
  if (placementContext.hasParentQueue()) {
    return placementContext.getParentQueue() + "." + leaf;
  }
  return leaf;
}
/**
 * Central scheduler event dispatcher. Each event type is unpacked and routed
 * to the corresponding internal handler; unknown event types are logged and
 * ignored. Invoked from the RM's event-dispatch machinery.
 *
 * @param event the scheduler event to process
 */
@Override
public void handle(SchedulerEvent event) {
  switch(event.getType()) {
  case NODE_ADDED:
  {
    NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent)event;
    addNode(nodeAddedEvent.getAddedRMNode());
    // Recover any containers reported by the (possibly restarted) NM.
    recoverContainersOnNode(nodeAddedEvent.getContainerReports(),
        nodeAddedEvent.getAddedRMNode());
  }
  break;
  case NODE_REMOVED:
  {
    NodeRemovedSchedulerEvent nodeRemovedEvent = (NodeRemovedSchedulerEvent)event;
    removeNode(nodeRemovedEvent.getRemovedRMNode());
  }
  break;
  case NODE_RESOURCE_UPDATE:
  {
    NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent =
        (NodeResourceUpdateSchedulerEvent)event;
    updateNodeAndQueueResource(nodeResourceUpdatedEvent.getRMNode(),
        nodeResourceUpdatedEvent.getResourceOption());
  }
  break;
  case NODE_LABELS_UPDATE:
  {
    NodeLabelsUpdateSchedulerEvent labelUpdateEvent =
        (NodeLabelsUpdateSchedulerEvent) event;
    updateNodeLabelsAndQueueResource(labelUpdateEvent);
  }
  break;
  case NODE_ATTRIBUTES_UPDATE:
  {
    NodeAttributesUpdateSchedulerEvent attributeUpdateEvent =
        (NodeAttributesUpdateSchedulerEvent) event;
    updateNodeAttributes(attributeUpdateEvent);
  }
  break;
  case NODE_UPDATE:
  {
    NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)event;
    // Record heartbeat-interval metrics before processing the update.
    updateSchedulerNodeHBIntervalMetrics(nodeUpdatedEvent);
    nodeUpdate(nodeUpdatedEvent.getRMNode());
  }
  break;
  case APP_ADDED:
  {
    AppAddedSchedulerEvent appAddedEvent = (AppAddedSchedulerEvent) event;
    // Resolve reservation queues (plan queues) to the actual target queue;
    // a null result means the app was rejected during resolution.
    String queueName = resolveReservationQueueName(
        getAddedAppQueueName(appAddedEvent), appAddedEvent.getApplicationId(),
        appAddedEvent.getReservationID(), appAddedEvent.getIsAppRecovering());
    if (queueName != null) {
      if (!appAddedEvent.getIsAppRecovering()) {
        addApplication(appAddedEvent.getApplicationId(), queueName,
            appAddedEvent.getUser(), appAddedEvent.getApplicatonPriority(),
            appAddedEvent.getPlacementContext(),
            appAddedEvent.isUnmanagedAM());
      } else {
        addApplicationOnRecovery(appAddedEvent.getApplicationId(), queueName,
            appAddedEvent.getUser(), appAddedEvent.getApplicatonPriority(),
            appAddedEvent.getPlacementContext(),
            appAddedEvent.isUnmanagedAM());
      }
    }
  }
  break;
  case APP_REMOVED:
  {
    AppRemovedSchedulerEvent appRemovedEvent = (AppRemovedSchedulerEvent)event;
    doneApplication(appRemovedEvent.getApplicationID(),
        appRemovedEvent.getFinalState());
  }
  break;
  case APP_ATTEMPT_ADDED:
  {
    AppAttemptAddedSchedulerEvent appAttemptAddedEvent =
        (AppAttemptAddedSchedulerEvent) event;
    addApplicationAttempt(appAttemptAddedEvent.getApplicationAttemptId(),
        appAttemptAddedEvent.getTransferStateFromPreviousAttempt(),
        appAttemptAddedEvent.getIsAttemptRecovering());
  }
  break;
  case APP_ATTEMPT_REMOVED:
  {
    AppAttemptRemovedSchedulerEvent appAttemptRemovedEvent =
        (AppAttemptRemovedSchedulerEvent) event;
    doneApplicationAttempt(appAttemptRemovedEvent.getApplicationAttemptID(),
        appAttemptRemovedEvent.getFinalAttemptState(),
        appAttemptRemovedEvent.getKeepContainersAcrossAppAttempts());
  }
  break;
  case CONTAINER_EXPIRED:
  {
    ContainerExpiredSchedulerEvent containerExpiredEvent =
        (ContainerExpiredSchedulerEvent) event;
    ContainerId containerId = containerExpiredEvent.getContainerId();
    // An expired increase-token rolls back the pending container update;
    // any other expiry completes the container as EXPIRED.
    if (containerExpiredEvent.isIncrease()) {
      rollbackContainerUpdate(containerId);
    } else {
      completedContainer(getRMContainer(containerId),
          SchedulerUtils.createAbnormalContainerStatus(
              containerId,
              SchedulerUtils.EXPIRED_CONTAINER),
          RMContainerEventType.EXPIRE);
    }
  }
  break;
  case RELEASE_CONTAINER:
  {
    RMContainer container = ((ReleaseContainerEvent) event).getContainer();
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.RELEASED_CONTAINER),
        RMContainerEventType.RELEASED);
  }
  break;
  case KILL_RESERVED_CONTAINER:
  {
    ContainerPreemptEvent killReservedContainerEvent =
        (ContainerPreemptEvent) event;
    RMContainer container = killReservedContainerEvent.getContainer();
    killReservedContainer(container);
  }
  break;
  case MARK_CONTAINER_FOR_PREEMPTION:
  {
    ContainerPreemptEvent preemptContainerEvent =
        (ContainerPreemptEvent)event;
    ApplicationAttemptId aid = preemptContainerEvent.getAppId();
    RMContainer containerToBePreempted = preemptContainerEvent.getContainer();
    markContainerForPreemption(aid, containerToBePreempted);
  }
  break;
  case MARK_CONTAINER_FOR_KILLABLE:
  {
    ContainerPreemptEvent containerKillableEvent = (ContainerPreemptEvent)event;
    RMContainer killableContainer = containerKillableEvent.getContainer();
    markContainerForKillable(killableContainer);
  }
  break;
  case MARK_CONTAINER_FOR_NONKILLABLE:
  {
    // Un-marking only matters when lazy preemption is on; otherwise the
    // container was already killed when marked killable.
    if (isLazyPreemptionEnabled) {
      ContainerPreemptEvent cancelKillContainerEvent =
          (ContainerPreemptEvent) event;
      markContainerForNonKillable(cancelKillContainerEvent.getContainer());
    }
  }
  break;
  case MANAGE_QUEUE:
  {
    QueueManagementChangeEvent queueManagementChangeEvent =
        (QueueManagementChangeEvent) event;
    AbstractParentQueue parentQueue = queueManagementChangeEvent.getParentQueue();
    try {
      final List<QueueManagementChange> queueManagementChanges =
          queueManagementChangeEvent.getQueueManagementChanges();
      ((ManagedParentQueue) parentQueue)
          .validateAndApplyQueueManagementChanges(queueManagementChanges);
    } catch (SchedulerDynamicEditException sde) {
      LOG.error("Queue Management Change event cannot be applied for "
          + "parent queue : " + parentQueue.getQueuePath(), sde);
    } catch (IOException ioe) {
      LOG.error("Queue Management Change event cannot be applied for "
          + "parent queue : " + parentQueue.getQueuePath(), ioe);
    }
  }
  break;
  case AUTO_QUEUE_DELETION:
    try {
      AutoCreatedQueueDeletionEvent autoCreatedQueueDeletionEvent =
          (AutoCreatedQueueDeletionEvent) event;
      removeAutoCreatedQueue(autoCreatedQueueDeletionEvent.
          getCheckQueue());
    } catch (SchedulerDynamicEditException sde) {
      LOG.error("Dynamic queue deletion cannot be applied for "
          + "queue : ", sde);
    }
    break;
  default:
    LOG.error("Invalid eventtype " + event.getType() + ". Ignoring!");
  }
}
/**
 * Removes the given queue if it is a dynamic queue that has gone inactive;
 * otherwise a no-op. Performed under the scheduler write lock.
 *
 * @param checkQueue candidate queue for auto deletion
 * @throws SchedulerDynamicEditException if removal fails
 */
private void removeAutoCreatedQueue(CSQueue checkQueue)
    throws SchedulerDynamicEditException{
  writeLock.lock();
  try {
    boolean inactiveDynamicQueue = checkQueue instanceof AbstractCSQueue
        && ((AbstractCSQueue) checkQueue).isInactiveDynamicQueue();
    if (inactiveDynamicQueue) {
      removeQueue(checkQueue);
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Applies a node-attributes update event: for every hostname in the event,
 * pushes the new attribute set to all tracked nodes resolving to that
 * hostname. Performed under the scheduler write lock.
 *
 * @param attributeUpdateEvent event carrying hostname-to-attributes updates
 */
private void updateNodeAttributes(
    NodeAttributesUpdateSchedulerEvent attributeUpdateEvent) {
  writeLock.lock();
  try {
    Map<String, Set<NodeAttribute>> updatedHosts =
        attributeUpdateEvent.getUpdatedNodeToAttributes();
    for (Entry<String, Set<NodeAttribute>> hostEntry : updatedHosts.entrySet()) {
      // A hostname may resolve to several NodeIds (multiple NMs per host).
      List<NodeId> nodeIds =
          nodeTracker.getNodeIdsByResourceName(hostEntry.getKey());
      updateAttributesOnNode(nodeIds, hostEntry.getValue());
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Replaces the attribute set on each of the given tracked nodes.
 *
 * @param nodeIds    nodes to update
 * @param attributes the new attribute set to apply
 */
private void updateAttributesOnNode(List<NodeId> nodeIds,
    Set<NodeAttribute> attributes) {
  for (NodeId nodeId : nodeIds) {
    SchedulerNode trackedNode = nodeTracker.getNode(nodeId);
    trackedNode.updateNodeAttributes(attributes);
  }
}
/**
 * Process node labels update.
 *
 * Applies the per-node label changes, refreshes the label-to-node cache for
 * every partition touched (both the nodes' previous partitions and the newly
 * assigned labels), and finally recomputes queue resources cluster-wide.
 * Performed under the scheduler write lock.
 */
private void updateNodeLabelsAndQueueResource(
    NodeLabelsUpdateSchedulerEvent labelUpdateEvent) {
  writeLock.lock();
  try {
    Set<String> updateLabels = new HashSet<String>();
    for (Entry<NodeId, Set<String>> entry : labelUpdateEvent
        .getUpdatedNodeToLabels().entrySet()) {
      NodeId id = entry.getKey();
      Set<String> labels = entry.getValue();
      FiCaSchedulerNode node = nodeTracker.getNode(id);

      if (node != null) {
        // Update old partition to list.
        // NOTE: the node's *current* partition must be captured before
        // updateLabelsOnNode changes it, so its old partition's cache entry
        // is refreshed too.
        updateLabels.add(node.getPartition());
      }
      updateLabelsOnNode(id, labels);
      updateLabels.addAll(labels);
    }
    refreshLabelToNodeCache(updateLabels);
    // Label moves change per-partition capacity; recompute queue resources.
    Resource clusterResource = getClusterResource();
    getRootQueue().updateClusterResource(clusterResource,
        new ResourceLimits(clusterResource));
  } finally {
    writeLock.unlock();
  }
}
/**
 * Refreshes the node tracker's per-partition node membership for each of the
 * given labels, using the label manager's current label-to-nodes mapping.
 * Labels with no mapping are skipped.
 *
 * @param updateLabels the partitions whose cached membership should refresh
 */
private void refreshLabelToNodeCache(Set<String> updateLabels) {
  Map<String, Set<NodeId>> labelsToNodes =
      labelManager.getLabelsToNodes(updateLabels);
  for (String partition : updateLabels) {
    Set<NodeId> members = labelsToNodes.get(partition);
    if (members != null) {
      nodeTracker.updateNodesPerPartition(partition, members);
    }
  }
}
/**
 * Add node to nodeTracker. Used when validating CS configuration by
 * instantiating a new CS instance.
 *
 * @param nodesToAdd node to be added
 */
public void addNodes(List<FiCaSchedulerNode> nodesToAdd) {
  writeLock.lock();
  try {
    nodesToAdd.forEach(nodeTracker::addNode);
  } finally {
    writeLock.unlock();
  }
}
/**
 * Registers a new NM with the scheduler: tracks it, activates it in the
 * node-label manager, restores any stored node attributes, recomputes queue
 * resources, and — when this is the first node of the cluster — wakes up the
 * async scheduling threads. Performed under the scheduler write lock.
 *
 * @param nodeManager the RM-side representation of the added node
 */
private void addNode(RMNode nodeManager) {
  writeLock.lock();
  try {
    FiCaSchedulerNode schedulerNode = new FiCaSchedulerNode(nodeManager,
        usePortForNodeName, nodeManager.getNodeLabels());
    nodeTracker.addNode(schedulerNode);

    // update this node to node label manager
    if (labelManager != null) {
      labelManager.activateNode(nodeManager.getNodeID(),
          schedulerNode.getTotalResource());
    }

    // recover attributes from store if any.
    if (rmContext.getNodeAttributesManager() != null) {
      rmContext.getNodeAttributesManager()
          .refreshNodeAttributesToScheduler(schedulerNode.getNodeID());
    }

    // Cluster capacity changed; recompute queue resources.
    Resource clusterResource = getClusterResource();
    getRootQueue().updateClusterResource(clusterResource,
        new ResourceLimits(clusterResource));

    LOG.info(
        "Added node " + nodeManager.getNodeAddress() + " clusterResource: "
            + clusterResource);

    // First node of the cluster: async scheduler threads were parked while
    // there was nothing to schedule on.
    if (asyncSchedulingConf.isScheduleAsynchronously() && getNumClusterNodes() == 1) {
      for (AsyncScheduleThread t : asyncSchedulingConf.asyncSchedulerThreads) {
        t.beginSchedule();
      }
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Deregisters an NM from the scheduler: deactivates it in the label manager,
 * kills its running containers (as LOST) and any reservation, removes it
 * from the node tracker, recomputes queue resources, and lets the async
 * scheduling configuration react to the new node count. Performed under the
 * scheduler write lock.
 *
 * @param nodeInfo the RM-side representation of the removed node
 */
private void removeNode(RMNode nodeInfo) {
  writeLock.lock();
  try {
    // update this node to node label manager
    if (labelManager != null) {
      labelManager.deactivateNode(nodeInfo.getNodeID());
    }

    NodeId nodeId = nodeInfo.getNodeID();
    FiCaSchedulerNode node = nodeTracker.getNode(nodeId);
    if (node == null) {
      LOG.error("Attempting to remove non-existent node " + nodeId);
      return;
    }

    // Remove running containers
    List<RMContainer> runningContainers =
        node.getCopiedListOfRunningContainers();
    for (RMContainer container : runningContainers) {
      super.completedContainer(container, SchedulerUtils
          .createAbnormalContainerStatus(container.getContainerId(),
              SchedulerUtils.LOST_CONTAINER), RMContainerEventType.KILL);
      node.releaseContainer(container.getContainerId(), true);
    }

    // Remove reservations, if any
    RMContainer reservedContainer = node.getReservedContainer();
    if (reservedContainer != null) {
      super.completedContainer(reservedContainer, SchedulerUtils
          .createAbnormalContainerStatus(reservedContainer.getContainerId(),
              SchedulerUtils.LOST_CONTAINER), RMContainerEventType.KILL);
    }

    nodeTracker.removeNode(nodeId);
    // Cluster capacity changed; recompute queue resources.
    Resource clusterResource = getClusterResource();
    getRootQueue().updateClusterResource(clusterResource,
        new ResourceLimits(clusterResource));
    int numNodes = nodeTracker.nodeCount();

    asyncSchedulingConf.nodeRemoved(numNodes);

    LOG.info(
        "Removed node " + nodeInfo.getNodeAddress() + " clusterResource: "
            + getClusterResource());
  } finally {
    writeLock.unlock();
  }
}
/**
 * Records the time elapsed since the updating node's previous heartbeat
 * into the capacity-scheduler metrics. Unknown nodes are ignored.
 *
 * @param nodeUpdatedEvent the node-update event being processed
 */
private void updateSchedulerNodeHBIntervalMetrics(
    NodeUpdateSchedulerEvent nodeUpdatedEvent) {
  SchedulerNode trackedNode =
      nodeTracker.getNode(nodeUpdatedEvent.getRMNode().getNodeID());
  if (trackedNode == null) {
    return;
  }
  long sinceLastHeartbeat =
      Time.monotonicNow() - trackedNode.getLastHeartbeatMonotonicTime();
  CapacitySchedulerMetrics.getMetrics()
      .addSchedulerNodeHBInterval(sinceLastHeartbeat);
}
/**
 * Handles a completed container: resolves the owning application attempt and
 * the node it ran on, then informs the leaf queue so accounting is updated.
 * Completions for finished applications or already-removed nodes are logged
 * and dropped.
 *
 * @param rmContainer     the completed container
 * @param containerStatus its final status
 * @param event           the container event that triggered completion
 */
@Override
protected void completedContainerInternal(
    RMContainer rmContainer, ContainerStatus containerStatus,
    RMContainerEventType event) {
  Container completed = rmContainer.getContainer();
  ContainerId completedId = completed.getId();

  // Get the application for the finished container
  FiCaSchedulerApp attempt = getCurrentAttemptForContainer(completedId);
  ApplicationId appId =
      completedId.getApplicationAttemptId().getApplicationId();
  if (attempt == null) {
    LOG.info(
        "Container " + completed + " of" + " finished application " + appId
            + " completed with event " + event);
    return;
  }

  // Get the node on which the container was allocated
  FiCaSchedulerNode allocatedNode = getNode(completed.getNodeId());
  if (allocatedNode == null) {
    LOG.info("Container " + completed + " of" + " removed node " + completed
        .getNodeId() + " completed with event " + event);
    return;
  }

  // Inform the queue
  AbstractLeafQueue ownerQueue = (AbstractLeafQueue) attempt.getQueue();
  ownerQueue.completedContainer(getClusterResource(), attempt, allocatedNode,
      rmContainer, containerStatus, event, null, true);
}
/**
 * Looks up the scheduler-side application attempt; plain delegation to the
 * superclass, exposed here for tests.
 *
 * @param applicationAttemptId the attempt to look up
 * @return the attempt, or whatever the superclass returns when absent
 */
@Lock(Lock.NoLock.class)
@VisibleForTesting
@Override
public FiCaSchedulerApp getApplicationAttempt(
    ApplicationAttemptId applicationAttemptId) {
  return super.getApplicationAttempt(applicationAttemptId);
}
/**
 * @param nodeId the node to look up
 * @return the tracked scheduler node for the id, or null if not tracked
 */
@Lock(Lock.NoLock.class)
public FiCaSchedulerNode getNode(NodeId nodeId) {
  return nodeTracker.getNode(nodeId);
}
/**
 * @return all nodes currently known to the node tracker
 */
@Lock(Lock.NoLock.class)
public List<FiCaSchedulerNode> getAllNodes() {
  return nodeTracker.getAllNodes();
}
/**
 * State-store recovery hook. Intentionally a no-op: CapacityScheduler
 * recovers via container reports on node registration instead.
 */
@Override
@Lock(Lock.NoLock.class)
public void recover(RMState state) throws Exception {
  // NOT IMPLEMENTED
}
/**
 * Kills a reserved container by completing it with an UNRESERVED abnormal
 * status and a KILL event.
 *
 * @param container the reserved container to kill
 */
@Override
public void killReservedContainer(RMContainer container) {
  LOG.debug("{}:{}", SchedulerEventType.KILL_RESERVED_CONTAINER, container);
  // To think: What happens if this is no longer a reserved container, for
  // e.g if the reservation became an allocation.
  super.completedContainer(container,
      SchedulerUtils.createAbnormalContainerStatus(
          container.getContainerId(),
          SchedulerUtils.UNRESERVED_CONTAINER),
      RMContainerEventType.KILL);
}
/**
 * Marks a container of the given application attempt as a preemption
 * candidate. A no-op when the attempt is no longer known.
 *
 * @param aid  the owning application attempt
 * @param cont the container to mark
 */
@Override
public void markContainerForPreemption(ApplicationAttemptId aid,
    RMContainer cont) {
  LOG.debug("{}: appAttempt:{} container:{}",
      SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION, aid, cont);
  FiCaSchedulerApp attempt = getApplicationAttempt(aid);
  if (attempt == null) {
    return;
  }
  attempt.markContainerForPreemption(cont.getContainerId());
}
/**
 * Test hook: killing a container is implemented as marking it killable
 * (which kills immediately unless lazy preemption is enabled).
 */
@VisibleForTesting
@Override
public void killContainer(RMContainer container) {
  markContainerForKillable(container);
}
/**
 * Marks a container killable. With lazy preemption disabled the container is
 * killed immediately (completed as PREEMPTED); with it enabled, the container
 * is only marked killable on its node and registered with the
 * PreemptionManager, to be reclaimed later if still needed. Performed under
 * the scheduler write lock.
 *
 * @param killableContainer the container to mark (or kill)
 */
public void markContainerForKillable(
    RMContainer killableContainer) {
  writeLock.lock();
  try {
    LOG.debug("{}: container {}",
        SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE, killableContainer);

    if (!isLazyPreemptionEnabled) {
      // Eager path: kill right away.
      super.completedContainer(killableContainer, SchedulerUtils
          .createPreemptedContainerStatus(killableContainer.getContainerId(),
              SchedulerUtils.PREEMPTED_CONTAINER), RMContainerEventType.KILL);
    } else {
      // Lazy path: only record killability; actual kill is deferred.
      FiCaSchedulerNode node = getSchedulerNode(
          killableContainer.getAllocatedNode());

      FiCaSchedulerApp application = getCurrentAttemptForContainer(
          killableContainer.getContainerId());

      node.markContainerToKillable(killableContainer.getContainerId());

      // notify PreemptionManager
      // Get the application for the finished container
      if (null != application) {
        String leafQueuePath = application.getCSLeafQueue().getQueuePath();
        getPreemptionManager().addKillableContainer(
            new KillableContainer(killableContainer, node.getPartition(),
                leafQueuePath));
      }
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Reverses a lazy-preemption killable mark: clears the killable flag on the
 * container's node and deregisters it from the PreemptionManager. Only
 * reached when lazy preemption is enabled (see the event dispatcher).
 * Performed under the scheduler write lock.
 *
 * @param nonKillableContainer the container to un-mark
 */
private void markContainerForNonKillable(
    RMContainer nonKillableContainer) {
  writeLock.lock();
  try {
    LOG.debug("{}: container {}", SchedulerEventType.
        MARK_CONTAINER_FOR_NONKILLABLE, nonKillableContainer);

    FiCaSchedulerNode node = getSchedulerNode(
        nonKillableContainer.getAllocatedNode());

    FiCaSchedulerApp application = getCurrentAttemptForContainer(
        nonKillableContainer.getContainerId());

    node.markContainerToNonKillable(nonKillableContainer.getContainerId());

    // notify PreemptionManager
    // Get the application for the finished container
    if (null != application) {
      String leafQueuePath = application.getCSLeafQueue().getQueuePath();
      getPreemptionManager().removeKillableContainer(
          new KillableContainer(nonKillableContainer, node.getPartition(),
              leafQueuePath));
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Checks whether the caller holds the given ACL on a queue. For a fully
 * qualified path ("root...."), when the exact queue does not exist the check
 * walks up the path one segment at a time until an existing ancestor is
 * found and evaluates the ACL there (supports not-yet-created dynamic
 * queues). For a non-qualified name that does not resolve, access is denied.
 *
 * @param callerUGI the caller's UGI
 * @param acl       the ACL being checked
 * @param queueName queue name or full queue path
 * @return true if the caller has the requested access
 */
@Override
public boolean checkAccess(UserGroupInformation callerUGI,
    QueueACL acl, String queueName) {
  CSQueue queue = getQueue(queueName);
  if (queueName.startsWith("root.")) {
    // can only check proper ACLs if the path is fully qualified
    // NOTE(review): the loop relies on "root" always resolving via
    // queueManager — if it ever did not, lastIndexOf would return -1 and
    // substring would throw. Presumably the root queue always exists;
    // confirm before changing queue-manager initialization order.
    while (queue == null) {
      int sepIndex = queueName.lastIndexOf(".");
      String parentName = queueName.substring(0, sepIndex);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Queue {} does not exist, checking parent {}",
            queueName, parentName);
      }
      queueName = parentName;
      queue = queueManager.getQueue(queueName);
    }
  }

  if (queue == null) {
    LOG.debug("ACL not found for queue access-type {} for queue {}",
        acl, queueName);
    return false;
  }
  return queue.hasAccess(acl, callerUGI);
}
/**
 * Collects the application attempts running in (or under) the named queue.
 *
 * @param queueName the queue to inspect
 * @return the attempts in the queue, or null if the queue does not exist
 */
@Override
public List<ApplicationAttemptId> getAppsInQueue(String queueName) {
  CSQueue target = getQueue(queueName);
  if (target == null) {
    return null;
  }
  List<ApplicationAttemptId> attempts = new ArrayList<>();
  target.collectSchedulerApplications(attempts);
  return attempts;
}
/**
 * @return true when the number of applications tracked under the root queue
 *         has reached the configured maximum-system-applications limit.
 */
public boolean isSystemAppsLimitReached() {
  // Return the comparison directly rather than the original
  // if (x < y) return false; return true; form.
  return getRootQueue().getNumApplications()
      >= conf.getMaximumSystemApplications();
}
/**
 * @param planQueueName the plan (reservation parent) queue name
 * @return the name of the plan's default child queue, i.e. the plan name
 *         with the reservation default-queue suffix appended
 */
private String getDefaultReservationQueueName(String planQueueName) {
  return planQueueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
}
/**
 * Resolves the queue an application should actually run in when it targets a
 * reservation plan queue. Non-plan queues pass through unchanged. With a
 * reservation id, the app is directed to that reservation's queue (or, during
 * recovery with move-on-expiry enabled, to the plan's default queue);
 * otherwise to the plan's default child queue. Rejections dispatch an
 * APP_REJECTED event and return null. Performed under the scheduler read
 * lock.
 *
 * @param queueName     the queue the app was submitted to
 * @param applicationId the submitting application
 * @param reservationID the targeted reservation, or null
 * @param isRecovering  true when the app is being recovered after failover
 * @return the resolved queue name, or null if the app was rejected
 */
private String resolveReservationQueueName(String queueName,
    ApplicationId applicationId, ReservationId reservationID,
    boolean isRecovering) {
  readLock.lock();
  try {
    CSQueue queue = getQueue(queueName);
    // Check if the queue is a plan queue
    if ((queue == null) || !(queue instanceof PlanQueue)) {
      return queueName;
    }
    if (reservationID != null) {
      String resQName = reservationID.toString();
      queue = getQueue(resQName);
      if (queue == null) {
        // reservation has terminated during failover
        if (isRecovering && conf.getMoveOnExpiry(
            getQueue(queueName).getQueuePathObject())) {
          // move to the default child queue of the plan
          return getDefaultReservationQueueName(queueName);
        }
        String message = "Application " + applicationId
            + " submitted to a reservation which is not currently active: "
            + resQName;
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                message));
        return null;
      }
      // The reservation queue must actually be a child of the targeted plan.
      if (!queue.getParent().getQueuePath().equals(queueName)) {
        String message =
            "Application: " + applicationId + " submitted to a reservation "
                + resQName + " which does not belong to the specified queue: "
                + queueName;
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
                message));
        return null;
      }
      // use the reservation queue to run the app
      queueName = resQName;
    } else{
      // use the default child queue of the plan for unreserved apps
      queueName = getDefaultReservationQueueName(queueName);
    }
    return queueName;
  } finally {
    readLock.unlock();
  }
}
/**
 * Removes a legacy dynamic (reservation-style) queue by name via the queue
 * manager. Performed under the scheduler write lock.
 *
 * @param queueName name of the dynamic queue to remove
 * @throws SchedulerDynamicEditException if the queue cannot be removed
 */
@Override
public void removeQueue(String queueName)
    throws SchedulerDynamicEditException {
  writeLock.lock();
  try {
    queueManager.removeLegacyDynamicQueue(queueName);
  } finally {
    writeLock.unlock();
  }
}
/**
 * Removes an auto-created dynamic queue from its parent. The queue must be a
 * dynamic queue and eligible for auto deletion; after detaching it from the
 * parent, the removal is verified against both the parent's child list and
 * the queue manager. Performed under the scheduler write lock.
 *
 * @param queue the dynamic queue to remove
 * @throws SchedulerDynamicEditException if the queue is not dynamic, has no
 *         parent, or the removal did not take effect
 */
public void removeQueue(CSQueue queue)
    throws SchedulerDynamicEditException {
  writeLock.lock();
  try {
    LOG.info("Removing queue: " + queue.getQueuePath());
    if (!((AbstractCSQueue)queue).isDynamicQueue()) {
      throw new SchedulerDynamicEditException(
          "The queue that we are asked "
              + "to remove (" + queue.getQueuePath()
              + ") is not a DynamicQueue");
    }

    // Not eligible (e.g. still has apps) — log and bail, not an error.
    if (!((AbstractCSQueue) queue).isEligibleForAutoDeletion()) {
      LOG.warn("Queue " + queue.getQueuePath() +
          " is marked for deletion, but not eligible for deletion");
      return;
    }

    ParentQueue parentQueue = (ParentQueue)queue.getParent();
    if (parentQueue != null) {
      ((ParentQueue) queue.getParent()).removeChildQueue(queue);
    } else {
      throw new SchedulerDynamicEditException(
          "The queue " + queue.getQueuePath()
              + " can't be removed because it's parent is null");
    }

    // Sanity check: removal must be reflected in both the parent's children
    // and the queue manager's registry.
    if (parentQueue.childQueues.contains(queue) ||
        queueManager.getQueue(queue.getQueuePath()) != null) {
      throw new SchedulerDynamicEditException(
          "The queue " + queue.getQueuePath()
              + " has not been removed normally.");
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Adds a legacy dynamic (reservation-style) queue via the queue manager.
 * Performed under the scheduler write lock.
 *
 * @param queue the queue to add
 * @throws SchedulerDynamicEditException if the queue cannot be added
 * @throws IOException on configuration errors during queue creation
 */
@Override
public void addQueue(Queue queue)
    throws SchedulerDynamicEditException, IOException {
  writeLock.lock();
  try {
    queueManager.addLegacyDynamicQueue(queue);
  } finally {
    writeLock.unlock();
  }
}
/**
 * Changes the capacity entitlement of an auto-created leaf queue. The target
 * must be an AbstractAutoCreatedLeafQueue under a managed parent
 * (PlanQueue/ManagedParentQueue); the change is validated by the parent
 * before being applied. Performed under the scheduler write lock.
 *
 * @param inQueue     name of the leaf queue to modify
 * @param entitlement the new entitlement (capacity/max-capacity)
 * @throws YarnException if the queue or its parent is of the wrong kind, or
 *         validation fails
 */
@Override
public void setEntitlement(String inQueue, QueueEntitlement entitlement)
    throws YarnException {
  writeLock.lock();
  try {
    AbstractLeafQueue queue = this.queueManager.getAndCheckLeafQueue(inQueue);
    AbstractManagedParentQueue parent =
        (AbstractManagedParentQueue) queue.getParent();

    if (!(AbstractAutoCreatedLeafQueue.class.isAssignableFrom(
        queue.getClass()))) {
      throw new SchedulerDynamicEditException(
          "Entitlement can not be" + " modified dynamically since queue "
              + inQueue + " is not a AutoCreatedLeafQueue");
    }

    if (parent == null || !(AbstractManagedParentQueue.class.isAssignableFrom(
        parent.getClass()))) {
      throw new SchedulerDynamicEditException(
          "The parent of AutoCreatedLeafQueue " + inQueue
              + " must be a PlanQueue/ManagedParentQueue");
    }

    AbstractAutoCreatedLeafQueue newQueue =
        (AbstractAutoCreatedLeafQueue) queue;
    parent.validateQueueEntitlementChange(newQueue, entitlement);

    newQueue.setEntitlement(entitlement);

    LOG.info("Set entitlement for AutoCreatedLeafQueue " + inQueue + " to "
        + queue.getCapacity() + " request was (" + entitlement.getCapacity()
        + ")");
  } finally {
    writeLock.unlock();
  }
}
/**
 * Moves a running application from its current leaf queue to another:
 * submits the app to the destination, transfers all live and reserved
 * containers, re-submits the current attempt if still running, updates
 * metrics, and detaches the app from the source queue hierarchy. Performed
 * under the scheduler write lock.
 *
 * @param appId           the application to move
 * @param targetQueueName destination queue (plan queues are redirected to
 *                        their default child)
 * @return the destination queue's full path
 * @throws YarnException if the app is unknown, either queue is invalid, or
 *         submission to the destination is denied
 */
@Override
public String moveApplication(ApplicationId appId,
    String targetQueueName) throws YarnException {
  writeLock.lock();
  try {
    SchedulerApplication<FiCaSchedulerApp> application =
        applications.get(appId);
    if (application == null) {
      throw new YarnException("App to be moved " + appId + " not found.");
    }
    if (!(application.getQueue() instanceof CSQueue)) {
      throw new YarnException("Source queue is not a Capacity Scheduler queue");
    }
    CSQueue csQueue = (CSQueue) application.getQueue();
    String sourceQueueName = csQueue.getQueuePath();
    AbstractLeafQueue source =
        this.queueManager.getAndCheckLeafQueue(sourceQueueName);
    String destQueueName = handleMoveToPlanQueue(targetQueueName);
    AbstractLeafQueue dest = this.queueManager.getAndCheckLeafQueue(destQueueName);

    // Submit to the destination first; ACL/limit failures abort the move
    // before any containers are touched.
    String user = application.getUser();
    try {
      dest.submitApplication(appId, user, destQueueName);
    } catch (AccessControlException e) {
      throw new YarnException(e);
    }

    FiCaSchedulerApp app = application.getCurrentAppAttempt();
    if (app != null) {
      // Move all live containers even when stopped.
      // For transferStateFromPreviousAttempt required
      for (RMContainer rmContainer : app.getLiveContainers()) {
        source.detachContainer(getClusterResource(), app, rmContainer);
        // attach the Container to another queue
        dest.attachContainer(getClusterResource(), app, rmContainer);
      }
      // Move all reserved containers
      for (RMContainer rmContainer : app.getReservedContainers()) {
        source.detachContainer(getClusterResource(), app, rmContainer);
        dest.attachContainer(getClusterResource(), app, rmContainer);
      }
      if (!app.isStopped()) {
        source.finishApplicationAttempt(app, sourceQueueName);
        // Submit to a new queue
        dest.submitApplicationAttempt(app, user, true);
      }
      // Finish app & update metrics
      app.move(dest);
    }
    source.appFinished();
    // Detach the application..
    source.getParent().finishApplication(appId, user);
    application.setQueue(dest);
    LOG.info("App: " + appId + " successfully moved from " + sourceQueueName
        + " to: " + destQueueName);
    return dest.getQueuePath();
  } finally {
    writeLock.unlock();
  }
}
/**
 * Validates that an application could be moved to the given queue without
 * performing the move: checks both queues resolve to leaf queues, that the
 * destination's partitions can satisfy the app's requested partitions (when
 * an attempt exists), and that submission would pass ACL and limit checks.
 * Performed under the scheduler write lock.
 *
 * @param appId    the application to validate a move for
 * @param newQueue the proposed destination queue
 * @throws YarnException if the app is unknown or any validation fails
 */
@Override
public void preValidateMoveApplication(ApplicationId appId,
    String newQueue) throws YarnException {
  writeLock.lock();
  try {
    SchedulerApplication<FiCaSchedulerApp> application =
        applications.get(appId);
    if (application == null) {
      throw new YarnException("App to be moved " + appId + " not found.");
    }
    Queue queue = application.getQueue();
    String sourceQueueName = queue instanceof CSQueue ?
        ((CSQueue) queue).getQueuePath() : queue.getQueueName();
    this.queueManager.getAndCheckLeafQueue(sourceQueueName);
    String destQueueName = handleMoveToPlanQueue(newQueue);
    AbstractLeafQueue dest = this.queueManager.getAndCheckLeafQueue(destQueueName);
    // Validation check - ACLs, submission limits for user & queue
    String user = application.getUser();
    // Check active partition only when attempt is available
    // NOTE: attempt id 0 is used to probe for an existing attempt.
    FiCaSchedulerApp appAttempt =
        getApplicationAttempt(ApplicationAttemptId.newInstance(appId, 0));
    if (null != appAttempt) {
      checkQueuePartition(appAttempt, dest);
    }
    try {
      dest.validateSubmitApplication(appId, user, destQueueName);
    } catch (AccessControlException e) {
      throw new YarnException(e);
    }
  } finally {
    writeLock.unlock();
  }
}
/**
 * Check application can be moved to queue with labels enabled. All labels in
 * application life time will be checked. A no-op when node labels are
 * disabled cluster-wide.
 *
 * @param app  the application attempt being moved
 * @param dest the destination leaf queue
 * @throws YarnException if the destination queue cannot satisfy one or more
 *         of the app's requested label expressions
 */
private void checkQueuePartition(FiCaSchedulerApp app, AbstractLeafQueue dest)
    throws YarnException {
  if (!YarnConfiguration.areNodeLabelsEnabled(conf)) {
    return;
  }
  Set<String> targetqueuelabels = dest.getAccessibleNodeLabels();
  AppSchedulingInfo schedulingInfo = app.getAppSchedulingInfo();
  // Work on a copy: the original code removed NO_LABEL directly from the
  // set returned by getRequestedPartitions(), which risks mutating state
  // owned by AppSchedulingInfo.
  Set<String> appLabelexpressions =
      new HashSet<String>(schedulingInfo.getRequestedPartitions());
  // default partition access always available remove empty label
  appLabelexpressions.remove(RMNodeLabelsManager.NO_LABEL);
  Set<String> nonAccessiblelabels = new HashSet<String>();
  for (String label : appLabelexpressions) {
    if (!SchedulerUtils.checkQueueLabelExpression(targetqueuelabels, label,
        null)) {
      nonAccessiblelabels.add(label);
    }
  }
  if (!nonAccessiblelabels.isEmpty()) {
    throw new YarnException(
        "Specified queue=" + dest.getQueuePath() + " can't satisfy following "
            + "apps label expressions =" + nonAccessiblelabels
            + " accessible node labels =" + targetqueuelabels);
  }
}
/** {@inheritDoc} */
@Override
public EnumSet<SchedulerResourceTypes> getSchedulingResourceTypes() {
if (calculator.getClass().getName()
.equals(DefaultResourceCalculator.class.getName())) {
return EnumSet.of(SchedulerResourceTypes.MEMORY);
}
return EnumSet.of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU);
}
@Override
public Resource getMaximumResourceCapability(String queueName) {
if(queueName == null || queueName.isEmpty()) {
return getMaximumResourceCapability();
}
CSQueue queue = getQueue(queueName);
if (queue == null) {
if (isAmbiguous(queueName)) {
LOG.error("Ambiguous queue reference: " + queueName
+ " please use full queue path instead.");
} else {
LOG.error("Unknown queue: " + queueName);
}
return getMaximumResourceCapability();
}
if (!(queue instanceof AbstractLeafQueue)) {
LOG.error("queue " + queueName + " is not an leaf queue");
return getMaximumResourceCapability();
}
// queue.getMaxAllocation returns *configured* maximum allocation.
// getMaximumResourceCapability() returns maximum allocation considers
// per-node maximum resources. So return (component-wise) min of the two.
Resource queueMaxAllocation = queue.getMaximumAllocation();
Resource clusterMaxAllocationConsiderNodeMax =
getMaximumResourceCapability();
return Resources.componentwiseMin(queueMaxAllocation,
clusterMaxAllocationConsiderNodeMax);
}
private String handleMoveToPlanQueue(String targetQueueName) {
CSQueue dest = getQueue(targetQueueName);
if (dest != null && dest instanceof PlanQueue) {
// use the default child reservation queue of the plan
targetQueueName = targetQueueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
}
return targetQueueName;
}
@Override
public Set<String> getPlanQueues() {
Set<String> ret = new HashSet<String>();
for (Map.Entry<String, CSQueue> l : queueManager.getQueues().entrySet()) {
if (l.getValue() instanceof PlanQueue) {
ret.add(l.getKey());
}
}
return ret;
}
@Override
public Priority checkAndGetApplicationPriority(
Priority priorityRequestedByApp, UserGroupInformation user,
String queuePath, ApplicationId applicationId) throws YarnException {
readLock.lock();
try {
Priority appPriority = priorityRequestedByApp;
// Verify the scenario where priority is null from submissionContext.
if (null == appPriority) {
// Verify whether submitted user has any default priority set. If so,
// user's default priority will get precedence over queue default.
// for updateApplicationPriority call flow, this check is done in
// CientRMService itself.
appPriority = this.appPriorityACLManager.getDefaultPriority(
normalizeQueueName(queuePath),
user);
// Get the default priority for the Queue. If Queue is non-existent,
// then
// use default priority. Do it only if user doesn't have any default.
if (null == appPriority) {
appPriority = this.queueManager.getDefaultPriorityForQueue(
normalizeQueueName(queuePath));
}
LOG.info(
"Application '" + applicationId + "' is submitted without priority "
+ "hence considering default queue/cluster priority: "
+ appPriority.getPriority());
}
// Verify whether submitted priority is lesser than max priority
// in the cluster. If it is out of found, defining a max cap.
if (appPriority.getPriority() > getMaxClusterLevelAppPriority()
.getPriority()) {
appPriority = Priority
.newInstance(getMaxClusterLevelAppPriority().getPriority());
}
// Lets check for ACLs here.
if (!appPriorityACLManager.checkAccess(user, normalizeQueueName(queuePath), appPriority)) {
throw new YarnException(new AccessControlException(
"User " + user + " does not have permission to submit/update "
+ applicationId + " for " + appPriority));
}
LOG.info("Priority '" + appPriority.getPriority()
+ "' is acceptable in queue : " + queuePath + " for application: "
+ applicationId);
return appPriority;
} finally {
readLock.unlock();
}
}
@Override
public Priority updateApplicationPriority(Priority newPriority,
ApplicationId applicationId, SettableFuture<Object> future,
UserGroupInformation user)
throws YarnException {
writeLock.lock();
try {
Priority appPriority = null;
SchedulerApplication<FiCaSchedulerApp> application = applications
.get(applicationId);
if (application == null) {
throw new YarnException("Application '" + applicationId
+ "' is not present, hence could not change priority.");
}
RMApp rmApp = rmContext.getRMApps().get(applicationId);
appPriority = checkAndGetApplicationPriority(newPriority, user,
rmApp.getQueue(), applicationId);
if (application.getPriority().equals(appPriority)) {
future.set(null);
return appPriority;
}
// Update new priority in Submission Context to update to StateStore.
rmApp.getApplicationSubmissionContext().setPriority(appPriority);
// Update to state store
ApplicationStateData appState = ApplicationStateData.newInstance(
rmApp.getSubmitTime(), rmApp.getStartTime(),
rmApp.getApplicationSubmissionContext(), rmApp.getUser(),
rmApp.getRealUser(), rmApp.getCallerContext());
appState.setApplicationTimeouts(rmApp.getApplicationTimeouts());
appState.setLaunchTime(rmApp.getLaunchTime());
rmContext.getStateStore().updateApplicationStateSynchronously(appState,
false, future);
// As we use iterator over a TreeSet for OrderingPolicy, once we change
// priority then reinsert back to make order correct.
AbstractLeafQueue queue = (AbstractLeafQueue) getQueue(rmApp.getQueue());
queue.updateApplicationPriority(application, appPriority);
LOG.info("Priority '" + appPriority + "' is updated in queue :"
+ rmApp.getQueue() + " for application: " + applicationId
+ " for the user: " + rmApp.getUser());
return appPriority;
} finally {
writeLock.unlock();
}
}
@Override
public PreemptionManager getPreemptionManager() {
return preemptionManager;
}
@Override
public ResourceUsage getClusterResourceUsage() {
return getRootQueue().getQueueResourceUsage();
}
private SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> getSchedulerContainer(
RMContainer rmContainer, boolean allocated) {
if (null == rmContainer) {
return null;
}
FiCaSchedulerApp app = getApplicationAttempt(
rmContainer.getApplicationAttemptId());
if (null == app) { return null; }
NodeId nodeId;
// Get nodeId
if (rmContainer.getState() == RMContainerState.RESERVED) {
nodeId = rmContainer.getReservedNode();
} else {
nodeId = rmContainer.getNodeId();
}
FiCaSchedulerNode node = getNode(nodeId);
if (null == node) {
return null;
}
return new SchedulerContainer<>(app, node, rmContainer,
// TODO, node partition should come from CSAssignment to avoid partition
// get updated before submitting the commit
node.getPartition(), allocated);
}
private List<SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>>
getSchedulerContainersToRelease(
CSAssignment csAssignment) {
List<SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>> list = null;
if (csAssignment.getContainersToKill() != null && !csAssignment
.getContainersToKill().isEmpty()) {
list = new ArrayList<>();
for (RMContainer rmContainer : csAssignment.getContainersToKill()) {
SchedulerContainer schedulerContainer =
getSchedulerContainer(rmContainer, false);
if (schedulerContainer != null) {
list.add(schedulerContainer);
}
}
}
if (csAssignment.getExcessReservation() != null) {
if (null == list) {
list = new ArrayList<>();
}
SchedulerContainer schedulerContainer =
getSchedulerContainer(csAssignment.getExcessReservation(), false);
if (schedulerContainer != null) {
list.add(schedulerContainer);
}
}
if (list != null && list.isEmpty()) {
list = null;
}
return list;
}
@VisibleForTesting
public void submitResourceCommitRequest(Resource cluster,
CSAssignment csAssignment) {
ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
createResourceCommitRequest(csAssignment);
if (null == request) {
return;
}
if (asyncSchedulingConf.isScheduleAsynchronously()) {
// Submit to a commit thread and commit it async-ly
asyncSchedulingConf.resourceCommitterService.addNewCommitRequest(request);
} else{
// Otherwise do it sync-ly.
tryCommit(cluster, request, true);
}
}
@Override
public boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
if (schedulingRequest.getResourceSizing() != null) {
if (schedulingRequest.getResourceSizing().getNumAllocations() > 1) {
LOG.warn("The SchedulingRequest has requested more than 1 allocation," +
" but only 1 will be attempted !!");
}
if (!appAttempt.isStopped()) {
ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
resourceCommitRequest = createResourceCommitRequest(
appAttempt, schedulingRequest, schedulerNode);
// Validate placement constraint is satisfied before
// committing the request.
try {
if (!PlacementConstraintsUtil.canSatisfyConstraints(
appAttempt.getApplicationId(),
schedulingRequest, schedulerNode,
rmContext.getPlacementConstraintManager(),
rmContext.getAllocationTagsManager())) {
LOG.info("Failed to allocate container for application "
+ appAttempt.getApplicationId() + " on node "
+ schedulerNode.getNodeName()
+ " because this allocation violates the"
+ " placement constraint.");
return false;
}
} catch (InvalidAllocationTagsQueryException e) {
LOG.warn("Unable to allocate container", e);
return false;
}
return tryCommit(getClusterResource(), resourceCommitRequest, false);
}
}
return false;
}
// This assumes numContainers = 1 for the request.
private ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
createResourceCommitRequest(SchedulerApplicationAttempt appAttempt,
SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> allocated =
null;
Resource resource = schedulingRequest.getResourceSizing().getResources();
if (Resources.greaterThan(calculator, getClusterResource(),
resource, Resources.none())) {
ContainerId cId =
ContainerId.newContainerId(appAttempt.getApplicationAttemptId(),
appAttempt.getAppSchedulingInfo().getNewContainerId());
Container container = BuilderUtils.newContainer(
cId, schedulerNode.getNodeID(), schedulerNode.getHttpAddress(),
resource, schedulingRequest.getPriority(), null,
ExecutionType.GUARANTEED,
schedulingRequest.getAllocationRequestId());
RMContainer rmContainer = new RMContainerImpl(container,
SchedulerRequestKey.extractFrom(container),
appAttempt.getApplicationAttemptId(), container.getNodeId(),
appAttempt.getUser(), rmContext, false);
((RMContainerImpl)rmContainer).setAllocationTags(
new HashSet<>(schedulingRequest.getAllocationTags()));
SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>
schedulerContainer = getSchedulerContainer(rmContainer, true);
if (schedulerContainer == null) {
allocated = null;
} else {
allocated = new ContainerAllocationProposal<>(schedulerContainer,
null, null, NodeType.NODE_LOCAL, NodeType.NODE_LOCAL,
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, resource);
}
}
if (null != allocated) {
List<ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode>>
allocationsList = new ArrayList<>();
allocationsList.add(allocated);
return new ResourceCommitRequest<>(allocationsList, null, null);
}
return null;
}
@VisibleForTesting
public ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
createResourceCommitRequest(CSAssignment csAssignment) {
ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> allocated =
null;
ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> reserved =
null;
List<SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>> released =
null;
if (Resources.greaterThan(calculator, getClusterResource(),
csAssignment.getResource(), Resources.none())) {
// Allocated something
List<AssignmentInformation.AssignmentDetails> allocations =
csAssignment.getAssignmentInformation().getAllocationDetails();
if (!allocations.isEmpty()) {
RMContainer rmContainer = allocations.get(0).rmContainer;
SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>
schedulerContainer = getSchedulerContainer(rmContainer, true);
if (schedulerContainer == null) {
allocated = null;
// Decrease unconfirmed resource if app is alive
FiCaSchedulerApp app = getApplicationAttempt(
rmContainer.getApplicationAttemptId());
if (app != null) {
app.decUnconfirmedRes(rmContainer.getAllocatedResource());
}
} else {
allocated = new ContainerAllocationProposal<>(schedulerContainer,
getSchedulerContainersToRelease(csAssignment),
getSchedulerContainer(
csAssignment.getFulfilledReservedContainer(), false),
csAssignment.getType(), csAssignment.getRequestLocalityType(),
csAssignment.getSchedulingMode() != null ?
csAssignment.getSchedulingMode() :
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY,
csAssignment.getResource());
}
}
// Reserved something
List<AssignmentInformation.AssignmentDetails> reservation =
csAssignment.getAssignmentInformation().getReservationDetails();
if (!reservation.isEmpty()) {
RMContainer rmContainer = reservation.get(0).rmContainer;
SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode>
schedulerContainer = getSchedulerContainer(rmContainer, false);
if (schedulerContainer == null) {
reserved = null;
} else {
reserved = new ContainerAllocationProposal<>(schedulerContainer,
getSchedulerContainersToRelease(csAssignment),
getSchedulerContainer(
csAssignment.getFulfilledReservedContainer(), false),
csAssignment.getType(), csAssignment.getRequestLocalityType(),
csAssignment.getSchedulingMode() != null ?
csAssignment.getSchedulingMode() :
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY,
csAssignment.getResource());
}
}
}
// When we don't need to allocate/reserve anything, we can feel free to
// kill all to-release containers in the request.
if (null == allocated && null == reserved) {
released = getSchedulerContainersToRelease(csAssignment);
}
if (null != allocated || null != reserved || (null != released && !released
.isEmpty())) {
List<ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode>>
allocationsList = null;
if (allocated != null) {
allocationsList = new ArrayList<>();
allocationsList.add(allocated);
}
List<ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode>>
reservationsList = null;
if (reserved != null) {
reservationsList = new ArrayList<>();
reservationsList.add(reserved);
}
return new ResourceCommitRequest<>(allocationsList, reservationsList,
released);
}
return null;
}
@Override
public boolean tryCommit(Resource cluster, ResourceCommitRequest r,
boolean updatePending) {
long commitStart = System.nanoTime();
ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
(ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>) r;
ApplicationAttemptId attemptId = null;
// We need to update unconfirmed allocated resource of application when
// any container allocated.
boolean updateUnconfirmedAllocatedResource =
request.getContainersToAllocate() != null && !request
.getContainersToAllocate().isEmpty();
// find the application to accept and apply the ResourceCommitRequest
if (request.anythingAllocatedOrReserved()) {
ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> c =
request.getFirstAllocatedOrReservedContainer();
attemptId =
c.getAllocatedOrReservedContainer().getSchedulerApplicationAttempt()
.getApplicationAttemptId();
} else {
if (!request.getContainersToRelease().isEmpty()) {
attemptId = request.getContainersToRelease().get(0)
.getSchedulerApplicationAttempt().getApplicationAttemptId();
}
}
LOG.debug("Try to commit allocation proposal={}", request);
boolean isSuccess = false;
if (attemptId != null) {
FiCaSchedulerApp app = getApplicationAttempt(attemptId);
// Required sanity check for attemptId - when async-scheduling enabled,
// proposal might be outdated if AM failover just finished
// and proposal queue was not be consumed in time
if (app != null && attemptId.equals(app.getApplicationAttemptId())) {
if (app.accept(cluster, request, updatePending)
&& app.apply(cluster, request, updatePending)) {
long commitSuccess = System.nanoTime() - commitStart;
CapacitySchedulerMetrics.getMetrics()
.addCommitSuccess(commitSuccess);
isSuccess = true;
} else{
long commitFailed = System.nanoTime() - commitStart;
CapacitySchedulerMetrics.getMetrics()
.addCommitFailure(commitFailed);
}
LOG.debug("Allocation proposal accepted={}, proposal={}", isSuccess,
request);
// Update unconfirmed allocated resource.
if (updateUnconfirmedAllocatedResource) {
app.decUnconfirmedRes(request.getTotalAllocatedResource());
}
}
}
return isSuccess;
}
public int getAsyncSchedulingPendingBacklogs() {
return asyncSchedulingConf.getPendingBacklogs();
}
@Override
public CapacitySchedulerQueueManager getCapacitySchedulerQueueManager() {
return this.queueManager;
}
public WorkflowPriorityMappingsManager getWorkflowPriorityMappingsManager() {
return this.workflowPriorityMappingsMgr;
}
/**
* Try to move a reserved container to a targetNode.
* If the targetNode is reserved by another application (other than this one).
* The previous reservation will be cancelled.
*
* @param toBeMovedContainer reserved container will be moved
* @param targetNode targetNode
* @return true if move succeeded. Return false if the targetNode is reserved by
* a different container or move failed because of any other reasons.
*/
public boolean moveReservedContainer(RMContainer toBeMovedContainer,
FiCaSchedulerNode targetNode) {
writeLock.lock();
try {
LOG.debug("Trying to move container={} to node={}",
toBeMovedContainer, targetNode.getNodeID());
FiCaSchedulerNode sourceNode = getNode(toBeMovedContainer.getNodeId());
if (null == sourceNode) {
LOG.debug("Failed to move reservation, cannot find source node={}",
toBeMovedContainer.getNodeId());
return false;
}
// Target node updated?
if (getNode(targetNode.getNodeID()) != targetNode) {
LOG.debug("Failed to move reservation, node updated or removed,"
+ " moving cancelled.");
return false;
}
// Target node's reservation status changed?
if (targetNode.getReservedContainer() != null) {
LOG.debug("Target node's reservation status changed,"
+ " moving cancelled.");
return false;
}
FiCaSchedulerApp app = getApplicationAttempt(
toBeMovedContainer.getApplicationAttemptId());
if (null == app) {
LOG.debug("Cannot find to-be-moved container's application={}",
toBeMovedContainer.getApplicationAttemptId());
return false;
}
// finally, move the reserved container
return app.moveReservation(toBeMovedContainer, sourceNode, targetNode);
} finally {
writeLock.unlock();
}
}
@Override
public long checkAndGetApplicationLifetime(String queueName,
long lifetimeRequestedByApp, RMAppImpl app) {
CSQueue queue;
writeLock.lock();
try {
queue = getQueue(queueName);
// This handles the case where the first submitted app in aqc queue
// does not exist, addressing the issue related to YARN-11708.
if (queue == null) {
queue = getOrCreateQueueFromPlacementContext(app.getApplicationId(),
app.getUser(), app.getQueue(), app.getApplicationPlacementContext(), false);
}
if (queue == null) {
String message = "Application " + app.getApplicationId()
+ " submitted by user " + app.getUser();
if (isAmbiguous(queueName)) {
message = message + " to ambiguous queue: " + queueName
+ " please use full queue path instead.";
} else {
message = message + "Application " + app.getApplicationId() +
" submitted by user " + app.getUser() + " to unknown queue: " + queueName;
}
this.rmContext.getDispatcher().getEventHandler().handle(
new RMAppEvent(app.getApplicationId(), RMAppEventType.APP_REJECTED,
message));
return lifetimeRequestedByApp;
}
if (!(queue instanceof AbstractLeafQueue)) {
return lifetimeRequestedByApp;
}
} finally {
writeLock.unlock();
}
readLock.lock();
try {
long defaultApplicationLifetime =
queue.getDefaultApplicationLifetime();
long maximumApplicationLifetime =
queue.getMaximumApplicationLifetime();
// check only for maximum, that's enough because default can't
// exceed maximum
if (maximumApplicationLifetime <= 0) {
return (lifetimeRequestedByApp <= 0) ? defaultApplicationLifetime :
lifetimeRequestedByApp;
}
if (lifetimeRequestedByApp <= 0) {
return defaultApplicationLifetime;
} else if (lifetimeRequestedByApp > maximumApplicationLifetime) {
return maximumApplicationLifetime;
}
return lifetimeRequestedByApp;
} finally {
readLock.unlock();
}
}
@Override
public long getMaximumApplicationLifetime(String queueName) {
CSQueue queue = getQueue(queueName);
if (!(queue instanceof AbstractLeafQueue)) {
if (isAmbiguous(queueName)) {
LOG.error("Ambiguous queue reference: " + queueName
+ " please use full queue path instead.");
} else {
LOG.error("Unknown queue: " + queueName);
}
return -1;
}
// In seconds
return queue.getMaximumApplicationLifetime();
}
@Override
public boolean isConfigurationMutable() {
return csConfProvider instanceof MutableConfigurationProvider;
}
@Override
public MutableConfigurationProvider getMutableConfProvider() {
if (isConfigurationMutable()) {
return (MutableConfigurationProvider) csConfProvider;
}
return null;
}
public CSConfigurationProvider getCsConfProvider() {
return csConfProvider;
}
@Override
public void resetSchedulerMetrics() {
CapacitySchedulerMetrics.destroy();
}
public boolean isMultiNodePlacementEnabled() {
return multiNodePlacementEnabled;
}
public int getNumAsyncSchedulerThreads() {
return asyncSchedulingConf.getNumAsyncSchedulerThreads();
}
@VisibleForTesting
public void setMaxRunningAppsEnforcer(CSMaxRunningAppsEnforcer enforcer) {
this.maxRunningEnforcer = enforcer;
}
/**
* Returning true as capacity scheduler supports placement constraints.
*/
@Override
public boolean placementConstraintEnabled() {
return true;
}
@VisibleForTesting
public void setQueueManager(CapacitySchedulerQueueManager qm) {
this.queueManager = qm;
}
@VisibleForTesting
public List<AsyncScheduleThread> getAsyncSchedulerThreads() {
return asyncSchedulingConf.getAsyncSchedulerThreads();
}
static | ResourceCommitterService |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/internal/dto/LoginError.java | {
"start": 968,
"end": 1470
} | class ____ {
private String error;
private String errorDescription;
public String getError() {
return error;
}
public void setError(String error) {
this.error = error;
}
@JsonProperty("error_description")
public String getErrorDescription() {
return errorDescription;
}
@JsonProperty("error_description")
public void setErrorDescription(String errorDescription) {
this.errorDescription = errorDescription;
}
}
| LoginError |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/AbstractInitializableBeanDefinition.java | {
"start": 116299,
"end": 116522
} | class ____ {
public final Argument argument;
public AnnotationReference(Argument argument) {
this.argument = ExpressionsAwareArgument.wrapIfNecessary(argument);
}
}
}
| AnnotationReference |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/jobgraph/tasks/AbstractInvokable.java | {
"start": 2429,
"end": 4059
} | class ____
implements TaskInvokable, CheckpointableTask, CoordinatedTask {
/** The environment assigned to this invokable. */
private final Environment environment;
/**
* Create an Invokable task and set its environment.
*
* @param environment The environment assigned to this invokable.
*/
public AbstractInvokable(Environment environment) {
this.environment = checkNotNull(environment);
}
// ------------------------------------------------------------------------
// Core methods
// ------------------------------------------------------------------------
@Override
public void cancel() throws Exception {
// The default implementation does nothing.
}
@Override
public void cleanUp(@Nullable Throwable throwable) throws Exception {}
@Override
public void maybeInterruptOnCancel(
Thread toInterrupt, @Nullable String taskName, @Nullable Long timeout) {
if (taskName != null && timeout != null) {
Task.logTaskThreadStackTrace(toInterrupt, taskName, timeout, "interrupting");
}
toInterrupt.interrupt();
}
// ------------------------------------------------------------------------
// Access to Environment and Configuration
// ------------------------------------------------------------------------
/**
* Returns the environment of this task.
*
* @return The environment of this task.
*/
public final Environment getEnvironment() {
return this.environment;
}
/**
* Returns the user code | AbstractInvokable |
java | netty__netty | codec-http3/src/test/java/io/netty/handler/codec/http3/Http3RequestStreamValidationHandlerTest.java | {
"start": 2515,
"end": 24391
} | class ____ extends Http3FrameTypeValidationHandlerTest {
private final QpackDecoder decoder;
public Http3RequestStreamValidationHandlerTest() {
super(true, true);
decoder = new QpackDecoder(0, 0);
}
@Override
protected ChannelHandler newHandler(boolean server) {
return new ChannelInitializer<QuicStreamChannel>() {
@Override
protected void initChannel(QuicStreamChannel ch) {
Http3RequestStreamEncodeStateValidator encStateValidator = new Http3RequestStreamEncodeStateValidator();
Http3RequestStreamDecodeStateValidator decStateValidator = new Http3RequestStreamDecodeStateValidator();
ch.pipeline().addLast(encStateValidator);
ch.pipeline().addLast(decStateValidator);
ch.pipeline().addLast(newServerValidator(qpackAttributes, decoder, encStateValidator,
decStateValidator));
}
};
}
@Override
protected List<Http3RequestStreamFrame> newValidFrames() {
return Arrays.asList(new DefaultHttp3HeadersFrame(), new DefaultHttp3DataFrame(Unpooled.directBuffer()),
new DefaultHttp3UnknownFrame(Http3CodecUtils.MAX_RESERVED_FRAME_TYPE, Unpooled.buffer()));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInvalidFrameSequenceStartInbound(boolean server) throws Exception {
setUp(server);
final EmbeddedQuicStreamChannel channel = newStream(QuicStreamType.BIDIRECTIONAL, newHandler(server));
Http3DataFrame dataFrame = new DefaultHttp3DataFrame(Unpooled.buffer());
Exception e = assertThrows(Exception.class, () -> channel.writeInbound(dataFrame));
assertException(H3_FRAME_UNEXPECTED, e);
verifyClose(H3_FRAME_UNEXPECTED, parent);
assertEquals(0, dataFrame.refCnt());
assertFalse(channel.finish());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInvalidFrameSequenceEndInbound(boolean server) throws Exception {
setUp(server);
final EmbeddedQuicStreamChannel channel = newStream(QuicStreamType.BIDIRECTIONAL, newHandler(server));
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
Http3DataFrame dataFrame = new DefaultHttp3DataFrame(Unpooled.buffer());
Http3DataFrame dataFrame2 = new DefaultHttp3DataFrame(Unpooled.buffer());
Http3DataFrame dataFrame3 = new DefaultHttp3DataFrame(Unpooled.buffer());
Http3HeadersFrame trailersFrame = new DefaultHttp3HeadersFrame();
assertTrue(channel.writeInbound(headersFrame));
assertTrue(channel.writeInbound(dataFrame.retainedDuplicate()));
assertTrue(channel.writeInbound(dataFrame2.retainedDuplicate()));
assertTrue(channel.writeInbound(trailersFrame));
Exception e = assertThrows(Exception.class, () -> channel.writeInbound(dataFrame3));
assertException(H3_FRAME_UNEXPECTED, e);
verifyClose(H3_FRAME_UNEXPECTED, parent);
assertTrue(channel.finish());
assertEquals(0, dataFrame3.refCnt());
assertFrameEquals(headersFrame, channel.readInbound());
assertFrameEquals(dataFrame, channel.readInbound());
assertFrameEquals(dataFrame2, channel.readInbound());
assertFrameEquals(trailersFrame, channel.readInbound());
assertNull(channel.readInbound());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInvalidFrameSequenceStartOutbound(boolean server) throws Exception {
setUp(server);
EmbeddedQuicStreamChannel channel = newStream(QuicStreamType.BIDIRECTIONAL, newHandler(server));
Http3DataFrame dataFrame = new DefaultHttp3DataFrame(Unpooled.buffer());
Exception e = assertThrows(Exception.class, () -> channel.writeOutbound(dataFrame));
assertException(H3_FRAME_UNEXPECTED, e);
assertFalse(channel.finish());
assertEquals(0, dataFrame.refCnt());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInvalidFrameSequenceEndOutbound(boolean server) throws Exception {
setUp(server);
EmbeddedQuicStreamChannel channel = newStream(QuicStreamType.BIDIRECTIONAL, newHandler(server));
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
Http3DataFrame dataFrame = new DefaultHttp3DataFrame(Unpooled.buffer());
Http3DataFrame dataFrame2 = new DefaultHttp3DataFrame(Unpooled.buffer());
Http3DataFrame dat3Frame3 = new DefaultHttp3DataFrame(Unpooled.buffer());
Http3HeadersFrame trailersFrame = new DefaultHttp3HeadersFrame();
assertTrue(channel.writeOutbound(headersFrame));
assertTrue(channel.writeOutbound(dataFrame.retainedDuplicate()));
assertTrue(channel.writeOutbound(dataFrame2.retainedDuplicate()));
assertTrue(channel.writeOutbound(trailersFrame));
Exception e = assertThrows(Exception.class, () -> channel.writeOutbound(dat3Frame3));
assertException(H3_FRAME_UNEXPECTED, e);
assertTrue(channel.finish());
assertEquals(0, dat3Frame3.refCnt());
assertFrameEquals(headersFrame, channel.readOutbound());
assertFrameEquals(dataFrame, channel.readOutbound());
assertFrameEquals(dataFrame2, channel.readOutbound());
assertFrameEquals(trailersFrame, channel.readOutbound());
assertNull(channel.readOutbound());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testGoawayReceivedBeforeWritingHeaders(boolean server) throws Exception {
setUp(server);
EmbeddedQuicStreamChannel channel = newClientStream(() -> true);
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
Exception e = assertThrows(Exception.class, () -> channel.writeOutbound(headersFrame));
assertException(H3_FRAME_UNEXPECTED, e);
// We should have closed the channel.
assertFalse(channel.isActive());
assertFalse(channel.finish());
assertNull(channel.readOutbound());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testGoawayReceivedAfterWritingHeaders(boolean server) throws Exception {
setUp(server);
AtomicBoolean goAway = new AtomicBoolean();
EmbeddedQuicStreamChannel channel = newClientStream(goAway::get);
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
Http3DataFrame dataFrame = new DefaultHttp3DataFrame(Unpooled.buffer());
assertTrue(channel.writeOutbound(headersFrame));
goAway.set(true);
assertTrue(channel.writeOutbound(dataFrame.retainedDuplicate()));
assertTrue(channel.finish());
assertFrameEquals(headersFrame, channel.readOutbound());
assertFrameEquals(dataFrame, channel.readOutbound());
assertNull(channel.readOutbound());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testClientHeadRequestWithContentLength(boolean server) throws Exception {
setUp(server);
EmbeddedQuicStreamChannel channel = newClientStream(() -> false);
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
headersFrame.headers().method(HttpMethod.HEAD.asciiName());
assertTrue(channel.writeOutbound(headersFrame));
Http3HeadersFrame responseHeadersFrame = new DefaultHttp3HeadersFrame();
responseHeadersFrame.headers().setLong(HttpHeaderNames.CONTENT_LENGTH, 10);
assertTrue(channel.writeInbound(responseHeadersFrame));
channel.pipeline().fireUserEventTriggered(ChannelInputShutdownReadComplete.INSTANCE);
assertTrue(channel.finishAndReleaseAll());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testClientNonHeadRequestWithContentLengthNoData(boolean server) throws Exception {
setUp(server);
testClientNonHeadRequestWithContentLength(true, false);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testClientNonHeadRequestWithContentLengthNoDataAndTrailers(boolean server) throws Exception {
setUp(server);
testClientNonHeadRequestWithContentLength(true, true);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testClientNonHeadRequestWithContentLengthNotEnoughData(boolean server) throws Exception {
setUp(server);
testClientNonHeadRequestWithContentLength(false, false);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testClientNonHeadRequestWithContentLengthNotEnoughDataAndTrailer(boolean server) throws Exception {
setUp(server);
testClientNonHeadRequestWithContentLength(false, true);
}
private void testClientNonHeadRequestWithContentLength(boolean noData, boolean trailers) throws Exception {
EmbeddedQuicStreamChannel channel = newClientStream(() -> false);
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
headersFrame.headers().method(HttpMethod.GET.asciiName());
assertTrue(channel.writeOutbound(headersFrame));
Http3HeadersFrame responseHeadersFrame = new DefaultHttp3HeadersFrame();
responseHeadersFrame.headers().setLong(HttpHeaderNames.CONTENT_LENGTH, 10);
assertTrue(channel.writeInbound(responseHeadersFrame));
if (!noData) {
assertTrue(channel.writeInbound(new DefaultHttp3DataFrame(Unpooled.buffer().writeZero(9))));
}
try {
if (trailers) {
channel.writeInbound(new DefaultHttp3HeadersFrame());
} else {
channel.pipeline().fireUserEventTriggered(ChannelInputShutdownReadComplete.INSTANCE);
channel.checkException();
}
} catch (Exception e) {
assertException(Http3ErrorCode.H3_MESSAGE_ERROR, e);
}
assertTrue(channel.finishAndReleaseAll());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testServerWithContentLengthNoData(boolean server) throws Exception {
setUp(server);
testServerWithContentLength(true, false);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testServerWithContentLengthNoDataAndTrailers(boolean server) throws Exception {
setUp(server);
testServerWithContentLength(true, true);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testServerWithContentLengthNotEnoughData(boolean server) throws Exception {
setUp(server);
testServerWithContentLength(false, false);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testServerWithContentLengthNotEnoughDataAndTrailer(boolean server) throws Exception {
setUp(server);
testServerWithContentLength(false, true);
}
private void testServerWithContentLength(boolean noData, boolean trailers) throws Exception {
EmbeddedQuicStreamChannel channel = newServerStream();
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
headersFrame.headers().setLong(HttpHeaderNames.CONTENT_LENGTH, 10);
headersFrame.headers().method(HttpMethod.POST.asciiName());
assertTrue(channel.writeInbound(headersFrame));
if (!noData) {
assertTrue(channel.writeInbound(new DefaultHttp3DataFrame(Unpooled.buffer().writeZero(9))));
}
try {
if (trailers) {
channel.writeInbound(new DefaultHttp3HeadersFrame());
} else {
channel.pipeline().fireUserEventTriggered(ChannelInputShutdownReadComplete.INSTANCE);
channel.checkException();
}
} catch (Exception e) {
assertException(Http3ErrorCode.H3_MESSAGE_ERROR, e);
}
assertTrue(channel.finishAndReleaseAll());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testHttp3HeadersFrameWithConnectionHeader(boolean server) throws Exception {
setUp(server);
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
headersFrame.headers().add(HttpHeaderNames.CONNECTION, "something");
testHeadersFrame(headersFrame, Http3ErrorCode.H3_MESSAGE_ERROR);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testHttp3HeadersFrameWithTeHeaderAndInvalidValue(boolean server) throws Exception {
setUp(server);
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
headersFrame.headers().add(HttpHeaderNames.TE, "something");
testHeadersFrame(headersFrame, Http3ErrorCode.H3_MESSAGE_ERROR);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testHttp3HeadersFrameWithTeHeaderAndValidValue(boolean server) throws Exception {
setUp(server);
Http3HeadersFrame headersFrame = new DefaultHttp3HeadersFrame();
headersFrame.headers().add(HttpHeaderNames.TE, HttpHeaderValues.TRAILERS);
testHeadersFrame(headersFrame, null);
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponseAfterActualResponseServer(boolean server) throws Exception {
setUp(server);
testInformationalResponse(true, true, newResponse(OK), newResponse(CONTINUE));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponseAfterActualResponseClient(boolean server) throws Exception {
setUp(server);
testInformationalResponse(false, true, newResponse(OK), newResponse(CONTINUE));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testMultiInformationalResponseServer(boolean server) throws Exception {
setUp(server);
testInformationalResponse(true, false, newResponse(CONTINUE), newResponse(CONTINUE), newResponse(OK));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testMultiInformationalResponseClient(boolean server) throws Exception {
setUp(server);
testInformationalResponse(false, false, newResponse(CONTINUE), newResponse(CONTINUE), newResponse(OK));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testMultiInformationalResponseAfterActualResponseServer(boolean server) throws Exception {
setUp(server);
testInformationalResponse(true, false, newResponse(CONTINUE), newResponse(CONTINUE), newResponse(OK));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testMultiInformationalResponseAfterActualResponseClient(boolean server) throws Exception {
setUp(server);
testInformationalResponse(false, false, newResponse(CONTINUE), newResponse(CONTINUE), newResponse(OK));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponseWithDataAndTrailersServer(boolean server) throws Exception {
setUp(server);
testInformationalResponse(true, false, newResponse(CONTINUE), newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()),
new DefaultHttp3HeadersFrame());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponseWithDataAndTrailersClient(boolean server) throws Exception {
setUp(server);
testInformationalResponse(false, false, newResponse(CONTINUE), newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()),
new DefaultHttp3HeadersFrame());
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponseWithDataServer(boolean server) throws Exception {
setUp(server);
testInformationalResponse(true, false, newResponse(CONTINUE), newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponseWithDataClient(boolean server) throws Exception {
setUp(server);
testInformationalResponse(false, false, newResponse(CONTINUE), newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponsePostDataServer(boolean server) throws Exception {
setUp(server);
testInformationalResponse(true, true, newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()), newResponse(CONTINUE));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponsePostDataClient(boolean server) throws Exception {
setUp(server);
testInformationalResponse(false, true, newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()), newResponse(CONTINUE));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponsePostTrailersServer(boolean server) throws Exception {
setUp(server);
testInformationalResponse(true, true, newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()), new DefaultHttp3HeadersFrame(), newResponse(CONTINUE));
}
@ParameterizedTest(name = "{index}: server = {0}")
@MethodSource("data")
public void testInformationalResponsePostTrailersClient(boolean server) throws Exception {
setUp(server);
testInformationalResponse(false, true, newResponse(OK),
new DefaultHttp3DataFrame(Unpooled.buffer()), new DefaultHttp3HeadersFrame(), newResponse(CONTINUE));
}
private void testInformationalResponse(boolean server, boolean expectFail, Http3Frame... frames) throws Exception {
EmbeddedQuicStreamChannel channel = server ? newServerStream() :
newClientStream(() -> false);
for (int i = 0; i < frames.length; i++) {
Http3Frame frame = frames[i];
Http3Frame read = null;
try {
if (server) {
assertTrue(channel.writeOutbound(frame));
if (expectFail && i == frames.length - 1) {
fail();
} else {
read = channel.readOutbound();
}
} else {
assertTrue(channel.writeInbound(frame));
if (expectFail && i == frames.length - 1) {
fail();
} else {
read = channel.readInbound();
}
}
assertEquals(frame, read);
} catch (Exception e) {
assertException(H3_FRAME_UNEXPECTED, e);
if (!server) {
verifyClose(H3_FRAME_UNEXPECTED, parent);
}
} finally {
release(read);
}
}
assertFalse(parent.finish());
assertFalse(channel.finish());
}
private void testHeadersFrame(Http3HeadersFrame headersFrame, @Nullable Http3ErrorCode code) throws Exception {
EmbeddedQuicStreamChannel channel = newServerStream();
try {
assertTrue(channel.writeInbound(headersFrame));
if (code != null) {
fail();
}
} catch (Throwable cause) {
if (code == null) {
throw cause;
}
assertException(code, cause);
assertEquals((Integer) code.code, channel.outputShutdownError());
}
// Only expect produced messages when there was no error.
assertEquals(code == null, channel.finishAndReleaseAll());
}
private EmbeddedQuicStreamChannel newClientStream(final BooleanSupplier goAwayReceivedSupplier) throws Exception {
return newStream(QuicStreamType.BIDIRECTIONAL, new ChannelInitializer<QuicStreamChannel>() {
@Override
protected void initChannel(QuicStreamChannel ch) {
Http3RequestStreamEncodeStateValidator encStateValidator = new Http3RequestStreamEncodeStateValidator();
Http3RequestStreamDecodeStateValidator decStateValidator = new Http3RequestStreamDecodeStateValidator();
ch.pipeline().addLast(encStateValidator);
ch.pipeline().addLast(decStateValidator);
ch.pipeline().addLast(newClientValidator(goAwayReceivedSupplier, qpackAttributes, decoder,
encStateValidator, decStateValidator));
}
});
}
private EmbeddedQuicStreamChannel newServerStream() throws Exception {
return newStream(QuicStreamType.BIDIRECTIONAL, newHandler(true));
}
private static Http3Frame newResponse(HttpResponseStatus status) {
Http3HeadersFrame frame = new DefaultHttp3HeadersFrame();
frame.headers().status(status.codeAsText());
return frame;
}
}
| Http3RequestStreamValidationHandlerTest |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java | {
"start": 1514,
"end": 1698
} | class ____ extends InternalClusterInfoService {
/** This is a marker plugin used to trigger MockNode to use this mock info service. */
public static | MockInternalClusterInfoService |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/introspect/TestNamingStrategyCustom.java | {
"start": 3538,
"end": 3682
} | class ____ {
public int intValue;
public Value() { this(0); }
public Value(int v) { intValue = v; }
}
static | Value |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/function/FailableToLongBiFunction.java | {
"start": 1215,
"end": 2099
} | interface ____<T, U, E extends Throwable> {
/** NOP singleton */
@SuppressWarnings("rawtypes")
FailableToLongBiFunction NOP = (t, u) -> 0;
/**
* Gets the NOP singleton.
*
* @param <T> the type of the first argument to the function
* @param <U> the type of the second argument to the function
* @param <E> The kind of thrown exception or error.
* @return The NOP singleton.
*/
@SuppressWarnings("unchecked")
static <T, U, E extends Throwable> FailableToLongBiFunction<T, U, E> nop() {
return NOP;
}
/**
* Applies this function to the given arguments.
*
* @param t the first function argument
* @param u the second function argument
* @return the function result
* @throws E Thrown when the function fails.
*/
long applyAsLong(T t, U u) throws E;
}
| FailableToLongBiFunction |
java | spring-projects__spring-boot | module/spring-boot-webmvc-test/src/test/java/org/springframework/boot/webmvc/test/autoconfigure/mockmvc/WebMvcTestHtmlUnitWebDriverCustomScopeIntegrationTests.java | {
"start": 2380,
"end": 2825
} | class ____ implements FactoryBean<WebDriver> {
private final HtmlUnitDriver driver;
WebDriverFactory(MockMvc mockMvc) {
this.driver = MockMvcHtmlUnitDriverBuilder.mockMvcSetup(mockMvc).build();
}
@Override
public boolean isSingleton() {
return true;
}
@Override
public Class<?> getObjectType() {
return WebDriver.class;
}
@Override
public WebDriver getObject() {
return this.driver;
}
}
}
| WebDriverFactory |
java | grpc__grpc-java | api/src/test/java/io/grpc/ServerInterceptorsTest.java | {
"start": 1881,
"end": 19432
} | class ____ {
@Rule
public final MockitoRule mocks = MockitoJUnit.rule();
@Mock
private Marshaller<String> requestMarshaller;
@Mock
private Marshaller<Integer> responseMarshaller;
@Mock
private ServerCallHandler<String, Integer> handler;
@Mock
private ServerCall.Listener<String> listener;
private MethodDescriptor<String, Integer> flowMethod;
private ServerCall<String, Integer> call = new NoopServerCall<>();
private ServerServiceDefinition serviceDefinition;
private final Metadata headers = new Metadata();
/** Set up for test. */
@Before
public void setUp() {
flowMethod = MethodDescriptor.<String, Integer>newBuilder()
.setType(MethodType.UNKNOWN)
.setFullMethodName("basic/flow")
.setRequestMarshaller(requestMarshaller)
.setResponseMarshaller(responseMarshaller)
.build();
Mockito.when(
handler.startCall(
ArgumentMatchers.<ServerCall<String, Integer>>any(),
ArgumentMatchers.<Metadata>any()))
.thenReturn(listener);
serviceDefinition = ServerServiceDefinition.builder(new ServiceDescriptor("basic", flowMethod))
.addMethod(flowMethod, handler).build();
}
/** Final checks for all tests. */
@After
public void makeSureExpectedMocksUnused() {
verifyNoInteractions(requestMarshaller);
verifyNoInteractions(responseMarshaller);
verifyNoInteractions(listener);
}
@Test
public void npeForNullServiceDefinition() {
ServerServiceDefinition serviceDef = null;
List<ServerInterceptor> interceptors = Arrays.asList();
assertThrows(NullPointerException.class,
() -> ServerInterceptors.intercept(serviceDef, interceptors));
}
@Test
public void npeForNullInterceptorList() {
assertThrows(NullPointerException.class,
() -> ServerInterceptors.intercept(serviceDefinition, (List<ServerInterceptor>) null));
}
@Test
public void npeForNullInterceptor() {
List<ServerInterceptor> interceptors = Arrays.asList((ServerInterceptor) null);
assertThrows(NullPointerException.class,
() -> ServerInterceptors.intercept(serviceDefinition, interceptors));
}
@Test
public void noop() {
assertSame(serviceDefinition,
ServerInterceptors.intercept(serviceDefinition, Arrays.<ServerInterceptor>asList()));
}
@Test
public void multipleInvocationsOfHandler() {
ServerInterceptor interceptor =
mock(ServerInterceptor.class, delegatesTo(new NoopInterceptor()));
ServerServiceDefinition intercepted
= ServerInterceptors.intercept(serviceDefinition, Arrays.asList(interceptor));
assertSame(listener,
getSoleMethod(intercepted).getServerCallHandler().startCall(call, headers));
verify(interceptor).interceptCall(same(call), same(headers), anyCallHandler());
verify(handler).startCall(call, headers);
verifyNoMoreInteractions(interceptor, handler);
assertSame(listener,
getSoleMethod(intercepted).getServerCallHandler().startCall(call, headers));
verify(interceptor, times(2))
.interceptCall(same(call), same(headers), anyCallHandler());
verify(handler, times(2)).startCall(call, headers);
verifyNoMoreInteractions(interceptor, handler);
}
@Test
public void correctHandlerCalled() {
@SuppressWarnings("unchecked")
ServerCallHandler<String, Integer> handler2 = mock(ServerCallHandler.class);
MethodDescriptor<String, Integer> flowMethod2 =
flowMethod.toBuilder().setFullMethodName("basic/flow2").build();
serviceDefinition = ServerServiceDefinition.builder(
new ServiceDescriptor("basic", flowMethod, flowMethod2))
.addMethod(flowMethod, handler)
.addMethod(flowMethod2, handler2).build();
ServerServiceDefinition intercepted = ServerInterceptors.intercept(
serviceDefinition, Arrays.<ServerInterceptor>asList(new NoopInterceptor()));
getMethod(intercepted, "basic/flow").getServerCallHandler().startCall(call, headers);
verify(handler).startCall(call, headers);
verifyNoMoreInteractions(handler);
verifyNoMoreInteractions(handler2);
getMethod(intercepted, "basic/flow2").getServerCallHandler().startCall(call, headers);
verify(handler2).startCall(call, headers);
verifyNoMoreInteractions(handler);
verifyNoMoreInteractions(handler2);
}
@Test
public void callNextTwice() {
ServerInterceptor interceptor = new ServerInterceptor() {
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
// Calling next twice is permitted, although should only rarely be useful.
assertSame(listener, next.startCall(call, headers));
return next.startCall(call, headers);
}
};
ServerServiceDefinition intercepted = ServerInterceptors.intercept(serviceDefinition,
interceptor);
assertSame(listener,
getSoleMethod(intercepted).getServerCallHandler().startCall(call, headers));
verify(handler, times(2)).startCall(same(call), same(headers));
verifyNoMoreInteractions(handler);
}
@Test
public void ordered() {
final List<String> order = new ArrayList<>();
handler = new ServerCallHandler<String, Integer>() {
@Override
public ServerCall.Listener<String> startCall(
ServerCall<String, Integer> call,
Metadata headers) {
order.add("handler");
return listener;
}
};
ServerInterceptor interceptor1 = new ServerInterceptor() {
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
order.add("i1");
return next.startCall(call, headers);
}
};
ServerInterceptor interceptor2 = new ServerInterceptor() {
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
order.add("i2");
return next.startCall(call, headers);
}
};
ServerServiceDefinition serviceDefinition = ServerServiceDefinition.builder(
new ServiceDescriptor("basic", flowMethod))
.addMethod(flowMethod, handler).build();
ServerServiceDefinition intercepted = ServerInterceptors.intercept(
serviceDefinition, Arrays.asList(interceptor1, interceptor2));
assertSame(listener,
getSoleMethod(intercepted).getServerCallHandler().startCall(call, headers));
assertEquals(Arrays.asList("i2", "i1", "handler"), order);
}
@Test
public void orderedForward() {
final List<String> order = new ArrayList<>();
handler = new ServerCallHandler<String, Integer>() {
@Override
public ServerCall.Listener<String> startCall(
ServerCall<String, Integer> call,
Metadata headers) {
order.add("handler");
return listener;
}
};
ServerInterceptor interceptor1 = new ServerInterceptor() {
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
order.add("i1");
return next.startCall(call, headers);
}
};
ServerInterceptor interceptor2 = new ServerInterceptor() {
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
order.add("i2");
return next.startCall(call, headers);
}
};
ServerServiceDefinition serviceDefinition = ServerServiceDefinition.builder(
new ServiceDescriptor("basic", flowMethod))
.addMethod(flowMethod, handler).build();
ServerServiceDefinition intercepted = ServerInterceptors.interceptForward(
serviceDefinition, interceptor1, interceptor2);
assertSame(listener,
getSoleMethod(intercepted).getServerCallHandler().startCall(call, headers));
assertEquals(Arrays.asList("i1", "i2", "handler"), order);
}
@Test
public void argumentsPassed() {
final ServerCall<String, Integer> call2 = new NoopServerCall<>();
@SuppressWarnings("unchecked")
final ServerCall.Listener<String> listener2 = mock(ServerCall.Listener.class);
ServerInterceptor interceptor = new ServerInterceptor() {
@SuppressWarnings("unchecked") // Lot's of casting for no benefit. Not intended use.
@Override
public <R1, R2> ServerCall.Listener<R1> interceptCall(
ServerCall<R1, R2> call,
Metadata headers,
ServerCallHandler<R1, R2> next) {
assertSame(call, ServerInterceptorsTest.this.call);
assertSame(listener,
next.startCall((ServerCall<R1, R2>)call2, headers));
return (ServerCall.Listener<R1>) listener2;
}
};
ServerServiceDefinition intercepted = ServerInterceptors.intercept(
serviceDefinition, Arrays.asList(interceptor));
assertSame(listener2,
getSoleMethod(intercepted).getServerCallHandler().startCall(call, headers));
verify(handler).startCall(call2, headers);
}
@Test
@SuppressWarnings("unchecked")
public void typedMarshalledMessages() {
final List<String> order = new ArrayList<>();
Marshaller<Holder> marshaller = new Marshaller<Holder>() {
@Override
public InputStream stream(Holder value) {
return value.get();
}
@Override
public Holder parse(InputStream stream) {
return new Holder(stream);
}
};
ServerCallHandler<Holder, Holder> handler2 = new ServerCallHandler<Holder, Holder>() {
@Override
public Listener<Holder> startCall(final ServerCall<Holder, Holder> call,
final Metadata headers) {
return new Listener<Holder>() {
@Override
public void onMessage(Holder message) {
order.add("handler");
call.sendMessage(message);
}
};
}
};
MethodDescriptor<Holder, Holder> wrappedMethod = MethodDescriptor.<Holder, Holder>newBuilder()
.setType(MethodType.UNKNOWN)
.setFullMethodName("basic/wrapped")
.setRequestMarshaller(marshaller)
.setResponseMarshaller(marshaller)
.build();
ServerServiceDefinition serviceDef = ServerServiceDefinition.builder(
new ServiceDescriptor("basic", wrappedMethod))
.addMethod(wrappedMethod, handler2).build();
ServerInterceptor interceptor1 = new ServerInterceptor() {
@Override
public <ReqT, RespT> Listener<ReqT> interceptCall(ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
ServerCall<ReqT, RespT> interceptedCall = new ForwardingServerCall
.SimpleForwardingServerCall<ReqT, RespT>(call) {
@Override
public void sendMessage(RespT message) {
order.add("i1sendMessage");
assertTrue(message instanceof Holder);
super.sendMessage(message);
}
};
ServerCall.Listener<ReqT> originalListener = next
.startCall(interceptedCall, headers);
return new ForwardingServerCallListener
.SimpleForwardingServerCallListener<ReqT>(originalListener) {
@Override
public void onMessage(ReqT message) {
order.add("i1onMessage");
assertTrue(message instanceof Holder);
super.onMessage(message);
}
};
}
};
ServerInterceptor interceptor2 = new ServerInterceptor() {
@Override
public <ReqT, RespT> Listener<ReqT> interceptCall(ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
ServerCall<ReqT, RespT> interceptedCall = new ForwardingServerCall
.SimpleForwardingServerCall<ReqT, RespT>(call) {
@Override
public void sendMessage(RespT message) {
order.add("i2sendMessage");
assertTrue(message instanceof InputStream);
super.sendMessage(message);
}
};
ServerCall.Listener<ReqT> originalListener = next
.startCall(interceptedCall, headers);
return new ForwardingServerCallListener
.SimpleForwardingServerCallListener<ReqT>(originalListener) {
@Override
public void onMessage(ReqT message) {
order.add("i2onMessage");
assertTrue(message instanceof InputStream);
super.onMessage(message);
}
};
}
};
ServerServiceDefinition intercepted = ServerInterceptors.intercept(serviceDef, interceptor1);
ServerServiceDefinition inputStreamMessageService = ServerInterceptors
.useInputStreamMessages(intercepted);
ServerServiceDefinition intercepted2 = ServerInterceptors
.intercept(inputStreamMessageService, interceptor2);
ServerMethodDefinition<InputStream, InputStream> serverMethod =
(ServerMethodDefinition<InputStream, InputStream>) intercepted2.getMethod("basic/wrapped");
ServerCall<InputStream, InputStream> call2 = new NoopServerCall<>();
byte[] bytes = {};
serverMethod
.getServerCallHandler()
.startCall(call2, headers)
.onMessage(new ByteArrayInputStream(bytes));
assertEquals(
Arrays.asList("i2onMessage", "i1onMessage", "handler", "i1sendMessage", "i2sendMessage"),
order);
}
/**
* Tests the ServerInterceptors#useMarshalledMessages()} with two marshallers. Makes sure that
* on incoming request the request marshaller's stream method is called and on response the
* response marshaller's parse method is called
*/
@Test
@SuppressWarnings("unchecked")
public void distinctMarshallerForRequestAndResponse() {
final List<String> requestFlowOrder = new ArrayList<>();
final Marshaller<String> requestMarshaller = new Marshaller<String>() {
@Override
public InputStream stream(String value) {
requestFlowOrder.add("RequestStream");
return null;
}
@Override
public String parse(InputStream stream) {
requestFlowOrder.add("RequestParse");
return null;
}
};
final Marshaller<String> responseMarshaller = new Marshaller<String>() {
@Override
public InputStream stream(String value) {
requestFlowOrder.add("ResponseStream");
return null;
}
@Override
public String parse(InputStream stream) {
requestFlowOrder.add("ResponseParse");
return null;
}
};
final Marshaller<Holder> dummyMarshaller = new Marshaller<Holder>() {
@Override
public InputStream stream(Holder value) {
return value.get();
}
@Override
public Holder parse(InputStream stream) {
return new Holder(stream);
}
};
ServerCallHandler<Holder, Holder> handler = (call, headers) -> new Listener<Holder>() {
@Override
public void onMessage(Holder message) {
requestFlowOrder.add("handler");
call.sendMessage(message);
}
};
MethodDescriptor<Holder, Holder> wrappedMethod = MethodDescriptor.<Holder, Holder>newBuilder()
.setType(MethodType.UNKNOWN)
.setFullMethodName("basic/wrapped")
.setRequestMarshaller(dummyMarshaller)
.setResponseMarshaller(dummyMarshaller)
.build();
ServerServiceDefinition serviceDef = ServerServiceDefinition.builder(
new ServiceDescriptor("basic", wrappedMethod))
.addMethod(wrappedMethod, handler).build();
ServerServiceDefinition intercepted = ServerInterceptors.useMarshalledMessages(serviceDef,
requestMarshaller, responseMarshaller);
ServerMethodDefinition<String, String> serverMethod =
(ServerMethodDefinition<String, String>) intercepted.getMethod("basic/wrapped");
ServerCall<String, String> serverCall = new NoopServerCall<>();
serverMethod.getServerCallHandler().startCall(serverCall, headers).onMessage("TestMessage");
assertEquals(Arrays.asList("RequestStream", "handler", "ResponseParse"), requestFlowOrder);
}
@SuppressWarnings("unchecked")
private static ServerMethodDefinition<String, Integer> getSoleMethod(
ServerServiceDefinition serviceDef) {
if (serviceDef.getMethods().size() != 1) {
throw new AssertionError("Not exactly one method present");
}
return (ServerMethodDefinition<String, Integer>) getOnlyElement(serviceDef.getMethods());
}
@SuppressWarnings("unchecked")
private static ServerMethodDefinition<String, Integer> getMethod(
ServerServiceDefinition serviceDef, String name) {
return (ServerMethodDefinition<String, Integer>) serviceDef.getMethod(name);
}
private ServerCallHandler<String, Integer> anyCallHandler() {
return ArgumentMatchers.any();
}
private static | ServerInterceptorsTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/LeftJoinFetchSubclassesTest.java | {
"start": 4479,
"end": 4594
} | class ____ extends SuperClass {
}
@Entity( name = "SubClass2" )
@DiscriminatorValue( "2" )
public static | SubClass1 |
java | apache__flink | flink-connectors/flink-file-sink-common/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/InProgressFileWriter.java | {
"start": 2337,
"end": 2471
} | interface ____ extends PendingFileRecoverable {}
/** The handle can be used to recover pending file. */
| InProgressFileRecoverable |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastIntEvaluator.java | {
"start": 1154,
"end": 4866
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LeastIntEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator[] values;
private final DriverContext driverContext;
private Warnings warnings;
public LeastIntEvaluator(Source source, EvalOperator.ExpressionEvaluator[] values,
DriverContext driverContext) {
this.source = source;
this.values = values;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
IntBlock[] valuesBlocks = new IntBlock[values.length];
try (Releasable valuesRelease = Releasables.wrap(valuesBlocks)) {
for (int i = 0; i < valuesBlocks.length; i++) {
valuesBlocks[i] = (IntBlock)values[i].eval(page);
}
IntVector[] valuesVectors = new IntVector[values.length];
for (int i = 0; i < valuesBlocks.length; i++) {
valuesVectors[i] = valuesBlocks[i].asVector();
if (valuesVectors[i] == null) {
return eval(page.getPositionCount(), valuesBlocks);
}
}
return eval(page.getPositionCount(), valuesVectors).asBlock();
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
for (EvalOperator.ExpressionEvaluator e : values) {
baseRamBytesUsed += e.baseRamBytesUsed();
}
return baseRamBytesUsed;
}
public IntBlock eval(int positionCount, IntBlock[] valuesBlocks) {
try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) {
int[] valuesValues = new int[values.length];
position: for (int p = 0; p < positionCount; p++) {
for (int i = 0; i < valuesBlocks.length; i++) {
switch (valuesBlocks[i].getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
}
// unpack valuesBlocks into valuesValues
for (int i = 0; i < valuesBlocks.length; i++) {
int o = valuesBlocks[i].getFirstValueIndex(p);
valuesValues[i] = valuesBlocks[i].getInt(o);
}
result.appendInt(Least.process(valuesValues));
}
return result.build();
}
}
public IntVector eval(int positionCount, IntVector[] valuesVectors) {
try(IntVector.FixedBuilder result = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) {
int[] valuesValues = new int[values.length];
position: for (int p = 0; p < positionCount; p++) {
// unpack valuesVectors into valuesValues
for (int i = 0; i < valuesVectors.length; i++) {
valuesValues[i] = valuesVectors[i].getInt(p);
}
result.appendInt(p, Least.process(valuesValues));
}
return result.build();
}
}
@Override
public String toString() {
return "LeastIntEvaluator[" + "values=" + Arrays.toString(values) + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(() -> Releasables.close(values));
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | LeastIntEvaluator |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nestedbeans/recursive/TreeRecursionMapper.java | {
"start": 1860,
"end": 2456
} | class ____ {
private String name;
private List<ChildDto> child;
public ChildDto() {
}
public ChildDto(String name, List<ChildDto> child) {
this.name = name;
this.child = child;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<ChildDto> getChild() {
return child;
}
public void setChild(List<ChildDto> child) {
this.child = child;
}
}
}
| ChildDto |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/eval/EvalTest_multi_short.java | {
"start": 185,
"end": 867
} | class ____ extends TestCase {
public void test_byte() throws Exception {
assertEquals(2, SQLEvalVisitorUtils.evalExpr(JdbcConstants.MYSQL, "? * ?", (short) 1, (byte) 2));
}
public void test_byte_1() throws Exception {
assertEquals(2, SQLEvalVisitorUtils.evalExpr(JdbcConstants.MYSQL, "? * ?", (short) 1, "2"));
}
public void test_byte_2() throws Exception {
assertEquals(null, SQLEvalVisitorUtils.evalExpr(JdbcConstants.MYSQL, "? * ?", (short) 1, null));
}
public void test_byte_3() throws Exception {
assertEquals(2, SQLEvalVisitorUtils.evalExpr(JdbcConstants.MYSQL, "? * ?", "2", (short) 1));
}
}
| EvalTest_multi_short |
java | alibaba__nacos | plugin/auth/src/test/java/com/alibaba/nacos/plugin/auth/spi/mock/MockEmptyNameAuthPluginService.java | {
"start": 1177,
"end": 1983
} | class ____ implements AuthPluginService {
@Override
public Collection<String> identityNames() {
return null;
}
@Override
public boolean enableAuth(ActionTypes action, String type) {
return false;
}
@Override
public String getAuthServiceName() {
return null;
}
@Override
public AuthResult validateAuthority(IdentityContext identityContext, Permission permission)
throws AccessException {
return AuthResult.failureResult(401, "mock auth failed");
}
@Override
public AuthResult validateIdentity(IdentityContext identityContext, Resource resource)
throws AccessException {
return AuthResult.failureResult(403, "mock auth failed");
}
}
| MockEmptyNameAuthPluginService |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/intarrays/IntArrays_assertDoesNotContain_at_Index_Test.java | {
"start": 1730,
"end": 5531
} | class ____ extends IntArraysBaseTest {
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), null, 8,
someIndex()))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_actual_does_not_contain_value_at_Index() {
arrays.assertDoesNotContain(someInfo(), actual, 6, atIndex(1));
}
@Test
void should_pass_if_actual_is_empty() {
arrays.assertDoesNotContain(someInfo(), emptyArray(), 8, someIndex());
}
@Test
void should_throw_error_if_Index_is_null() {
assertThatNullPointerException().isThrownBy(() -> arrays.assertDoesNotContain(someInfo(), actual, 8, null))
.withMessage("Index should not be null");
}
@Test
void should_pass_if_Index_is_out_of_bounds() {
arrays.assertDoesNotContain(someInfo(), actual, 8, atIndex(6));
}
@Test
void should_fail_if_actual_contains_value_at_index() {
AssertionInfo info = someInfo();
Index index = atIndex(0);
Throwable error = catchThrowable(() -> arrays.assertDoesNotContain(info, actual, 6, index));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotContainAtIndex(actual, 6, index));
}
@Test
void should_fail_if_actual_is_null_whatever_custom_comparison_strategy_is() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
null,
-8,
someIndex()))
.withMessage(actualIsNull());
}
@Test
void should_pass_if_actual_does_not_contain_value_at_Index_according_to_custom_comparison_strategy() {
arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(), actual, 6, atIndex(1));
}
@Test
void should_pass_if_actual_is_empty_whatever_custom_comparison_strategy_is() {
arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(), emptyArray(), -8, someIndex());
}
@Test
void should_throw_error_if_Index_is_null_whatever_custom_comparison_strategy_is() {
assertThatNullPointerException().isThrownBy(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(),
actual,
-8, null))
.withMessage("Index should not be null");
}
@Test
void should_pass_if_Index_is_out_of_bounds_whatever_custom_comparison_strategy_is() {
arraysWithCustomComparisonStrategy.assertDoesNotContain(someInfo(), actual, -8, atIndex(6));
}
@Test
void should_fail_if_actual_contains_value_at_index_according_to_custom_comparison_strategy() {
AssertionInfo info = someInfo();
Index index = atIndex(0);
Throwable error = catchThrowable(() -> arraysWithCustomComparisonStrategy.assertDoesNotContain(info, actual, 6, index));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldNotContainAtIndex(actual, 6, index, absValueComparisonStrategy));
}
}
| IntArrays_assertDoesNotContain_at_Index_Test |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/headers/UserAgentFromConfigTest.java | {
"start": 2358,
"end": 2443
} | interface ____ {
@Path("/")
@GET
String call();
}
}
| Client2 |
java | mybatis__mybatis-3 | src/main/java/org/apache/ibatis/type/MappedTypes.java | {
"start": 1260,
"end": 1401
} | interface ____ {
/**
* Returns java types to map {@link TypeHandler}.
*
* @return java types
*/
Class<?>[] value();
}
| MappedTypes |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/type/H2JsonArrayJdbcTypeConstructor.java | {
"start": 534,
"end": 1306
} | class ____ implements JdbcTypeConstructor {
public static final H2JsonArrayJdbcTypeConstructor INSTANCE = new H2JsonArrayJdbcTypeConstructor();
@Override
public JdbcType resolveType(
TypeConfiguration typeConfiguration,
Dialect dialect,
BasicType<?> elementType,
ColumnTypeInformation columnTypeInformation) {
return resolveType( typeConfiguration, dialect, elementType.getJdbcType(), columnTypeInformation );
}
@Override
public JdbcType resolveType(
TypeConfiguration typeConfiguration,
Dialect dialect,
JdbcType elementType,
ColumnTypeInformation columnTypeInformation) {
return new H2JsonArrayJdbcType( elementType );
}
@Override
public int getDefaultSqlTypeCode() {
return SqlTypes.JSON_ARRAY;
}
}
| H2JsonArrayJdbcTypeConstructor |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/GetQueryRulesetAction.java | {
"start": 3927,
"end": 5462
} | class ____ extends ActionResponse implements ToXContentObject {
private final QueryRuleset queryRuleset;
public Response(StreamInput in) throws IOException {
this.queryRuleset = new QueryRuleset(in);
}
public Response(QueryRuleset queryRuleset) {
this.queryRuleset = queryRuleset;
}
public Response(String rulesetId, List<QueryRule> rules) {
this.queryRuleset = new QueryRuleset(rulesetId, rules);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
queryRuleset.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return queryRuleset.toXContent(builder, params);
}
public QueryRuleset queryRuleset() {
return queryRuleset;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Response response = (Response) o;
return Objects.equals(queryRuleset, response.queryRuleset);
}
@Override
public int hashCode() {
return Objects.hash(queryRuleset);
}
public static Response fromXContent(String resourceName, XContentParser parser) throws IOException {
return new Response(QueryRuleset.fromXContent(resourceName, parser));
}
}
}
| Response |
java | redisson__redisson | redisson/src/main/java/org/redisson/liveobject/core/RedissonObjectBuilder.java | {
"start": 1597,
"end": 5146
} | enum ____ {RXJAVA, REACTIVE, DEFAULT}
private static final Map<Class<?>, Class<? extends RObject>> SUPPORTED_CLASS_MAPPING = new LinkedHashMap<>();
private static final Map<Class<?>, Method> DEFAULT_CODEC_REFERENCES = new HashMap<>();
private static final Map<Class<?>, Method> CUSTOM_CODEC_REFERENCES = new HashMap<>();
static {
SUPPORTED_CLASS_MAPPING.put(SortedSet.class, RedissonSortedSet.class);
SUPPORTED_CLASS_MAPPING.put(Set.class, RedissonSet.class);
SUPPORTED_CLASS_MAPPING.put(ConcurrentMap.class, RedissonMap.class);
SUPPORTED_CLASS_MAPPING.put(Map.class, RedissonMap.class);
SUPPORTED_CLASS_MAPPING.put(BlockingDeque.class, RedissonBlockingDeque.class);
SUPPORTED_CLASS_MAPPING.put(Deque.class, RedissonDeque.class);
SUPPORTED_CLASS_MAPPING.put(BlockingQueue.class, RedissonBlockingQueue.class);
SUPPORTED_CLASS_MAPPING.put(Queue.class, RedissonQueue.class);
SUPPORTED_CLASS_MAPPING.put(List.class, RedissonList.class);
fillCodecMethods(RedissonClient.class, RObject.class);
fillCodecMethods(RedissonReactiveClient.class, RObjectReactive.class);
fillCodecMethods(RedissonRxClient.class, RObjectRx.class);
}
private final Config config;
private RedissonClient redisson;
private RedissonReactiveClient redissonReactive;
private RedissonRxClient redissonRx;
private final ReferenceCodecProvider codecProvider = new DefaultReferenceCodecProvider();
public RedissonObjectBuilder(RedissonClient redisson) {
super();
this.config = redisson.getConfig();
this.redisson = redisson;
Codec codec = config.getCodec();
codecProvider.registerCodec((Class<Codec>) codec.getClass(), codec);
}
public RedissonObjectBuilder(RedissonReactiveClient redissonReactive) {
super();
this.config = redissonReactive.getConfig();
this.redissonReactive = redissonReactive;
Codec codec = config.getCodec();
codecProvider.registerCodec((Class<Codec>) codec.getClass(), codec);
}
public RedissonObjectBuilder(RedissonRxClient redissonRx) {
super();
this.config = redissonRx.getConfig();
this.redissonRx = redissonRx;
Codec codec = config.getCodec();
codecProvider.registerCodec((Class<Codec>) codec.getClass(), codec);
}
public void storeAsync(RObject ar, String fieldName, RMap<String, Object> liveMap) {
liveMap.fastPutAsync(fieldName, ar);
}
public void store(RObject ar, String fieldName, RMap<String, Object> liveMap) {
liveMap.fastPut(fieldName, ar);
}
public RObject createObject(Object id, Class<?> clazz, Class<?> fieldType, String fieldName) {
Class<? extends RObject> mappedClass = getMappedClass(fieldType);
try {
if (mappedClass != null) {
Codec fieldCodec = getFieldCodec(clazz, mappedClass, fieldName);
NamingScheme fieldNamingScheme = getNamingScheme(clazz, fieldCodec);
String referenceName = fieldNamingScheme.getFieldReferenceName(clazz, id, mappedClass, fieldName);
return createRObject(redisson, mappedClass, referenceName, fieldCodec);
}
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
return null;
}
/**
* WARNING: rEntity has to be the | ReferenceType |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/test/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/BlockPostingsFormat3Tests.java | {
"start": 2823,
"end": 13333
} | class ____ extends ESTestCase {
private final int MAXDOC = TEST_NIGHTLY ? Lucene50PostingsFormat.BLOCK_SIZE * 20 : Lucene50PostingsFormat.BLOCK_SIZE * 3;
// creates 8 fields with different options and does "duels" of fields against each other
public void test() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new Analyzer(Analyzer.PER_FIELD_REUSE_STRATEGY) {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer tokenizer = new MockTokenizer();
if (fieldName.contains("payloadsFixed")) {
TokenFilter filter = new MockFixedLengthPayloadFilter(new Random(0), tokenizer, 1);
return new TokenStreamComponents(tokenizer, filter);
} else if (fieldName.contains("payloadsVariable")) {
TokenFilter filter = new MockVariableLengthPayloadFilter(new Random(0), tokenizer);
return new TokenStreamComponents(tokenizer, filter);
} else {
return new TokenStreamComponents(tokenizer);
}
}
};
IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene50RWPostingsFormat()));
// TODO we could actually add more fields implemented with different PFs
// or, just put this test into the usual rotation?
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
FieldType docsOnlyType = new FieldType(TextField.TYPE_NOT_STORED);
// turn this on for a cross-check
docsOnlyType.setStoreTermVectors(true);
docsOnlyType.setIndexOptions(IndexOptions.DOCS);
FieldType docsAndFreqsType = new FieldType(TextField.TYPE_NOT_STORED);
// turn this on for a cross-check
docsAndFreqsType.setStoreTermVectors(true);
docsAndFreqsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
FieldType positionsType = new FieldType(TextField.TYPE_NOT_STORED);
// turn these on for a cross-check
positionsType.setStoreTermVectors(true);
positionsType.setStoreTermVectorPositions(true);
positionsType.setStoreTermVectorOffsets(true);
positionsType.setStoreTermVectorPayloads(true);
FieldType offsetsType = new FieldType(positionsType);
offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
Field field1 = new Field("field1docs", "", docsOnlyType);
Field field2 = new Field("field2freqs", "", docsAndFreqsType);
Field field3 = new Field("field3positions", "", positionsType);
Field field4 = new Field("field4offsets", "", offsetsType);
Field field5 = new Field("field5payloadsFixed", "", positionsType);
Field field6 = new Field("field6payloadsVariable", "", positionsType);
Field field7 = new Field("field7payloadsFixedOffsets", "", offsetsType);
Field field8 = new Field("field8payloadsVariableOffsets", "", offsetsType);
doc.add(field1);
doc.add(field2);
doc.add(field3);
doc.add(field4);
doc.add(field5);
doc.add(field6);
doc.add(field7);
doc.add(field8);
for (int i = 0; i < MAXDOC; i++) {
String stringValue = Integer.toString(i)
+ " verycommon "
+ English.intToEnglish(i).replace('-', ' ')
+ " "
+ TestUtil.randomSimpleString(random());
field1.setStringValue(stringValue);
field2.setStringValue(stringValue);
field3.setStringValue(stringValue);
field4.setStringValue(stringValue);
field5.setStringValue(stringValue);
field6.setStringValue(stringValue);
field7.setStringValue(stringValue);
field8.setStringValue(stringValue);
iw.addDocument(doc);
}
iw.close();
verify(dir);
TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
iwc = newIndexWriterConfig(analyzer);
iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene50RWPostingsFormat()));
iwc.setOpenMode(OpenMode.APPEND);
IndexWriter iw2 = new IndexWriter(dir, iwc);
iw2.forceMerge(1);
iw2.close();
verify(dir);
dir.close();
}
private void verify(Directory dir) throws Exception {
DirectoryReader ir = DirectoryReader.open(dir);
for (LeafReaderContext leaf : ir.leaves()) {
LeafReader leafReader = leaf.reader();
assertTerms(leafReader.terms("field1docs"), leafReader.terms("field2freqs"), true);
assertTerms(leafReader.terms("field3positions"), leafReader.terms("field4offsets"), true);
assertTerms(leafReader.terms("field4offsets"), leafReader.terms("field5payloadsFixed"), true);
assertTerms(leafReader.terms("field5payloadsFixed"), leafReader.terms("field6payloadsVariable"), true);
assertTerms(leafReader.terms("field6payloadsVariable"), leafReader.terms("field7payloadsFixedOffsets"), true);
assertTerms(leafReader.terms("field7payloadsFixedOffsets"), leafReader.terms("field8payloadsVariableOffsets"), true);
}
ir.close();
}
// following code is almost an exact dup of code from TestDuelingCodecs: sorry!
public void assertTerms(Terms leftTerms, Terms rightTerms, boolean deep) throws Exception {
if (leftTerms == null || rightTerms == null) {
assertNull(leftTerms);
assertNull(rightTerms);
return;
}
assertTermsStatistics(leftTerms, rightTerms);
// NOTE: we don't assert hasOffsets/hasPositions/hasPayloads because they are allowed to be
// different
boolean bothHaveFreqs = leftTerms.hasFreqs() && rightTerms.hasFreqs();
boolean bothHavePositions = leftTerms.hasPositions() && rightTerms.hasPositions();
TermsEnum leftTermsEnum = leftTerms.iterator();
TermsEnum rightTermsEnum = rightTerms.iterator();
assertTermsEnum(leftTermsEnum, rightTermsEnum, true, bothHaveFreqs, bothHavePositions);
assertTermsSeeking(leftTerms, rightTerms);
if (deep) {
int numIntersections = atLeast(3);
for (int i = 0; i < numIntersections; i++) {
String re = AutomatonTestUtil.randomRegexp(random());
Automaton determinized = Operations.determinize(
new RegExp(re, RegExp.NONE).toAutomaton(),
Operations.DEFAULT_DETERMINIZE_WORK_LIMIT
);
CompiledAutomaton automaton = new CompiledAutomaton(determinized);
if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
// TODO: test start term too
TermsEnum leftIntersection = leftTerms.intersect(automaton, null);
TermsEnum rightIntersection = rightTerms.intersect(automaton, null);
assertTermsEnum(leftIntersection, rightIntersection, rarely(), bothHaveFreqs, bothHavePositions);
}
}
}
}
private void assertTermsSeeking(Terms leftTerms, Terms rightTerms) throws Exception {
TermsEnum leftEnum = null;
TermsEnum rightEnum = null;
// just an upper bound
int numTests = atLeast(20);
Random random = random();
// collect this number of terms from the left side
HashSet<BytesRef> tests = new HashSet<>();
int numPasses = 0;
while (numPasses < 10 && tests.size() < numTests) {
leftEnum = leftTerms.iterator();
BytesRef term = null;
while ((term = leftEnum.next()) != null) {
int code = random.nextInt(10);
if (code == 0) {
// the term
tests.add(BytesRef.deepCopyOf(term));
} else if (code == 1) {
// truncated subsequence of term
term = BytesRef.deepCopyOf(term);
if (term.length > 0) {
// truncate it
term.length = random.nextInt(term.length);
}
} else if (code == 2) {
// term, but ensure a non-zero offset
byte[] newbytes = new byte[term.length + 5];
System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length);
tests.add(new BytesRef(newbytes, 5, term.length));
}
}
numPasses++;
}
ArrayList<BytesRef> shuffledTests = new ArrayList<>(tests);
Collections.shuffle(shuffledTests, random);
for (BytesRef b : shuffledTests) {
leftEnum = leftTerms.iterator();
rightEnum = rightTerms.iterator();
assertEquals(leftEnum.seekExact(b), rightEnum.seekExact(b));
assertEquals(leftEnum.seekExact(b), rightEnum.seekExact(b));
SeekStatus leftStatus;
SeekStatus rightStatus;
leftStatus = leftEnum.seekCeil(b);
rightStatus = rightEnum.seekCeil(b);
assertEquals(leftStatus, rightStatus);
if (leftStatus != SeekStatus.END) {
assertEquals(leftEnum.term(), rightEnum.term());
}
leftStatus = leftEnum.seekCeil(b);
rightStatus = rightEnum.seekCeil(b);
assertEquals(leftStatus, rightStatus);
if (leftStatus != SeekStatus.END) {
assertEquals(leftEnum.term(), rightEnum.term());
}
}
}
/** checks collection-level statistics on Terms */
public void assertTermsStatistics(Terms leftTerms, Terms rightTerms) throws Exception {
assertEquals(leftTerms.getDocCount(), rightTerms.getDocCount());
assertEquals(leftTerms.getSumDocFreq(), rightTerms.getSumDocFreq());
if (leftTerms.hasFreqs() && rightTerms.hasFreqs()) {
assertEquals(leftTerms.getSumTotalTermFreq(), rightTerms.getSumTotalTermFreq());
}
if (leftTerms.size() != -1 && rightTerms.size() != -1) {
assertEquals(leftTerms.size(), rightTerms.size());
}
}
/**
* checks the terms | BlockPostingsFormat3Tests |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/aot/BeanDefinitionPropertyValueCodeGeneratorDelegatesTests.java | {
"start": 10138,
"end": 10666
} | class ____ {
@Test
void generateWhenManagedMap() {
ManagedMap<String, String> map = new ManagedMap<>();
map.put("k1", "v1");
map.put("k2", "v2");
compile(map, (instance, compiler) -> assertThat(instance).isEqualTo(map)
.isInstanceOf(ManagedMap.class));
}
@Test
void generateWhenEmptyManagedMap() {
ManagedMap<String, String> map = new ManagedMap<>();
compile(map, (instance, compiler) -> assertThat(instance).isEqualTo(map)
.isInstanceOf(ManagedMap.class));
}
}
@Nested
| ManagedMapTests |
java | apache__camel | components/camel-jolt/src/test/java/org/apache/camel/component/jolt/JoltRemovrTest.java | {
"start": 1233,
"end": 2248
} | class ____ extends CamelTestSupport {
@Test
public void testFirstSampleJolt() {
Exchange exchange = template.request("direct://start", exchange1 -> {
Map<String, String> body = new HashMap<>();
body.put("keepMe", "This should still be in the result");
body.put("Hello", "World");
body.put("removeMe", "This should be gone");
exchange1.getIn().setBody(body);
});
assertEquals(2, exchange.getMessage().getBody(Map.class).size());
assertNull(exchange.getMessage().getBody(Map.class).get("removeMe"));
assertEquals("World", exchange.getMessage().getBody(Map.class).get("Hello"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct://start")
.to("jolt:org/apache/camel/component/jolt/removr.json?transformDsl=Removr");
}
};
}
}
| JoltRemovrTest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/asm/SymbolTable.java | {
"start": 2576,
"end": 2681
} | class ____ which this symbol table belongs. */
private int majorVersion;
/** The internal name of the | to |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/targetannotation/House.java | {
"start": 351,
"end": 584
} | class ____ {
@Id
long id;
@Embedded
@TargetEmbeddable(AddressImpl.class)
private Address address;
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
}
| House |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/generics/GenericMappedSuperclassNestedJoinTest.java | {
"start": 5927,
"end": 6081
} | class ____ extends SeqOrderLinkObjectWithUserContext<Selection> {
private Integer ident;
}
@MappedSuperclass
public abstract static | SelectionProductRule |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java | {
"start": 1680,
"end": 3262
} | class ____ extends
SingleEventEmitter {
HistoryEvent maybeEmitEvent(ParsedLine line, String taskAttemptIDName,
HistoryEventEmitter thatg) {
if (taskAttemptIDName == null) {
return null;
}
TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptIDName);
String finishTime = line.get("FINISH_TIME");
String status = line.get("TASK_STATUS");
if (finishTime != null && status != null
&& status.equalsIgnoreCase("success")) {
String hostName = line.get("HOSTNAME");
String counters = line.get("COUNTERS");
String state = line.get("STATE_STRING");
String shuffleFinish = line.get("SHUFFLE_FINISHED");
String sortFinish = line.get("SORT_FINISHED");
if (shuffleFinish != null && sortFinish != null
&& "success".equalsIgnoreCase(status)) {
ReduceAttempt20LineHistoryEventEmitter that =
(ReduceAttempt20LineHistoryEventEmitter) thatg;
return new ReduceAttemptFinishedEvent
(taskAttemptID,
that.originalTaskType, status,
Long.parseLong(shuffleFinish),
Long.parseLong(sortFinish),
Long.parseLong(finishTime),
hostName, -1, null,
state, maybeParseCounters(counters),
null);
}
}
return null;
}
}
@Override
List<SingleEventEmitter> finalSEEs() {
return finals;
}
@Override
List<SingleEventEmitter> nonFinalSEEs() {
return nonFinals;
}
}
| ReduceAttemptFinishedEventEmitter |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/service/spi/EventListenerRegistrationException.java | {
"start": 276,
"end": 525
} | class ____ extends HibernateException {
public EventListenerRegistrationException(String s) {
super( s );
}
public EventListenerRegistrationException(String string, Throwable root) {
super( string, root );
}
}
| EventListenerRegistrationException |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/spi/InFlightMetadataCollector.java | {
"start": 2784,
"end": 13752
} | interface ____ extends MetadataImplementor {
BootstrapContext getBootstrapContext();
/**
* @deprecated Use {@linkplain BootstrapContext#getModelsContext()} instead.
*/
@Deprecated
default ModelsContext getModelsContext() {
return getBootstrapContext().getModelsContext();
}
default ClassDetailsRegistry getClassDetailsRegistry() {
return getBootstrapContext().getModelsContext().getClassDetailsRegistry();
}
default AnnotationDescriptorRegistry getAnnotationDescriptorRegistry() {
return getBootstrapContext().getModelsContext().getAnnotationDescriptorRegistry();
}
GlobalRegistrations getGlobalRegistrations();
PersistenceUnitMetadata getPersistenceUnitMetadata();
/**
* Add the {@link PersistentClass} for an entity mapping.
*
* @param persistentClass The entity metadata
*
* @throws DuplicateMappingException Indicates there was already an entry
* corresponding to the given entity name.
*/
void addEntityBinding(PersistentClass persistentClass) throws DuplicateMappingException;
/**
* A map of {@link PersistentClass} by entity name.
* Needed for {@link SecondPass} handling.
*/
Map<String, PersistentClass> getEntityBindingMap();
void registerComponent(Component component);
void registerGenericComponent(Component component);
void registerEmbeddableSubclass(ClassDetails superclass, ClassDetails subclass);
List<ClassDetails> getEmbeddableSubclasses(ClassDetails superclass);
/**
* Adds an import (for use in HQL).
*
* @param importName The name to be used in HQL
* @param className The fully-qualified name of the class
*
* @throws DuplicateMappingException If className already is mapped to another
* entity name in this repository.
*/
void addImport(String importName, String className) throws DuplicateMappingException;
/**
* Add collection mapping metadata to this repository.
*
* @param collection The collection metadata
*
* @throws DuplicateMappingException Indicates there was already an entry
* corresponding to the given collection role
*/
void addCollectionBinding(Collection collection) throws DuplicateMappingException;
/**
* Adds table metadata to this repository returning the created
* metadata instance.
*
* @param schema The named schema in which the table belongs (or null).
* @param catalog The named catalog in which the table belongs (or null).
* @param name The table name
* @param subselect A select statement which defines a logical table, much
* like a DB view.
* @param isAbstract Is the table abstract (i.e. not really existing in the DB)?
*
* @return The created table metadata, or the existing reference.
*/
Table addTable(
String schema,
String catalog,
String name,
String subselect,
boolean isAbstract,
MetadataBuildingContext buildingContext);
/**
* Adds a 'denormalized table' to this repository.
*
* @param schema The named schema in which the table belongs (or null).
* @param catalog The named catalog in which the table belongs (or null).
* @param name The table name
* @param isAbstract Is the table abstract (i.e. not really existing in the DB)?
* @param subselect A select statement which defines a logical table, much
* like a DB view.
* @param includedTable The "common" table
*
* @return The created table metadata.
*
* @throws DuplicateMappingException If such a table mapping already exists.
*/
Table addDenormalizedTable(
String schema,
String catalog,
String name,
boolean isAbstract,
String subselect,
Table includedTable,
MetadataBuildingContext buildingContext) throws DuplicateMappingException;
/**
* Adds metadata for a named query to this repository.
*
* @param query The metadata
*
* @throws DuplicateMappingException If a query already exists with that name.
*/
void addNamedQuery(NamedHqlQueryDefinition<?> query) throws DuplicateMappingException;
/**
* Adds metadata for a named SQL query to this collector.
*/
void addNamedNativeQuery(NamedNativeQueryDefinition<?> query) throws DuplicateMappingException;
/**
* Adds the metadata for a named SQL result set mapping to this collector.
*/
void addResultSetMapping(NamedResultSetMappingDescriptor resultSetMappingDefinition) throws DuplicateMappingException;
/**
* Adds metadata for a named stored procedure call to this collector.
*/
void addNamedProcedureCallDefinition(NamedProcedureCallDefinition definition) throws DuplicateMappingException;
/**
* Adds metadata for a named entity graph to this repository
*
* @param namedEntityGraphDefinition The procedure call information
*
* @throws DuplicateMappingException If an entity graph already exists with that name.
*/
void addNamedEntityGraph(NamedEntityGraphDefinition namedEntityGraphDefinition);
/**
* Adds a type definition to this metadata repository.
*
* @param typeDefinition The named type definition to add.
*
* @throws DuplicateMappingException If a {@link TypeDefinition} already exists with that name.
*
* @deprecated Use {@link #getTypeDefinitionRegistry()} instead
*
* @see #getTypeDefinitionRegistry()
*/
@Deprecated
void addTypeDefinition(TypeDefinition typeDefinition);
/**
* Access to the {@link TypeDefinitionRegistry}, which may be used to add
* type definitions to this metadata repository.
*/
TypeDefinitionRegistry getTypeDefinitionRegistry();
/**
* Adds a filter definition to this repository.
*
* @param definition The filter definition to add.
*
* @throws DuplicateMappingException If a {@link FilterDefinition} already exists with that name.
*/
void addFilterDefinition(FilterDefinition definition);
/**
* Add metadata pertaining to an auxiliary database object to this repository.
*
* @param auxiliaryDatabaseObject The metadata.
*/
void addAuxiliaryDatabaseObject(AuxiliaryDatabaseObject auxiliaryDatabaseObject);
/**
* Add a {@link FetchProfile}.
*/
void addFetchProfile(FetchProfile profile);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// make sure these are account for better in metamodel
void addIdentifierGenerator(IdentifierGeneratorDefinition generatorDefinition);
/**
* Obtain the {@link ConverterRegistry} which may be
* used to register {@link AttributeConverter}s.
*/
ConverterRegistry getConverterRegistry();
/**
* Apply the descriptor for an {@link AttributeConverter}
*
* @deprecated use {@link #getConverterRegistry()}
*/
@Deprecated(since = "6.2")
void addAttributeConverter(ConverterDescriptor<?,?> descriptor);
/**
* Apply an {@link AttributeConverter}
*
* @deprecated use {@link #getConverterRegistry()}
*/
@Deprecated(since = "6.2")
void addAttributeConverter(Class<? extends AttributeConverter<?, ?>> converterClass);
/**
* @deprecated use {@link #getConverterRegistry()}
*/
@Deprecated(since = "6.2")
void addRegisteredConversion(RegisteredConversion conversion);
/**
* @deprecated use {@link #getConverterRegistry()}
*/
@Deprecated(since = "6.2")
ConverterAutoApplyHandler getAttributeConverterAutoApplyHandler();
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// second passes
void addSecondPass(SecondPass secondPass);
void addSecondPass(SecondPass sp, boolean onTopOfTheQueue);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// stuff needed for annotation binding :(
void addTableNameBinding(Identifier logicalName, Table table);
void addTableNameBinding(
String schema,
String catalog,
String logicalName,
String realTableName,
Table denormalizedSuperTable);
String getLogicalTableName(Table ownerTable);
String getPhysicalTableName(Identifier logicalName);
String getPhysicalTableName(String logicalName);
void addColumnNameBinding(Table table, Identifier logicalColumnName, Column column);
void addColumnNameBinding(Table table, String logicalColumnName, Column column);
String getPhysicalColumnName(Table table, Identifier logicalName) throws MappingException;
String getPhysicalColumnName(Table table, String logicalName) throws MappingException;
String getLogicalColumnName(Table table, Identifier physicalName);
String getLogicalColumnName(Table table, String physicalName);
void addDefaultIdentifierGenerator(IdentifierGeneratorDefinition generatorDefinition);
void addDefaultQuery(NamedHqlQueryDefinition<?> queryDefinition);
void addDefaultNamedNativeQuery(NamedNativeQueryDefinition<?> query);
void addDefaultResultSetMapping(NamedResultSetMappingDescriptor definition);
void addDefaultNamedProcedureCall(NamedProcedureCallDefinitionImpl procedureCallDefinition);
AnnotatedClassType addClassType(ClassDetails classDetails);
AnnotatedClassType getClassType(ClassDetails classDetails);
void addMappedSuperclass(Class<?> type, MappedSuperclass mappedSuperclass);
MappedSuperclass getMappedSuperclass(Class<?> type);
PropertyData getPropertyAnnotatedWithMapsId(ClassDetails persistentClassDetails, String propertyName);
void addPropertyAnnotatedWithMapsId(ClassDetails entityClassDetails, PropertyData propertyAnnotatedElement);
void addToOneAndIdProperty(ClassDetails entityClassDetails, PropertyData propertyAnnotatedElement);
PropertyData getPropertyAnnotatedWithIdAndToOne(ClassDetails persistentClassDetails, String propertyName);
boolean isInSecondPass();
NaturalIdUniqueKeyBinder locateNaturalIdUniqueKeyBinder(String entityName);
void registerNaturalIdUniqueKeyBinder(String entityName, NaturalIdUniqueKeyBinder ukBinder);
void registerValueMappingResolver(Function<MetadataBuildingContext,Boolean> resolver);
void addJavaTypeRegistration(Class<?> javaType, JavaType<?> jtd);
void addJdbcTypeRegistration(int typeCode, JdbcType jdbcType);
void registerEmbeddableInstantiator(Class<?> embeddableType, Class<? extends EmbeddableInstantiator> instantiator);
Class<? extends EmbeddableInstantiator> findRegisteredEmbeddableInstantiator(Class<?> embeddableType);
void registerCompositeUserType(Class<?> embeddableType, Class<? extends CompositeUserType<?>> userType);
Class<? extends CompositeUserType<?>> findRegisteredCompositeUserType(Class<?> embeddableType);
void registerUserType(Class<?> embeddableType, Class<? extends UserType<?>> userType);
Class<? extends UserType<?>> findRegisteredUserType(Class<?> basicType);
@Deprecated(since = "7.2", forRemoval = true) // let's not leak annotation types onto this SPI
void addCollectionTypeRegistration(org.hibernate.annotations.CollectionTypeRegistration registrationAnnotation);
void addCollectionTypeRegistration(CollectionClassification classification, CollectionTypeRegistrationDescriptor descriptor);
CollectionTypeRegistrationDescriptor findCollectionTypeRegistration(CollectionClassification classification);
| InFlightMetadataCollector |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/TaskExecutorPartitionTrackerImplTest.java | {
"start": 13147,
"end": 15833
} | class ____
implements ShuffleEnvironment<ResultPartition, SingleInputGate> {
private final ShuffleEnvironment<ResultPartition, SingleInputGate>
backingShuffleEnvironment = new NettyShuffleEnvironmentBuilder().build();
CompletableFuture<Collection<ResultPartitionID>> releasePartitionsLocallyFuture = null;
@Override
public int start() throws IOException {
return backingShuffleEnvironment.start();
}
@Override
public ShuffleIOOwnerContext createShuffleIOOwnerContext(
String ownerName, ExecutionAttemptID executionAttemptID, MetricGroup parentGroup) {
return backingShuffleEnvironment.createShuffleIOOwnerContext(
ownerName, executionAttemptID, parentGroup);
}
@Override
public List<ResultPartition> createResultPartitionWriters(
ShuffleIOOwnerContext ownerContext,
List<ResultPartitionDeploymentDescriptor> resultPartitionDeploymentDescriptors) {
return backingShuffleEnvironment.createResultPartitionWriters(
ownerContext, resultPartitionDeploymentDescriptors);
}
@Override
public void releasePartitionsLocally(Collection<ResultPartitionID> partitionIds) {
backingShuffleEnvironment.releasePartitionsLocally(partitionIds);
if (releasePartitionsLocallyFuture != null) {
releasePartitionsLocallyFuture.complete(partitionIds);
}
}
@Override
public Collection<ResultPartitionID> getPartitionsOccupyingLocalResources() {
return backingShuffleEnvironment.getPartitionsOccupyingLocalResources();
}
@Override
public List<SingleInputGate> createInputGates(
ShuffleIOOwnerContext ownerContext,
PartitionProducerStateProvider partitionProducerStateProvider,
List<InputGateDeploymentDescriptor> inputGateDeploymentDescriptors) {
return backingShuffleEnvironment.createInputGates(
ownerContext, partitionProducerStateProvider, inputGateDeploymentDescriptors);
}
@Override
public boolean updatePartitionInfo(
ExecutionAttemptID consumerID, PartitionInfo partitionInfo)
throws IOException, InterruptedException {
return backingShuffleEnvironment.updatePartitionInfo(consumerID, partitionInfo);
}
@Override
public void close() throws Exception {
backingShuffleEnvironment.close();
}
}
private static | TestingShuffleEnvironment |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java | {
"start": 1092,
"end": 2625
} | class ____ extends ESTestCase {
public void testIndexNameCannotBeNullOrEmpty() {
expectThrows(IllegalArgumentException.class, () -> new SimulateIndexTemplateRequest(TEST_REQUEST_TIMEOUT, null));
expectThrows(IllegalArgumentException.class, () -> new SimulateIndexTemplateRequest(TEST_REQUEST_TIMEOUT, ""));
}
public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() {
Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null);
ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build();
TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test");
request.indexTemplate(globalTemplate);
SimulateIndexTemplateRequest simulateRequest = new SimulateIndexTemplateRequest(TEST_REQUEST_TIMEOUT, "testing");
simulateRequest.indexTemplateRequest(request);
ActionRequestValidationException validationException = simulateRequest.validate();
assertThat(validationException, is(notNullValue()));
List<String> validationErrors = validationException.validationErrors();
assertThat(validationErrors.size(), is(1));
String error = validationErrors.get(0);
assertThat(error, is("global composable templates may not specify the setting " + IndexMetadata.SETTING_INDEX_HIDDEN));
}
}
| SimulateIndexTemplateRequestTests |
java | quarkusio__quarkus | integration-tests/spring-data-rest/src/main/java/io/quarkus/it/spring/data/rest/AuthorsRepository.java | {
"start": 282,
"end": 600
} | interface ____ extends CrudRepository<Author, Long> {
@Override
@RolesAllowed("user")
Iterable<Author> findAll();
@RestResource(exported = false)
@RolesAllowed("superuser")
<S extends Author> S save(S author);
@RestResource(exported = false)
void deleteById(Long id);
}
| AuthorsRepository |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/spi/SqlAppender.java | {
"start": 306,
"end": 1978
} | interface ____ extends Appendable {
String NO_SEPARATOR = "";
String COMMA_SEPARATOR = ",";
char COMMA_SEPARATOR_CHAR = ',';
char WHITESPACE = ' ';
char OPEN_PARENTHESIS = '(';
char CLOSE_PARENTHESIS = ')';
char PARAM_MARKER = '?';
String NULL_KEYWORD = "null";
/**
* Add the passed fragment into the in-flight buffer
*/
void appendSql(String fragment);
default void appendSql(char fragment) {
appendSql( Character.toString( fragment ) );
}
default void appendSql(int value) {
appendSql( Integer.toString( value ) );
}
default void appendSql(long value) {
appendSql( Long.toString( value ) );
}
default void appendSql(boolean value) {
appendSql( String.valueOf( value ) );
}
default void appendSql(double value) {
appendSql( String.valueOf( value ) );
}
default void appendSql(float value) {
appendSql( String.valueOf( value ) );
}
default void appendDoubleQuoteEscapedString(String value) {
final StringBuilder sb = new StringBuilder( value.length() + 2 );
QuotingHelper.appendDoubleQuoteEscapedString( sb, value );
appendSql( sb.toString() );
}
default void appendSingleQuoteEscapedString(String value) {
final StringBuilder sb = new StringBuilder( value.length() + 2 );
QuotingHelper.appendSingleQuoteEscapedString( sb, value );
appendSql( sb.toString() );
}
default Appendable append(CharSequence csq) {
appendSql( csq.toString() );
return this;
}
default Appendable append(CharSequence csq, int start, int end) {
appendSql( csq.toString().substring( start, end ) );
return this;
}
default Appendable append(char c) {
appendSql( Character.toString( c ) );
return this;
}
}
| SqlAppender |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/filter/TestMapFiltering.java | {
"start": 1463,
"end": 1779
} | class ____ {
@JsonFilter("filterX")
public Map<String,Integer> values;
public MapBeanNoOffset() {
values = new LinkedHashMap<String,Integer>();
values.put("a", 1);
values.put("b", 2);
values.put("c", 3);
}
}
static | MapBeanNoOffset |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/records/AuxServiceFile.java | {
"start": 1410,
"end": 1474
} | class ____ {
/**
* Config Type.
**/
public | AuxServiceFile |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/format/EnumNumberFormatShape3580PojoTest.java | {
"start": 982,
"end": 1242
} | enum ____ {
OFF(17),
ON(31),
UNKNOWN(99);
private int value;
PojoStateInt3580(int value) { this.value = value; }
@JsonValue
public int value() {return this.value;}
}
public static | PojoStateInt3580 |
java | google__gson | gson/src/test/java/com/google/gson/functional/JsonAdapterSerializerDeserializerTest.java | {
"start": 2609,
"end": 2736
} | class ____ {
final String name;
private User(String name) {
this.name = name;
}
}
private static final | User |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/secondarytable/SecondaryTableQuotingTest.java | {
"start": 2055,
"end": 2205
} | class ____ {
@Id
private Long id;
private String name;
@Column(name = "bar_value", table = "bar")
private Long barValue;
}
public static | Foo |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneQueryEvaluatorTests.java | {
"start": 2531,
"end": 12734
} | class ____<T extends Block, U extends Block.Builder> extends ComputeTestCase {
private static final String FIELD = "g";
@SuppressWarnings("unchecked")
public void testDenseCollectorSmall() throws IOException {
try (LuceneQueryEvaluator.DenseCollector<U> collector = createDenseCollector(0, 2)) {
collector.setScorer(getScorer());
collector.collect(0);
collector.collect(1);
collector.collect(2);
collector.finish();
try (T result = (T) collector.build()) {
for (int i = 0; i <= 2; i++) {
assertCollectedResultMatch(result, i, true);
}
}
}
}
@SuppressWarnings("unchecked")
public void testDenseCollectorSimple() throws IOException {
try (LuceneQueryEvaluator.DenseCollector<U> collector = createDenseCollector(0, 10)) {
collector.setScorer(getScorer());
collector.collect(2);
collector.collect(5);
collector.finish();
try (T result = (T) collector.build()) {
for (int i = 0; i < 11; i++) {
assertCollectedResultMatch(result, i, i == 2 || i == 5);
}
}
}
}
@SuppressWarnings("unchecked")
public void testDenseCollector() throws IOException {
int length = between(1, 10_000);
int min = between(0, Integer.MAX_VALUE - length - 1);
int max = min + length;
boolean[] expected = new boolean[length];
try (LuceneQueryEvaluator.DenseCollector<U> collector = createDenseCollector(min, max)) {
collector.setScorer(getScorer());
for (int i = 0; i < length; i++) {
expected[i] = randomBoolean();
if (expected[i]) {
collector.collect(min + i);
}
}
collector.finish();
try (T result = (T) collector.build()) {
for (int i = 0; i < length; i++) {
assertCollectedResultMatch(result, i, expected[i]);
}
}
}
}
/**
* Create a dense collector for the given range.
*/
protected abstract LuceneQueryEvaluator.DenseCollector<U> createDenseCollector(int min, int max);
/**
* Chceks that the collected results at the given position corresponds to a match or no match
*/
protected abstract void assertCollectedResultMatch(T resultVector, int position, boolean isMatch);
public void testTermQuery() throws IOException {
Set<String> values = values();
String term = values.iterator().next();
List<Page> results = runQuery(values, new TermQuery(new Term(FIELD, term)), false);
assertTermsQuery(results, Set.of(term), 1);
}
public void testTermQueryShuffled() throws IOException {
Set<String> values = values();
String term = values.iterator().next();
List<Page> results = runQuery(values, new ConstantScoreQuery(new TermQuery(new Term(FIELD, term))), true);
assertTermsQuery(results, Set.of(term), 1);
}
public void testTermsQuery() throws IOException {
testTermsQuery(false);
}
public void testTermsQueryShuffled() throws IOException {
testTermsQuery(true);
}
private void testTermsQuery(boolean shuffleDocs) throws IOException {
Set<String> values = values();
Iterator<String> itr = values.iterator();
TreeSet<String> matching = new TreeSet<>();
TreeSet<BytesRef> matchingBytes = new TreeSet<>();
int expectedMatchCount = between(2, values.size());
for (int i = 0; i < expectedMatchCount; i++) {
String v = itr.next();
matching.add(v);
matchingBytes.add(new BytesRef(v));
}
List<Page> results = runQuery(values, new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_REWRITE, FIELD, matchingBytes), shuffleDocs);
assertTermsQuery(results, matching, expectedMatchCount);
}
protected void assertTermsQuery(List<Page> results, Set<String> matching, int expectedMatchCount) {
int matchCount = 0;
for (Page page : results) {
int initialBlockIndex = termsBlockIndex(page);
BytesRefBlock terms = page.<BytesRefBlock>getBlock(initialBlockIndex);
@SuppressWarnings("unchecked")
T resultVector = (T) page.getBlock(resultsBlockIndex(page));
for (int i = 0; i < page.getPositionCount(); i++) {
BytesRef termAtPosition = terms.getBytesRef(i, new BytesRef());
boolean isMatch = matching.contains(termAtPosition.utf8ToString());
assertTermResultMatch(resultVector, i, isMatch);
if (isMatch) {
matchCount++;
}
}
}
assertThat(matchCount, equalTo(expectedMatchCount));
}
/**
* Checks that the result at the given position corresponds to a term match or no match
*/
protected abstract void assertTermResultMatch(T resultVector, int position, boolean isMatch);
private List<Page> runQuery(Set<String> values, Query query, boolean shuffleDocs) throws IOException {
DriverContext driverContext = driverContext();
BlockFactory blockFactory = driverContext.blockFactory();
return withReader(values, reader -> {
IndexSearcher searcher = new IndexSearcher(reader);
var shardContext = new LuceneSourceOperatorTests.MockShardContext(reader, 0);
LuceneQueryEvaluator.ShardConfig shard = new LuceneQueryEvaluator.ShardConfig(searcher.rewrite(query), searcher);
List<Operator> operators = new ArrayList<>();
if (shuffleDocs) {
operators.add(new ShuffleDocsOperator(blockFactory));
}
operators.add(
new ValuesSourceReaderOperator(
blockFactory,
ByteSizeValue.ofGb(1).getBytes(),
List.of(
new ValuesSourceReaderOperator.FieldInfo(
FIELD,
ElementType.BYTES_REF,
false,
unused -> new BytesRefsFromOrdsBlockLoader(FIELD)
)
),
new IndexedByShardIdFromSingleton<>(new ValuesSourceReaderOperator.ShardContext(reader, (sourcePaths) -> {
throw new UnsupportedOperationException();
}, 0.2)),
0
)
);
var shardConfig = new IndexedByShardIdFromSingleton<>(new LuceneQueryEvaluator.ShardConfig(searcher.rewrite(query), searcher));
operators.add(createOperator(driverContext, shardConfig));
List<Page> results = new ArrayList<>();
Driver driver = TestDriverFactory.create(
driverContext,
LuceneQueryEvaluatorTests.luceneOperatorFactory(reader, new MatchAllDocsQuery(), usesScoring()).get(driverContext),
operators,
new TestResultPageSinkOperator(results::add)
);
OperatorTestCase.runDriver(driver);
OperatorTests.assertDriverContext(driverContext);
return results;
});
}
private <T> T withReader(Set<String> values, CheckedFunction<DirectoryReader, T, IOException> run) throws IOException {
try (BaseDirectoryWrapper dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
for (String value : values) {
writer.addDocument(List.of(new KeywordField(FIELD, value, Field.Store.NO)));
}
writer.commit();
try (DirectoryReader reader = writer.getReader()) {
return run.apply(reader);
}
}
}
private Set<String> values() {
int maxNumDocs = between(10, 1_000);
int keyLength = randomIntBetween(1, 10);
Set<String> values = new HashSet<>();
for (int i = 0; i < maxNumDocs; i++) {
values.add(randomAlphaOfLength(keyLength));
}
return values;
}
/**
* A {@link DriverContext} with a non-breaking-BigArrays.
*/
private DriverContext driverContext() {
BlockFactory blockFactory = blockFactory();
return new DriverContext(blockFactory.bigArrays(), blockFactory);
}
// Returns the initial block index, ignoring the score block if scoring is enabled
protected int termsBlockIndex(Page page) {
assert page.getBlock(0) instanceof DocBlock : "expected doc block at index 0";
if (usesScoring()) {
assert page.getBlock(1) instanceof DoubleBlock : "expected double block at index 1";
return 2;
} else {
return 1;
}
}
private static LuceneOperator.Factory luceneOperatorFactory(IndexReader reader, Query query, boolean scoring) {
final ShardContext searchContext = new LuceneSourceOperatorTests.MockShardContext(reader, 0);
return new LuceneSourceOperator.Factory(
new IndexedByShardIdFromSingleton<>(searchContext),
ctx -> List.of(new LuceneSliceQueue.QueryAndTags(query, List.of())),
randomFrom(DataPartitioning.values()),
DataPartitioning.AutoStrategy.DEFAULT,
randomIntBetween(1, 10),
randomPageSize(),
LuceneOperator.NO_LIMIT,
scoring
);
}
// Returns the block index for the results to check
protected abstract int resultsBlockIndex(Page page);
/**
* Returns a test scorer to use for scoring docs. Can be null
*/
protected abstract Scorable getScorer();
/**
* Create the operator to test
*/
protected abstract Operator createOperator(DriverContext blockFactory, IndexedByShardId<LuceneQueryEvaluator.ShardConfig> shards);
/**
* Should the test use scoring?
*/
protected abstract boolean usesScoring();
}
| LuceneQueryEvaluatorTests |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/connector/sink2/StandardSinkTopologies.java | {
"start": 1450,
"end": 3008
} | class ____ {
public static final String GLOBAL_COMMITTER_TRANSFORMATION_NAME = "Global Committer";
private StandardSinkTopologies() {}
/**
* Adds a global committer to the pipeline that runs as final operator with a parallelism of
* one.
*/
public static <CommT> void addGlobalCommitter(
DataStream<CommittableMessage<CommT>> committables,
SerializableFunction<CommitterInitContext, Committer<CommT>> committerFactory,
SerializableSupplier<SimpleVersionedSerializer<CommT>> committableSerializer) {
committables
.getExecutionEnvironment()
.addOperator(
new GlobalCommitterTransform<>(
committables, committerFactory, committableSerializer));
}
/**
* Adds a global committer to the pipeline that runs as final operator with a parallelism of
* one.
*/
public static <CommT> void addGlobalCommitter(
DataStream<CommittableMessage<CommT>> committables,
SerializableSupplier<Committer<CommT>> committerFactory,
SerializableSupplier<SimpleVersionedSerializer<CommT>> committableSerializer) {
committables
.getExecutionEnvironment()
.addOperator(
new GlobalCommitterTransform<>(
committables,
ctx -> committerFactory.get(),
committableSerializer));
}
}
| StandardSinkTopologies |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/LogbackExcludedTest.java | {
"start": 2763,
"end": 3515
} | class ____.dropwizard.configuration.ConfigurationMetadata$1: Type ch.qos.logback.access.common.spi.IAccessEvent
// not present" to be emitted to stderr
String err = byteStream.toString();
assertThat(err).isEmpty();
}
}
/**
* Replace stderr with a byte-backed stream until the returned stream is closed.
*/
private static ByteArrayOutputStream captureStderr() {
PrintStream err = System.err;
ByteArrayOutputStream byteStream = new ByteArrayOutputStream() {
@Override
public void close() {
System.setErr(err);
}
};
System.setErr(new PrintStream(byteStream));
return byteStream;
}
private | io |
java | google__dagger | javatests/dagger/internal/codegen/MissingBindingValidationTest.java | {
"start": 52016,
"end": 52351
} | interface ____ {",
" RepeatedSub getSub();",
"}");
Source child2 =
CompilerTests.javaSource(
"test.Child2",
"package test;",
"",
"import dagger.Subcomponent;",
"",
"@Subcomponent(modules = Child2Module.class)",
" | Child1 |
java | elastic__elasticsearch | x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java | {
"start": 512,
"end": 1874
} | class ____ extends AbstractWireSerializingTestCase<SqlTranslateResponse> {
@Override
protected SqlTranslateResponse createTestInstance() {
SearchSourceBuilder s = new SearchSourceBuilder();
if (randomBoolean()) {
long docValues = iterations(5, 10);
for (int i = 0; i < docValues; i++) {
s.docValueField(randomAlphaOfLength(10));
}
}
if (randomBoolean()) {
long sourceFields = iterations(5, 10);
for (int i = 0; i < sourceFields; i++) {
s.storedField(randomAlphaOfLength(10));
}
}
s.fetchSource(randomBoolean()).from(randomInt(256)).explain(randomBoolean()).size(randomInt(256));
return new SqlTranslateResponse(s);
}
@Override
protected Writeable.Reader<SqlTranslateResponse> instanceReader() {
return SqlTranslateResponse::new;
}
@Override
protected SqlTranslateResponse mutateInstance(SqlTranslateResponse instance) throws IOException {
SqlTranslateResponse sqlTranslateResponse = copyInstance(instance);
SearchSourceBuilder source = sqlTranslateResponse.source();
source.size(randomValueOtherThan(source.size(), () -> between(0, Integer.MAX_VALUE)));
return new SqlTranslateResponse(source);
}
}
| SqlTranslateResponseTests |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/source/generation/ConfigurationPropertySourcesContainer.java | {
"start": 892,
"end": 1128
} | class ____ {
/**
* A name.
*/
private String name;
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
}
@TestConfigurationPropertiesSource
public static | First |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStAsyncAndSyncCompatibilityTest.java | {
"start": 3255,
"end": 13542
} | class ____ {
protected ForStStateBackend forStStateBackend;
protected StateExecutionController<String> aec;
protected MailboxExecutor mailboxExecutor;
protected RecordContext<String> context;
protected MockEnvironment env;
@BeforeEach
public void setup(@TempDir File temporaryFolder) throws IOException {
FileSystem.initialize(new Configuration(), null);
Configuration configuration = new Configuration();
configuration.set(ForStOptions.PRIMARY_DIRECTORY, temporaryFolder.toURI().toString());
forStStateBackend = new ForStStateBackend().configure(configuration, null);
env = getMockEnvironment(temporaryFolder);
mailboxExecutor =
new MailboxExecutorImpl(
new TaskMailboxImpl(), 0, StreamTaskActionExecutor.IMMEDIATE);
}
@Test
void testForStTransFromAsyncToSync() throws Exception {
ForStKeyedStateBackend<String> keyedBackend =
setUpAsyncKeyedStateBackend(Collections.emptyList());
MapStateDescriptor<Integer, String> mapDescriptor =
new MapStateDescriptor<>(
"testState", IntSerializer.INSTANCE, StringSerializer.INSTANCE);
MapState<Integer, String> asyncMapState =
keyedBackend.createState(1, IntSerializer.INSTANCE, mapDescriptor);
ValueStateDescriptor<Integer> valueDescriptor =
new ValueStateDescriptor<>("valueState", IntSerializer.INSTANCE);
ValueState<Integer> asyncValueState =
keyedBackend.createState(1, IntSerializer.INSTANCE, valueDescriptor);
ListStateDescriptor<Integer> listDescriptor =
new ListStateDescriptor<>("listState", IntSerializer.INSTANCE);
ListState<Integer> asyncListState =
keyedBackend.createState(1, IntSerializer.INSTANCE, listDescriptor);
context = aec.buildContext("testRecord", "testKey");
context.retain();
aec.setCurrentContext(context);
asyncMapState.asyncPut(1, "1");
asyncValueState.asyncUpdate(1);
asyncListState.asyncUpdate(Arrays.asList(40, 50));
context.release();
aec.drainInflightRecords(0);
RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot =
keyedBackend.snapshot(
1L,
System.currentTimeMillis(),
env.getCheckpointStorageAccess()
.resolveCheckpointStorageLocation(
1L, CheckpointStorageLocationReference.getDefault()),
CheckpointOptions.forCheckpointWithDefaultLocation());
if (!snapshot.isDone()) {
snapshot.run();
}
SnapshotResult<KeyedStateHandle> snapshotResult = snapshot.get();
KeyedStateHandle stateHandle = snapshotResult.getJobManagerOwnedSnapshot();
IOUtils.closeQuietly(keyedBackend);
ForStSyncKeyedStateBackend<String> syncKeyedStateBackend =
createSyncKeyedStateBackend(
forStStateBackend,
env,
StringSerializer.INSTANCE,
Collections.singletonList(stateHandle));
try {
org.apache.flink.api.common.state.MapState<Integer, String> syncMapState =
syncKeyedStateBackend.getOrCreateKeyedState(
IntSerializer.INSTANCE,
StateDescriptorUtils.transformFromV2ToV1(mapDescriptor));
org.apache.flink.api.common.state.ValueState<Integer> syncValueState =
syncKeyedStateBackend.getOrCreateKeyedState(
IntSerializer.INSTANCE,
StateDescriptorUtils.transformFromV2ToV1(valueDescriptor));
org.apache.flink.api.common.state.ListState<Integer> syncListState =
syncKeyedStateBackend.getOrCreateKeyedState(
IntSerializer.INSTANCE,
StateDescriptorUtils.transformFromV2ToV1(listDescriptor));
syncKeyedStateBackend.setCurrentKey("testKey");
((InternalKvState) syncMapState).setCurrentNamespace(1);
assertThat(syncMapState.get(1)).isEqualTo("1");
((InternalKvState) syncValueState).setCurrentNamespace(1);
assertThat(syncValueState.value()).isEqualTo(1);
((InternalKvState) syncListState).setCurrentNamespace(1);
assertThat(syncListState.get()).isEqualTo(Arrays.asList(40, 50));
} finally {
IOUtils.closeQuietly(syncKeyedStateBackend);
}
}
@Test
void testForStTransFromSyncToAsync() throws Exception {
ForStSyncKeyedStateBackend<String> keyedBackend =
createSyncKeyedStateBackend(
forStStateBackend, env, StringSerializer.INSTANCE, Collections.emptyList());
org.apache.flink.api.common.state.MapStateDescriptor<Integer, String> descriptor =
new org.apache.flink.api.common.state.MapStateDescriptor<>(
"mapState", IntSerializer.INSTANCE, StringSerializer.INSTANCE);
org.apache.flink.api.common.state.MapState<Integer, String> mapState =
keyedBackend.getOrCreateKeyedState(IntSerializer.INSTANCE, descriptor);
org.apache.flink.api.common.state.ValueStateDescriptor<Integer> valueDescriptor =
new org.apache.flink.api.common.state.ValueStateDescriptor<>(
"valueState", IntSerializer.INSTANCE);
org.apache.flink.api.common.state.ValueState<Integer> valueState =
keyedBackend.getOrCreateKeyedState(IntSerializer.INSTANCE, valueDescriptor);
org.apache.flink.api.common.state.ListStateDescriptor<Integer> listStateDescriptor =
new org.apache.flink.api.common.state.ListStateDescriptor<>(
"listState", IntSerializer.INSTANCE);
org.apache.flink.api.common.state.ListState<Integer> listState =
keyedBackend.getOrCreateKeyedState(IntSerializer.INSTANCE, listStateDescriptor);
keyedBackend.setCurrentKey("testKey");
((InternalKvState) mapState).setCurrentNamespace(1);
mapState.put(1, "1");
((InternalKvState) valueState).setCurrentNamespace(1);
valueState.update(1);
((InternalKvState) listState).setCurrentNamespace(1);
listState.update(Arrays.asList(1, 2));
RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot =
keyedBackend.snapshot(
1L,
System.currentTimeMillis(),
env.getCheckpointStorageAccess()
.resolveCheckpointStorageLocation(
1L, CheckpointStorageLocationReference.getDefault()),
CheckpointOptions.forCheckpointWithDefaultLocation());
if (!snapshot.isDone()) {
snapshot.run();
}
SnapshotResult<KeyedStateHandle> snapshotResult = snapshot.get();
KeyedStateHandle stateHandle = snapshotResult.getJobManagerOwnedSnapshot();
IOUtils.closeQuietly(keyedBackend);
ForStKeyedStateBackend<String> asyncKeyedStateBackend =
setUpAsyncKeyedStateBackend(Collections.singletonList(stateHandle));
MapStateDescriptor<Integer, String> newMapDescriptor =
new MapStateDescriptor<>(
"mapState", IntSerializer.INSTANCE, StringSerializer.INSTANCE);
ValueStateDescriptor<Integer> newValueDescriptor =
new ValueStateDescriptor<>("valueState", IntSerializer.INSTANCE);
ListStateDescriptor<Integer> newListDescriptor =
new ListStateDescriptor<>("listState", IntSerializer.INSTANCE);
try {
MapState<Integer, String> asyncMapState =
asyncKeyedStateBackend.createState(1, IntSerializer.INSTANCE, newMapDescriptor);
ValueState<Integer> asyncValueState =
asyncKeyedStateBackend.createState(
1, IntSerializer.INSTANCE, newValueDescriptor);
ListState<Integer> asyncListState =
asyncKeyedStateBackend.createState(
1, IntSerializer.INSTANCE, newListDescriptor);
context = aec.buildContext("testRecord", "testKey");
context.retain();
aec.setCurrentContext(context);
asyncMapState
.asyncGet(1)
.thenCompose(
mapValue -> {
assertThat(mapValue).isEqualTo("1");
return asyncValueState.asyncValue();
})
.thenAccept(
value -> {
assertThat(value).isEqualTo(1);
});
assertThat(listState.get()).isEqualTo(Arrays.asList(1, 2));
context.release();
aec.drainInflightRecords(0);
} finally {
IOUtils.closeQuietly(asyncKeyedStateBackend);
}
}
private ForStKeyedStateBackend<String> setUpAsyncKeyedStateBackend(
Collection<KeyedStateHandle> stateHandles) throws IOException {
ForStKeyedStateBackend<String> keyedStateBackend =
createKeyedStateBackend(
forStStateBackend, env, StringSerializer.INSTANCE, stateHandles);
aec =
new StateExecutionController<>(
mailboxExecutor,
(a, b) -> {},
keyedStateBackend.createStateExecutor(),
new DeclarationManager(),
EpochManager.ParallelMode.SERIAL_BETWEEN_EPOCH,
1,
100,
0,
1,
null,
null);
keyedStateBackend.setup(aec);
return keyedStateBackend;
}
}
| ForStAsyncAndSyncCompatibilityTest |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/util/function/ThrowingRunnable.java | {
"start": 1139,
"end": 1872
} | interface ____<E extends Throwable> {
/**
* The work method.
*
* @throws E Exceptions may be thrown.
*/
void run() throws E;
/**
* Converts a {@link ThrowingRunnable} into a {@link Runnable} which throws all checked
* exceptions as unchecked.
*
* @param throwingRunnable to convert into a {@link Runnable}
* @return {@link Runnable} which throws all checked exceptions as unchecked.
*/
static Runnable unchecked(ThrowingRunnable<?> throwingRunnable) {
return () -> {
try {
throwingRunnable.run();
} catch (Throwable t) {
ThrowingExceptionUtils.rethrow(t);
}
};
}
}
| ThrowingRunnable |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/embeddable/JoinInheritanceSelectJoinTest.java | {
"start": 3671,
"end": 3836
} | class ____ extends Human {
public Parent() {
}
public Parent(String name, Address address) {
super( name, address );
}
}
@Embeddable
public static | Parent |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/loader/ast/internal/CollectionElementLoaderByIndex.java | {
"start": 1084,
"end": 5565
} | class ____ implements Loader {
private final PluralAttributeMapping attributeMapping;
private final SelectStatement sqlAst;
private final JdbcParametersList jdbcParameters;
private final int baseIndex;
private final int keyJdbcCount;
/**
* Shortened form of {@link #CollectionElementLoaderByIndex(PluralAttributeMapping, int, LoadQueryInfluencers, SessionFactoryImplementor)}
* which applied the collection mapping's {@linkplain PluralAttributeMapping.IndexMetadata#getListIndexBase()}
*/
public CollectionElementLoaderByIndex(
PluralAttributeMapping attributeMapping,
LoadQueryInfluencers influencers,
SessionFactoryImplementor sessionFactory) {
this( attributeMapping, attributeMapping.getIndexMetadata().getListIndexBase(), influencers, sessionFactory );
}
/**
* @param baseIndex A base value to apply to the relational index values processed on {@link #incrementIndexByBase}
*/
public CollectionElementLoaderByIndex(
PluralAttributeMapping attributeMapping,
int baseIndex,
LoadQueryInfluencers influencers,
SessionFactoryImplementor sessionFactory) {
this.attributeMapping = attributeMapping;
this.baseIndex = baseIndex;
final var keyDescriptor = attributeMapping.getKeyDescriptor();
final var indexDescriptor = attributeMapping.getIndexDescriptor();
final List<ModelPart> restrictedParts = new ArrayList<>();
restrictedParts.add( keyDescriptor );
if ( indexDescriptor instanceof EntityCollectionPart entityCollectionPart ) {
var identifierMapping =
entityCollectionPart.getEntityMappingType()
.getIdentifierMapping();
restrictedParts.add( identifierMapping );
keyJdbcCount = keyDescriptor.getJdbcTypeCount() + identifierMapping.getJdbcTypeCount();
}
else {
restrictedParts.add( indexDescriptor );
keyJdbcCount = keyDescriptor.getJdbcTypeCount() + indexDescriptor.getJdbcTypeCount();
}
final List<ModelPart> partsToSelect = new ArrayList<>();
partsToSelect.add( attributeMapping.getElementDescriptor() );
final var jdbcParametersBuilder = JdbcParametersList.newBuilder( keyJdbcCount );
sqlAst = LoaderSelectBuilder.createSelect(
attributeMapping,
partsToSelect,
restrictedParts,
null,
1,
influencers,
new LockOptions(),
jdbcParametersBuilder::add,
sessionFactory
);
jdbcParameters = jdbcParametersBuilder.build();
}
@Override
public PluralAttributeMapping getLoadable() {
return getAttributeMapping();
}
public PluralAttributeMapping getAttributeMapping() {
return attributeMapping;
}
public SelectStatement getSqlAst() {
return sqlAst;
}
public JdbcParametersList getJdbcParameters() {
return jdbcParameters;
}
public Object load(Object key, Object index, SharedSessionContractImplementor session) {
final var jdbcParameterBindings = new JdbcParameterBindingsImpl( keyJdbcCount );
int offset = jdbcParameterBindings.registerParametersForEachJdbcValue(
key,
attributeMapping.getKeyDescriptor(),
jdbcParameters,
session
);
offset += jdbcParameterBindings.registerParametersForEachJdbcValue(
incrementIndexByBase( index ),
offset,
attributeMapping.getIndexDescriptor(),
jdbcParameters,
session
);
assert offset == jdbcParameters.size();
final var sessionFactory = session.getFactory();
final var jdbcServices = sessionFactory.getJdbcServices();
final var jdbcSelect =
jdbcServices.getJdbcEnvironment().getSqlAstTranslatorFactory()
.buildSelectTranslator( sessionFactory, sqlAst )
.translate( jdbcParameterBindings, QueryOptions.NONE );
var list = jdbcServices.getJdbcSelectExecutor().list(
jdbcSelect,
jdbcParameterBindings,
new BaseExecutionContext( session ),
RowTransformerStandardImpl.instance(),
null,
ListResultsConsumer.UniqueSemantic.FILTER,
1
);
return list.isEmpty() ? null : list.get( 0 );
}
/**
* If the index being loaded by for a List and the mapping specified a
* {@linkplain org.hibernate.annotations.ListIndexBase base-index}, this will return
* the passed {@code index} value incremented by the base. Otherwise, the passed {@code index}
* is returned.
*
* @param index The relational index value; specifically without any mapped base applied
*
* @return The appropriately incremented base
*/
protected Object incrementIndexByBase(Object index) {
if ( baseIndex > 0 ) {
return (Integer) index + baseIndex;
}
return index;
}
}
| CollectionElementLoaderByIndex |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java | {
"start": 1404,
"end": 4345
} | class ____ implements
COSCredentialsProvider, AutoCloseable {
private static final Logger LOG =
LoggerFactory.getLogger(COSCredentialsProviderList.class);
private static final String NO_COS_CREDENTIAL_PROVIDERS =
"No COS Credential Providers";
private static final String CREDENTIALS_REQUESTED_WHEN_CLOSED =
"Credentials requested after provider list was closed";
private final List<COSCredentialsProvider> providers =
new ArrayList<COSCredentialsProvider>(1);
private boolean reuseLastProvider = true;
private COSCredentialsProvider lastProvider;
private final AtomicInteger refCount = new AtomicInteger(1);
private final AtomicBoolean isClosed = new AtomicBoolean(false);
public COSCredentialsProviderList() {
}
public COSCredentialsProviderList(
Collection<COSCredentialsProvider> providers) {
this.providers.addAll(providers);
}
public void add(COSCredentialsProvider provider) {
this.providers.add(provider);
}
public int getRefCount() {
return this.refCount.get();
}
public void checkNotEmpty() {
if (this.providers.isEmpty()) {
throw new NoAuthWithCOSException(NO_COS_CREDENTIAL_PROVIDERS);
}
}
public COSCredentialsProviderList share() {
Preconditions.checkState(!this.closed(), "Provider list is closed");
this.refCount.incrementAndGet();
return this;
}
public boolean closed() {
return this.isClosed.get();
}
@Override
public COSCredentials getCredentials() {
if (this.closed()) {
throw new NoAuthWithCOSException(CREDENTIALS_REQUESTED_WHEN_CLOSED);
}
this.checkNotEmpty();
if (this.reuseLastProvider && this.lastProvider != null) {
return this.lastProvider.getCredentials();
}
for (COSCredentialsProvider provider : this.providers) {
COSCredentials credentials = provider.getCredentials();
if (null != credentials
&& !StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId())
&& !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey())
|| credentials instanceof AnonymousCOSCredentials) {
this.lastProvider = provider;
return credentials;
}
}
throw new NoAuthWithCOSException(
"No COS Credentials provided by " + this.providers.toString());
}
@Override
public void refresh() {
if (this.closed()) {
return;
}
for (COSCredentialsProvider cosCredentialsProvider : this.providers) {
cosCredentialsProvider.refresh();
}
}
@Override
public void close() throws Exception {
if (this.closed()) {
return;
}
int remainder = this.refCount.decrementAndGet();
if (remainder != 0) {
return;
}
this.isClosed.set(true);
for (COSCredentialsProvider provider : this.providers) {
if (provider instanceof Closeable) {
((Closeable) provider).close();
}
}
}
}
| COSCredentialsProviderList |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationSorter.java | {
"start": 4548,
"end": 6516
} | class ____ {
private final Map<String, AutoConfigurationClass> classes = new LinkedHashMap<>();
AutoConfigurationClasses(MetadataReaderFactory metadataReaderFactory,
@Nullable AutoConfigurationMetadata autoConfigurationMetadata, Collection<String> classNames) {
addToClasses(metadataReaderFactory, autoConfigurationMetadata, classNames, true);
}
Set<String> getAllNames() {
return this.classes.keySet();
}
private void addToClasses(MetadataReaderFactory metadataReaderFactory,
@Nullable AutoConfigurationMetadata autoConfigurationMetadata, Collection<String> classNames,
boolean required) {
for (String className : classNames) {
if (!this.classes.containsKey(className)) {
AutoConfigurationClass autoConfigurationClass = new AutoConfigurationClass(className,
metadataReaderFactory, autoConfigurationMetadata);
boolean available = autoConfigurationClass.isAvailable();
if (required || available) {
this.classes.put(className, autoConfigurationClass);
}
if (available) {
addToClasses(metadataReaderFactory, autoConfigurationMetadata,
autoConfigurationClass.getBefore(), false);
addToClasses(metadataReaderFactory, autoConfigurationMetadata,
autoConfigurationClass.getAfter(), false);
}
}
}
}
AutoConfigurationClass get(String className) {
AutoConfigurationClass autoConfigurationClass = this.classes.get(className);
Assert.state(autoConfigurationClass != null, "'autoConfigurationClass' must not be null");
return autoConfigurationClass;
}
Set<String> getClassesRequestedAfter(String className) {
Set<String> classesRequestedAfter = new LinkedHashSet<>(get(className).getAfter());
this.classes.forEach((name, autoConfigurationClass) -> {
if (autoConfigurationClass.getBefore().contains(className)) {
classesRequestedAfter.add(name);
}
});
return classesRequestedAfter;
}
}
private | AutoConfigurationClasses |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/common/runtime/src/main/java/org/jboss/resteasy/reactive/common/model/ResourceReader.java | {
"start": 484,
"end": 3556
} | class ____ {
private BeanFactory<MessageBodyReader<?>> factory;
private List<String> mediaTypeStrings = new ArrayList<>();
private RuntimeType constraint;
private boolean builtin = true;
private Integer priority = Priorities.USER;
private volatile List<MediaType> mediaTypes;
private volatile MessageBodyReader<?> instance;
public ResourceReader setFactory(BeanFactory<MessageBodyReader<?>> factory) {
this.factory = factory;
return this;
}
public BeanFactory<MessageBodyReader<?>> getFactory() {
return factory;
}
public List<String> getMediaTypeStrings() {
return mediaTypeStrings;
}
public ResourceReader setMediaTypeStrings(List<String> mediaTypeStrings) {
this.mediaTypeStrings = mediaTypeStrings;
return this;
}
public RuntimeType getConstraint() {
return constraint;
}
public ResourceReader setConstraint(RuntimeType constraint) {
this.constraint = constraint;
return this;
}
public boolean isBuiltin() {
return builtin;
}
public ResourceReader setBuiltin(boolean builtin) {
this.builtin = builtin;
return this;
}
public Integer getPriority() {
return priority;
}
public ResourceReader setPriority(Integer priority) {
this.priority = priority;
return this;
}
public MessageBodyReader<?> instance() {
if (instance == null) {
synchronized (this) {
if (instance == null) {
//todo: manage lifecycle of bean
instance = factory.createInstance().getInstance();
}
}
}
return instance;
}
public List<MediaType> mediaTypes() {
if (mediaTypes == null) {
//todo: does this actually need to be threadsafe?
synchronized (this) {
List<MediaType> mts = new ArrayList<>(mediaTypeStrings.size());
for (int i = 0; i < mediaTypeStrings.size(); i++) {
mts.add(MediaTypeHelper.valueOf(mediaTypeStrings.get(i)));
}
mediaTypes = Collections.unmodifiableList(mts);
}
}
return mediaTypes;
}
public boolean matchesRuntimeType(RuntimeType runtimeType) {
if (runtimeType == null) {
return true;
}
if (constraint == null) {
return true;
}
return runtimeType == constraint;
}
/**
* The comparison for now is simple:
* 1) Application provided writers come first
* 2) Readers with lower priority come first (same as reader interceptors)
* 3) Then the more specific the media type, the higher the priority
* 4) Finally we compare the number of media types
*
* The spec doesn't seem to mention this sorting being explicitly needed, but there are tests
* in the TCK that only pass reliably if the Readers are sorted like this
*/
public static | ResourceReader |
java | micronaut-projects__micronaut-core | http-server/src/main/java/io/micronaut/http/server/util/locale/HttpLocaleResolver.java | {
"start": 937,
"end": 1010
} | interface ____ extends LocaleResolver<HttpRequest<?>> {
}
| HttpLocaleResolver |
java | apache__camel | core/camel-util/src/main/java/org/apache/camel/util/ObjectHelper.java | {
"start": 33866,
"end": 33988
} | class ____ a subclass (extends or implements)
*
* @param clazz the class
* @param subClass the subclass ( | has |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4690InterdependentConflictResolutionTest.java | {
"start": 1123,
"end": 3422
} | class ____ extends AbstractMavenIntegrationTestCase {
// Ideally, all six permutations of the three direct dependencies should yield the same result...
@Test
public void testitADX() throws Exception {
// requiresMavenVersion("[3.0-beta-3,)");
testit("test-adx");
}
@Test
public void testitAXD() throws Exception {
testit("test-axd");
}
@Test
public void testitDAX() throws Exception {
// requiresMavenVersion("[3.0-beta-3,)");
testit("test-dax");
}
@Test
public void testitDXA() throws Exception {
testit("test-dxa");
}
@Test
public void testitXAD() throws Exception {
testit("test-xad");
}
@Test
public void testitXDA() throws Exception {
testit("test-xda");
}
/**
* Verify that conflict resolution doesn't depend on the declaration order of dependencies (from distinct tree
* levels) when the resolution of one conflict influences another conflict.
*/
private void testit(String test) throws Exception {
File testDir = extractResources("/mng-4690");
Verifier verifier = newVerifier(new File(testDir, test).getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.deleteArtifacts("org.apache.maven.its.mng4690");
verifier.addCliArgument("-s");
verifier.addCliArgument("settings.xml");
verifier.filterFile("../settings-template.xml", "settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> classpath = verifier.loadLines("target/classpath.txt");
assertTrue(classpath.contains("a-1.jar"), test + " > " + classpath.toString());
assertTrue(classpath.contains("b-1.jar"), test + " > " + classpath.toString());
assertTrue(classpath.contains("c-1.jar"), test + " > " + classpath.toString());
assertTrue(classpath.contains("d-1.jar"), test + " > " + classpath.toString());
assertTrue(classpath.contains("x-1.jar"), test + " > " + classpath.toString());
assertTrue(classpath.contains("y-2.jar"), test + " > " + classpath.toString());
}
}
| MavenITmng4690InterdependentConflictResolutionTest |
java | apache__logging-log4j2 | log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/resolver/CounterResolver.java | {
"start": 2761,
"end": 7251
} | class ____ implements EventResolver {
private final Consumer<JsonWriter> delegate;
public CounterResolver(final EventResolverContext context, final TemplateResolverConfig config) {
this.delegate = createDelegate(context, config);
}
private static Consumer<JsonWriter> createDelegate(
final EventResolverContext context, final TemplateResolverConfig config) {
final BigInteger start = readStart(config);
final boolean overflowing = config.getBoolean("overflowing", true);
final boolean stringified = config.getBoolean("stringified", false);
if (stringified) {
final Recycler<StringBuilder> stringBuilderRecycler = createStringBuilderRecycler(context);
return overflowing
? createStringifiedLongResolver(start, stringBuilderRecycler)
: createStringifiedBigIntegerResolver(start, stringBuilderRecycler);
} else {
return overflowing ? createLongResolver(start) : createBigIntegerResolver(start);
}
}
private static BigInteger readStart(final TemplateResolverConfig config) {
final Object start = config.getObject("start", Object.class);
if (start == null) {
return BigInteger.ZERO;
} else if (start instanceof Short || start instanceof Integer || start instanceof Long) {
return BigInteger.valueOf(((Number) start).longValue());
} else if (start instanceof BigInteger) {
return (BigInteger) start;
} else {
final Class<?> clazz = start.getClass();
final String message = String.format("could not read start of type %s: %s", clazz, config);
throw new IllegalArgumentException(message);
}
}
private static Consumer<JsonWriter> createLongResolver(final BigInteger start) {
final long effectiveStart = start.longValue();
final AtomicLong counter = new AtomicLong(effectiveStart);
return (jsonWriter) -> {
final long number = counter.getAndIncrement();
jsonWriter.writeNumber(number);
};
}
private static Consumer<JsonWriter> createBigIntegerResolver(final BigInteger start) {
final AtomicBigInteger counter = new AtomicBigInteger(start);
return jsonWriter -> {
final BigInteger number = counter.getAndIncrement();
jsonWriter.writeNumber(number);
};
}
private static Recycler<StringBuilder> createStringBuilderRecycler(final EventResolverContext context) {
return context.getRecyclerFactory().create(StringBuilder::new, stringBuilder -> {
final int maxLength = context.getJsonWriter().getMaxStringLength();
trimStringBuilder(stringBuilder, maxLength);
});
}
private static void trimStringBuilder(final StringBuilder stringBuilder, final int maxLength) {
if (stringBuilder.length() > maxLength) {
stringBuilder.setLength(maxLength);
stringBuilder.trimToSize();
}
stringBuilder.setLength(0);
}
private static Consumer<JsonWriter> createStringifiedLongResolver(
final BigInteger start, final Recycler<StringBuilder> stringBuilderRecycler) {
final long effectiveStart = start.longValue();
final AtomicLong counter = new AtomicLong(effectiveStart);
return (jsonWriter) -> {
final long number = counter.getAndIncrement();
final StringBuilder stringBuilder = stringBuilderRecycler.acquire();
try {
stringBuilder.append(number);
jsonWriter.writeString(stringBuilder);
} finally {
stringBuilderRecycler.release(stringBuilder);
}
};
}
private static Consumer<JsonWriter> createStringifiedBigIntegerResolver(
final BigInteger start, final Recycler<StringBuilder> stringBuilderRecycler) {
final AtomicBigInteger counter = new AtomicBigInteger(start);
return jsonWriter -> {
final BigInteger number = counter.getAndIncrement();
final StringBuilder stringBuilder = stringBuilderRecycler.acquire();
try {
stringBuilder.append(number);
jsonWriter.writeString(stringBuilder);
} finally {
stringBuilderRecycler.release(stringBuilder);
}
};
}
private static final | CounterResolver |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2DdbstreamComponentBuilderFactory.java | {
"start": 1896,
"end": 16382
} | interface ____ extends ComponentBuilder<Ddb2StreamComponent> {
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* The component configuration.
*
* The option is a:
* <code>org.apache.camel.component.aws2.ddbstream.Ddb2StreamConfiguration</code> type.
*
* Group: consumer
*
* @param configuration the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder configuration(org.apache.camel.component.aws2.ddbstream.Ddb2StreamConfiguration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* Maximum number of records that will be fetched in each poll.
*
* The option is a: <code>int</code> type.
*
* Group: consumer
*
* @param maxResultsPerRequest the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder maxResultsPerRequest(int maxResultsPerRequest) {
doSetProperty("maxResultsPerRequest", maxResultsPerRequest);
return this;
}
/**
* Set the need for overidding the endpoint. This option needs to be
* used in combination with uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* The region in which DDBStreams client needs to work.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param region the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder region(java.lang.String region) {
doSetProperty("region", region);
return this;
}
/**
* Defines where in the DynamoDB stream to start getting records. Note
* that using FROM_START can cause a significant delay before the stream
* has caught up to real-time.
*
* The option is a:
* <code>org.apache.camel.component.aws2.ddbstream.Ddb2StreamConfiguration.StreamIteratorType</code> type.
*
* Default: FROM_LATEST
* Group: consumer
*
* @param streamIteratorType the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder streamIteratorType(org.apache.camel.component.aws2.ddbstream.Ddb2StreamConfiguration.StreamIteratorType streamIteratorType) {
doSetProperty("streamIteratorType", streamIteratorType);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder uriEndpointOverride(java.lang.String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* Amazon DynamoDB client to use for all requests for this endpoint.
*
* The option is a:
* <code>software.amazon.awssdk.services.dynamodb.streams.DynamoDbStreamsClient</code> type.
*
* Group: consumer (advanced)
*
* @param amazonDynamoDbStreamsClient the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder amazonDynamoDbStreamsClient(software.amazon.awssdk.services.dynamodb.streams.DynamoDbStreamsClient amazonDynamoDbStreamsClient) {
doSetProperty("amazonDynamoDbStreamsClient", amazonDynamoDbStreamsClient);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* Used for enabling or disabling all consumer based health checks from
* this component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckConsumerEnabled the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder healthCheckConsumerEnabled(boolean healthCheckConsumerEnabled) {
doSetProperty("healthCheckConsumerEnabled", healthCheckConsumerEnabled);
return this;
}
/**
* Used for enabling or disabling all producer based health checks from
* this component. Notice: Camel has by default disabled all producer
* based health-checks. You can turn on producer checks globally by
* setting camel.health.producersEnabled=true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckProducerEnabled the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder healthCheckProducerEnabled(boolean healthCheckProducerEnabled) {
doSetProperty("healthCheckProducerEnabled", healthCheckProducerEnabled);
return this;
}
/**
* To define a proxy host when instantiating the DDBStreams client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder proxyHost(java.lang.String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the DDBStreams client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder proxyPort(java.lang.Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the DDBStreams client.
*
* The option is a:
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder accessKey(java.lang.String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* If using a profile credentials provider this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder profileCredentialsName(java.lang.String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder secretKey(java.lang.String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token used when the user needs to assume a IAM
* role.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder sessionToken(java.lang.String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder trustAllCertificates(boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* Set whether the DynamoDB Streams client should expect to load
* credentials through a default credentials provider or to expect
* static credentials to be passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the Cloudtrail client should expect to load credentials
* through a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the DDB Streams client should expect to use Session
* Credentials. This is useful in situation in which the user needs to
* assume a IAM role for doing operations in DDB.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default Aws2DdbstreamComponentBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
| Aws2DdbstreamComponentBuilder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.