language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__hadoop | hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingCounters.java | {
"start": 1187,
"end": 1254
} | class ____ streaming counters in MapReduce local mode.
*/
public | tests |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java | {
"start": 2435,
"end": 11361
} | class ____ extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(CustomIngestTestPlugin.class);
}
private static final AtomicLong versionCounter = new AtomicLong(1);
private static final String testJSON = """
{
"metadata": {
"version": "%s",
"compatibility": "8.4.0"
},
"state": {
"ingest_pipelines": {
"my_ingest_pipeline": {
"description": "_description",
"processors": [
{
"test" : {
"field": "pipeline",
"value": "pipeline"
}
}
]
},
"my_ingest_pipeline_1": {
"description": "_description",
"processors": [
{
"test" : {
"field": "pipeline",
"value": "pipeline"
}
}
]
}
}
}
}""";
private static final String testErrorJSON = """
{
"metadata": {
"version": "%s",
"compatibility": "8.4.0"
},
"state": {
"ingest_pipelines": {
"my_ingest_pipeline": {
"description": "_description",
"processors":
{
"foo" : {
"field": "pipeline",
"value": "pipeline"
}
}
]
}
}
}
}""";
private void writeJSONFile(String node, String json) throws Exception {
long version = versionCounter.incrementAndGet();
FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node);
assertTrue(fileSettingsService.watching());
Files.deleteIfExists(fileSettingsService.watchedFile());
Files.createDirectories(fileSettingsService.watchedFileDir());
Path tempFilePath = createTempFile();
logger.info("--> writing JSON config to node {} with path {}", node, tempFilePath);
logger.info(Strings.format(json, version));
Files.writeString(tempFilePath, Strings.format(json, version));
Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE);
}
private Tuple<CountDownLatch, AtomicLong> setupClusterStateListener(String node) {
ClusterService clusterService = internalCluster().clusterService(node);
CountDownLatch savedClusterState = new CountDownLatch(1);
AtomicLong metadataVersion = new AtomicLong(-1);
clusterService.addListener(new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE);
if (reservedState != null) {
ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedPipelineAction.NAME);
if (handlerMetadata != null && handlerMetadata.keys().contains("my_ingest_pipeline")) {
clusterService.removeListener(this);
metadataVersion.set(event.state().metadata().version());
savedClusterState.countDown();
}
}
}
});
return new Tuple<>(savedClusterState, metadataVersion);
}
private void assertPipelinesSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception {
boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS);
assertTrue(awaitSuccessful);
final ClusterStateResponse clusterStateResponse = clusterAdmin().state(
new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get())
).get();
ReservedStateMetadata reservedState = clusterStateResponse.getState()
.metadata()
.reservedStateMetadata()
.get(FileSettingsService.NAMESPACE);
ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedPipelineAction.NAME);
assertThat(handlerMetadata.keys(), allOf(notNullValue(), containsInAnyOrder("my_ingest_pipeline", "my_ingest_pipeline_1")));
// Try using the REST API to update the my_autoscaling_policy policy
// This should fail, we have reserved certain autoscaling policies in operator mode
assertEquals(
"Failed to process request [org.elasticsearch.action.ingest.PutPipelineRequest/unset] with errors: "
+ "[[my_ingest_pipeline] set as read-only by [file_settings]]",
expectThrows(
IllegalArgumentException.class,
client().execute(PutPipelineTransportAction.TYPE, sampleRestRequest("my_ingest_pipeline"))
).getMessage()
);
}
public void testPoliciesApplied() throws Exception {
ensureGreen();
var savedClusterState = setupClusterStateListener(internalCluster().getMasterName());
writeJSONFile(internalCluster().getMasterName(), testJSON);
assertPipelinesSaveOK(savedClusterState.v1(), savedClusterState.v2());
}
private Tuple<CountDownLatch, AtomicLong> setupClusterStateListenerForError(String node) {
ClusterService clusterService = internalCluster().clusterService(node);
CountDownLatch savedClusterState = new CountDownLatch(1);
AtomicLong metadataVersion = new AtomicLong(-1);
clusterService.addListener(new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE);
if (reservedState != null && reservedState.errorMetadata() != null) {
clusterService.removeListener(this);
metadataVersion.set(event.state().metadata().version());
savedClusterState.countDown();
assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, reservedState.errorMetadata().errorKind());
assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1)));
assertThat(
reservedState.errorMetadata().errors().get(0),
containsString("org.elasticsearch.xcontent.XContentParseException: [17:16] [reserved_state_chunk] failed")
);
}
}
});
return new Tuple<>(savedClusterState, metadataVersion);
}
private void assertPipelinesNotSaved(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception {
boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS);
assertTrue(awaitSuccessful);
// This should succeed, nothing was reserved
client().execute(PutPipelineTransportAction.TYPE, sampleRestRequest("my_ingest_pipeline_bad")).get();
}
public void testErrorSaved() throws Exception {
ensureGreen();
var savedClusterState = setupClusterStateListenerForError(internalCluster().getMasterName());
writeJSONFile(internalCluster().getMasterName(), testErrorJSON);
assertPipelinesNotSaved(savedClusterState.v1(), savedClusterState.v2());
}
private PutPipelineRequest sampleRestRequest(String id) throws Exception {
var json = """
{
"description": "_description",
"processors": [
{
"test" : {
"field": "_foo",
"value": "_bar"
}
}
]
}""";
try (
var bis = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8));
var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis);
var builder = XContentFactory.contentBuilder(JSON)
) {
builder.map(parser.map());
return putJsonPipelineRequest(id, bytes(builder));
}
}
public static | IngestFileSettingsIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/joinable/ManyToOneJoinTableTest.java | {
"start": 2780,
"end": 3811
} | class ____ extends ResourceImpl implements Issuer {
private static final String SELECT_RESOURCES_BY_ISSUER = "SELECT_RESOURCES_BY_ISSUER";
private static final String ENTITY_NAME = "Issuer";
public static final String PARENT_ISSUER_COLUMN = "parent_issuer";
public static final String PARENT_IDENTIFIER_COLUMN = "parent_identifier";
public static final String TABLE_NAME = "issuer_impl";
@ManyToOne(targetEntity = IssuerImpl.class)
@JoinColumn(name = PARENT_ISSUER_COLUMN, table = TABLE_NAME, referencedColumnName = "issuer")
@JoinColumn(name = PARENT_IDENTIFIER_COLUMN, table = TABLE_NAME, referencedColumnName = "identifier")
private Issuer parentIssuer;
public Identifier getIdentifier() {
return identifier;
}
public void setIdentifier(Identifier identifier) {
this.identifier = identifier;
}
public Issuer getParentIssuer() {
return parentIssuer;
}
public void setParentIssuer(Issuer parentIssuer) {
this.parentIssuer = parentIssuer;
}
}
@Embeddable
public static | IssuerImpl |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/domain/gambit/MutableValue.java | {
"start": 365,
"end": 639
} | class ____ implements Serializable {
private String state;
public MutableValue() {
}
public MutableValue(String state) {
this.state = state;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
}
| MutableValue |
java | elastic__elasticsearch | x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/FollowInfoIT.java | {
"start": 1108,
"end": 8837
} | class ____ extends CcrSingleNodeTestCase {
public void testFollowInfoApiFollowerIndexFiltering() throws Exception {
final String leaderIndexSettings = getIndexSettings(1, 0, Collections.emptyMap());
assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON));
ensureGreen("leader1");
assertAcked(client().admin().indices().prepareCreate("leader2").setSource(leaderIndexSettings, XContentType.JSON));
ensureGreen("leader2");
PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1");
client().execute(PutFollowAction.INSTANCE, followRequest).get();
followRequest = getPutFollowRequest("leader2", "follower2");
client().execute(PutFollowAction.INSTANCE, followRequest).get();
FollowInfoAction.Request request = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request.setFollowerIndices("follower1");
FollowInfoAction.Response response = client().execute(FollowInfoAction.INSTANCE, request).actionGet();
assertThat(response.getFollowInfos().size(), equalTo(1));
assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1"));
assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1"));
assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE));
assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue());
request = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request.setFollowerIndices("follower2");
response = client().execute(FollowInfoAction.INSTANCE, request).actionGet();
assertThat(response.getFollowInfos().size(), equalTo(1));
assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower2"));
assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader2"));
assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE));
assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue());
request = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request.setFollowerIndices("_all");
response = client().execute(FollowInfoAction.INSTANCE, request).actionGet();
response.getFollowInfos().sort(Comparator.comparing(FollowInfoAction.Response.FollowerInfo::getFollowerIndex));
assertThat(response.getFollowInfos().size(), equalTo(2));
assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1"));
assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1"));
assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE));
assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue());
assertThat(response.getFollowInfos().get(1).getFollowerIndex(), equalTo("follower2"));
assertThat(response.getFollowInfos().get(1).getLeaderIndex(), equalTo("leader2"));
assertThat(response.getFollowInfos().get(1).getStatus(), equalTo(Status.ACTIVE));
assertThat(response.getFollowInfos().get(1).getParameters(), notNullValue());
// Pause follower1 index and check the follower info api:
assertAcked(
client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request(TEST_REQUEST_TIMEOUT, "follower1")).actionGet()
);
request = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request.setFollowerIndices("follower1");
response = client().execute(FollowInfoAction.INSTANCE, request).actionGet();
assertThat(response.getFollowInfos().size(), equalTo(1));
assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1"));
assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1"));
assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.PAUSED));
assertThat(response.getFollowInfos().get(0).getParameters(), nullValue());
request = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request.setFollowerIndices("follower2");
response = client().execute(FollowInfoAction.INSTANCE, request).actionGet();
assertThat(response.getFollowInfos().size(), equalTo(1));
assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower2"));
assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader2"));
assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.ACTIVE));
assertThat(response.getFollowInfos().get(0).getParameters(), notNullValue());
request = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request.setFollowerIndices("_all");
response = client().execute(FollowInfoAction.INSTANCE, request).actionGet();
response.getFollowInfos().sort(Comparator.comparing(FollowInfoAction.Response.FollowerInfo::getFollowerIndex));
assertThat(response.getFollowInfos().size(), equalTo(2));
assertThat(response.getFollowInfos().get(0).getFollowerIndex(), equalTo("follower1"));
assertThat(response.getFollowInfos().get(0).getLeaderIndex(), equalTo("leader1"));
assertThat(response.getFollowInfos().get(0).getStatus(), equalTo(Status.PAUSED));
assertThat(response.getFollowInfos().get(0).getParameters(), nullValue());
assertThat(response.getFollowInfos().get(1).getFollowerIndex(), equalTo("follower2"));
assertThat(response.getFollowInfos().get(1).getLeaderIndex(), equalTo("leader2"));
assertThat(response.getFollowInfos().get(1).getStatus(), equalTo(Status.ACTIVE));
assertThat(response.getFollowInfos().get(1).getParameters(), notNullValue());
assertAcked(
client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request(TEST_REQUEST_TIMEOUT, "follower2")).actionGet()
);
}
public void testFollowInfoApiIndexMissing() throws Exception {
final String leaderIndexSettings = getIndexSettings(1, 0, Collections.emptyMap());
assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON));
ensureGreen("leader1");
assertAcked(client().admin().indices().prepareCreate("leader2").setSource(leaderIndexSettings, XContentType.JSON));
ensureGreen("leader2");
PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1");
client().execute(PutFollowAction.INSTANCE, followRequest).get();
followRequest = getPutFollowRequest("leader2", "follower2");
client().execute(PutFollowAction.INSTANCE, followRequest).get();
FollowInfoAction.Request request1 = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request1.setFollowerIndices("follower3");
expectThrows(IndexNotFoundException.class, () -> client().execute(FollowInfoAction.INSTANCE, request1).actionGet());
FollowInfoAction.Request request2 = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
request2.setFollowerIndices("follower2", "follower3");
expectThrows(IndexNotFoundException.class, () -> client().execute(FollowInfoAction.INSTANCE, request2).actionGet());
assertAcked(
client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request(TEST_REQUEST_TIMEOUT, "follower1")).actionGet()
);
assertAcked(
client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request(TEST_REQUEST_TIMEOUT, "follower2")).actionGet()
);
}
}
| FollowInfoIT |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2S3ComponentBuilderFactory.java | {
"start": 1880,
"end": 26858
} | interface ____ extends ComponentBuilder<AWS2S3Component> {
/**
* Setting the autocreation of the S3 bucket bucketName. This will apply
* also in case of moveAfterRead option enabled, and it will create the
* destinationBucket if it doesn't exist already.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param autoCreateBucket the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder autoCreateBucket(boolean autoCreateBucket) {
doSetProperty("autoCreateBucket", autoCreateBucket);
return this;
}
/**
* The component configuration.
*
* The option is a:
* <code>org.apache.camel.component.aws2.s3.AWS2S3Configuration</code> type.
*
* Group: common
*
* @param configuration the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder configuration(org.apache.camel.component.aws2.s3.AWS2S3Configuration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* The delimiter which is used in the
* com.amazonaws.services.s3.model.ListObjectsRequest to only consume
* objects we are interested in.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param delimiter the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder delimiter(java.lang.String delimiter) {
doSetProperty("delimiter", delimiter);
return this;
}
/**
* Set whether the S3 client should use path-style URL instead of
* virtual-hosted-style.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param forcePathStyle the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder forcePathStyle(boolean forcePathStyle) {
doSetProperty("forcePathStyle", forcePathStyle);
return this;
}
/**
* If it is true, the S3 Object Body will be ignored completely if it is
* set to false, the S3 Object will be put in the body. Setting this to
* true will override any behavior defined by includeBody option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param ignoreBody the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder ignoreBody(boolean ignoreBody) {
doSetProperty("ignoreBody", ignoreBody);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with the uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder pojoRequest(boolean pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* The policy for this queue to set in the
* com.amazonaws.services.s3.AmazonS3#setBucketPolicy() method.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param policy the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder policy(java.lang.String policy) {
doSetProperty("policy", policy);
return this;
}
/**
* The prefix which is used in the
* com.amazonaws.services.s3.model.ListObjectsRequest to only consume
* objects we are interested in.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param prefix the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder prefix(java.lang.String prefix) {
doSetProperty("prefix", prefix);
return this;
}
/**
* The region in which the S3 client needs to work. When using this
* parameter, the configuration will expect the lowercase name of the
* region (for example, ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param region the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder region(java.lang.String region) {
doSetProperty("region", region);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder uriEndpointOverride(java.lang.String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* Define the customer algorithm to use in case CustomerKey is enabled.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common (advanced)
*
* @param customerAlgorithm the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder customerAlgorithm(java.lang.String customerAlgorithm) {
doSetProperty("customerAlgorithm", customerAlgorithm);
return this;
}
/**
* Define the id of the Customer key to use in case CustomerKey is
* enabled.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common (advanced)
*
* @param customerKeyId the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder customerKeyId(java.lang.String customerKeyId) {
doSetProperty("customerKeyId", customerKeyId);
return this;
}
/**
* Define the MD5 of Customer key to use in case CustomerKey is enabled.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common (advanced)
*
* @param customerKeyMD5 the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder customerKeyMD5(java.lang.String customerKeyMD5) {
doSetProperty("customerKeyMD5", customerKeyMD5);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Delete objects from S3 after they have been retrieved. The deleting
* is only performed if the Exchange is committed. If a rollback occurs,
* the object is not deleted. If this option is false, then the same
* objects will be retrieved over and over again in the polls.
* Therefore, you need to use the Idempotent Consumer EIP in the route
* to filter out duplicates. You can filter using the
* AWS2S3Constants#BUCKET_NAME and AWS2S3Constants#KEY headers, or only
* the AWS2S3Constants#KEY header.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param deleteAfterRead the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder deleteAfterRead(boolean deleteAfterRead) {
doSetProperty("deleteAfterRead", deleteAfterRead);
return this;
}
/**
* Define the destination bucket where an object must be moved when
* moveAfterRead is set to true.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param destinationBucket the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder destinationBucket(java.lang.String destinationBucket) {
doSetProperty("destinationBucket", destinationBucket);
return this;
}
/**
* Define the destination bucket prefix to use when an object must be
* moved, and moveAfterRead is set to true.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param destinationBucketPrefix the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder destinationBucketPrefix(java.lang.String destinationBucketPrefix) {
doSetProperty("destinationBucketPrefix", destinationBucketPrefix);
return this;
}
/**
* Define the destination bucket suffix to use when an object must be
* moved, and moveAfterRead is set to true.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param destinationBucketSuffix the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder destinationBucketSuffix(java.lang.String destinationBucketSuffix) {
doSetProperty("destinationBucketSuffix", destinationBucketSuffix);
return this;
}
/**
* If provided, Camel will only consume files if a done file exists.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param doneFileName the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder doneFileName(java.lang.String doneFileName) {
doSetProperty("doneFileName", doneFileName);
return this;
}
/**
* To get the object from the bucket with the given file name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param fileName the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder fileName(java.lang.String fileName) {
doSetProperty("fileName", fileName);
return this;
}
/**
* If it is true, the S3Object exchange will be consumed and put into
* the body and closed. If false, the S3Object stream will be put raw
* into the body and the headers will be set with the S3 object
* metadata. This option is strongly related to the autocloseBody
* option. In case of setting includeBody to true because the S3Object
* stream will be consumed then it will also be closed, while in case of
* includeBody false then it will be up to the caller to close the
* S3Object stream. However, setting autocloseBody to true when
* includeBody is false it will schedule to close the S3Object stream
* automatically on exchange completion.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param includeBody the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder includeBody(boolean includeBody) {
doSetProperty("includeBody", includeBody);
return this;
}
/**
* If it is true, the folders/directories will be consumed. If it is
* false, they will be ignored, and Exchanges will not be created for
* those.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer
*
* @param includeFolders the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder includeFolders(boolean includeFolders) {
doSetProperty("includeFolders", includeFolders);
return this;
}
/**
* Move objects from S3 bucket to a different bucket after they have
* been retrieved. To accomplish the operation, the destinationBucket
* option must be set. The copy bucket operation is only performed if
* the Exchange is committed. If a rollback occurs, the object is not
* moved.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param moveAfterRead the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder moveAfterRead(boolean moveAfterRead) {
doSetProperty("moveAfterRead", moveAfterRead);
return this;
}
/**
* Remove the contents of the prefix configuration string from the new
* S3Object key before copying. For example, if prefix is set to
* 'demo/notify' and the destinationBucketPrefix is set to
* 'demo/archive', an S3Object with a key of 'demo/notify/example.txt'
* will be copied to 'demo/archive/example.txt', rather than the default
* behavior where the new key is 'demo/archive/demo/notify/example.txt'.
* Only applicable when moveAfterRead is true.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param removePrefixOnMove the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder removePrefixOnMove(boolean removePrefixOnMove) {
doSetProperty("removePrefixOnMove", removePrefixOnMove);
return this;
}
/**
* If this option is true and includeBody is false, then the
* S3Object.close() method will be called on exchange completion. This
* option is strongly related to includeBody option. In case of setting
* includeBody to false and autocloseBody to false, it will be up to the
* caller to close the S3Object stream. Setting autocloseBody to true,
* will close the S3Object stream automatically.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: consumer (advanced)
*
* @param autocloseBody the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder autocloseBody(boolean autocloseBody) {
doSetProperty("autocloseBody", autocloseBody);
return this;
}
/**
* The number of messages composing a batch in streaming upload mode.
*
* The option is a: <code>int</code> type.
*
* Default: 10
* Group: producer
*
* @param batchMessageNumber the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder batchMessageNumber(int batchMessageNumber) {
doSetProperty("batchMessageNumber", batchMessageNumber);
return this;
}
/**
* The batch size (in bytes) in streaming upload mode.
*
* The option is a: <code>int</code> type.
*
* Default: 1000000
* Group: producer
*
* @param batchSize the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder batchSize(int batchSize) {
doSetProperty("batchSize", batchSize);
return this;
}
/**
* The buffer size (in bytes) in streaming upload mode.
*
* The option is a: <code>int</code> type.
*
* Default: 1000000
* Group: producer
*
* @param bufferSize the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder bufferSize(int bufferSize) {
doSetProperty("bufferSize", bufferSize);
return this;
}
/**
* Delete file object after the S3 file has been uploaded.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param deleteAfterWrite the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder deleteAfterWrite(boolean deleteAfterWrite) {
doSetProperty("deleteAfterWrite", deleteAfterWrite);
return this;
}
/**
* Setting the key name for an element in the bucket through endpoint
* parameter.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param keyName the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder keyName(java.lang.String keyName) {
doSetProperty("keyName", keyName);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* If it is true, camel will upload the file with multipart format. The
* part size is decided by the partSize option. Camel will only do
* multipart uploads for files that are larger than the part-size
* thresholds. Files that are smaller will be uploaded in a single
* operation.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param multiPartUpload the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder multiPartUpload(boolean multiPartUpload) {
doSetProperty("multiPartUpload", multiPartUpload);
return this;
}
/**
* The naming strategy to use in streaming upload mode.
*
* The option is a:
* <code>org.apache.camel.component.aws2.s3.stream.AWSS3NamingStrategyEnum</code> type.
*
* Default: progressive
* Group: producer
*
* @param namingStrategy the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder namingStrategy(org.apache.camel.component.aws2.s3.stream.AWSS3NamingStrategyEnum namingStrategy) {
doSetProperty("namingStrategy", namingStrategy);
return this;
}
/**
* The operation to do in case the user don't want to do only an upload.
*
* The option is a:
* <code>org.apache.camel.component.aws2.s3.AWS2S3Operations</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder operation(org.apache.camel.component.aws2.s3.AWS2S3Operations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Set up the partSize which is used in multipart upload, the default
* size is 25 MB. The minimum size in AWS is 5 MB. Camel will only do
* multipart uploads for files that are larger than the part-size
* thresholds. Files that are smaller will be uploaded in a single
* operation.
*
* The option is a: <code>long</code> type.
*
* Default: 26214400
* Group: producer
*
* @param partSize the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder partSize(long partSize) {
doSetProperty("partSize", partSize);
return this;
}
/**
* The restarting policy to use in streaming upload mode.
*
* The option is a:
* <code>org.apache.camel.component.aws2.s3.stream.AWSS3RestartingPolicyEnum</code> type.
*
* Default: override
* Group: producer
*
* @param restartingPolicy the value to set
* @return the dsl builder
*/
default Aws2S3ComponentBuilder restartingPolicy(org.apache.camel.component.aws2.s3.stream.AWSS3RestartingPolicyEnum restartingPolicy) {
doSetProperty("restartingPolicy", restartingPolicy);
return this;
}
/**
* The storage | Aws2S3ComponentBuilder |
java | elastic__elasticsearch | modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java | {
"start": 6250,
"end": 58795
} | class ____ extends ESTestCase {
    // Per-test collaborators: created fresh in setup() and cleared in shutdown().
    private NetworkService networkService;
    private ThreadPool threadPool;
    private ClusterSettings clusterSettings;
@Before
public void setup() throws Exception {
networkService = new NetworkService(Collections.emptyList());
threadPool = new TestThreadPool("test");
clusterSettings = randomClusterSettings();
}
@After
public void shutdown() throws Exception {
if (threadPool != null) {
threadPool.shutdownNow();
}
threadPool = null;
networkService = null;
clusterSettings = null;
}
/**
* Test that {@link Netty4HttpServerTransport} supports the "Expect: 100-continue" HTTP header
* @throws InterruptedException if the client communication with the server is interrupted
*/
public void testExpectContinueHeader() throws InterruptedException {
final Settings settings = createSettings();
final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt());
runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE);
}
/**
* Test that {@link Netty4HttpServerTransport} responds to a
* 100-continue expectation with too large a content-length
* with a 413 status.
* @throws InterruptedException if the client communication with the server is interrupted
*/
public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException {
final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey();
final int maxContentLength = randomIntBetween(1, 104857600);
final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build();
final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE);
runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE);
}
/**
* Test that {@link Netty4HttpServerTransport} responds to an unsupported expectation with a 417 status.
* @throws InterruptedException if the client communication with the server is interrupted
*/
public void testExpectUnsupportedExpectation() throws InterruptedException {
Settings settings = createSettings();
runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED);
}
    /**
     * Starts a transport with the given settings, sends a POST carrying the supplied
     * {@code Expect} header value and Content-Length, and asserts the response status.
     * When 100 Continue is expected, a follow-up empty-body request is also sent and
     * must complete with 200 OK and the dispatcher's "done" payload.
     */
    private void runExpectHeaderTest(
        final Settings settings,
        final String expectation,
        final int contentLength,
        final HttpResponseStatus expectedStatus
    ) throws InterruptedException {
        // Dispatcher that unconditionally answers "done"; a bad-request dispatch fails the test.
        final HttpServerTransport.Dispatcher dispatcher = new AggregatingDispatcher() {
            @Override
            public void dispatchAggregatedRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {
                channel.sendResponse(new RestResponse(OK, RestResponse.TEXT_CONTENT_TYPE, new BytesArray("done")));
            }
            @Override
            public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) {
                logger.error(() -> "--> Unexpected bad request [" + FakeRestRequest.requestToString(channel.request()) + "]", cause);
                throw new AssertionError();
            }
        };
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                settings,
                networkService,
                threadPool,
                xContentRegistry(),
                dispatcher,
                clusterSettings,
                new SharedGroupFactory(settings),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                null,
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
                request.headers().set(HttpHeaderNames.EXPECT, expectation);
                HttpUtil.setContentLength(request, contentLength);
                final FullHttpResponse response = client.send(remoteAddress.address(), request);
                try {
                    assertThat(response.status(), equalTo(expectedStatus));
                    if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) {
                        // The server agreed to the handshake: send the (empty) body and expect the dispatcher's reply.
                        final FullHttpRequest continuationRequest = new DefaultFullHttpRequest(
                            HttpVersion.HTTP_1_1,
                            HttpMethod.POST,
                            "/",
                            Unpooled.EMPTY_BUFFER
                        );
                        final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), continuationRequest);
                        try {
                            assertThat(continuationResponse.status(), is(HttpResponseStatus.OK));
                            assertThat(
                                new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8),
                                is("done")
                            );
                        } finally {
                            continuationResponse.release();
                        }
                    }
                } finally {
                    // Netty responses are reference-counted; always release to avoid buffer leaks.
                    response.release();
                }
            }
        }
    }
    /**
     * Binding a second transport to the exact address/port already held by a running
     * transport must fail fast with a {@link BindHttpException} naming the contested address.
     */
    public void testBindUnavailableAddress() {
        Settings initialSettings = createSettings();
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                initialSettings,
                networkService,
                threadPool,
                xContentRegistry(),
                new AggregatingDispatcher(),
                clusterSettings,
                new SharedGroupFactory(Settings.EMPTY),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                null,
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            )
        ) {
            transport.start();
            TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            // Point the second transport at the exact address the first one already bound.
            Settings settings = Settings.builder()
                .put("http.port", remoteAddress.getPort())
                .put("network.host", remoteAddress.getAddress())
                .build();
            try (
                Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(
                    settings,
                    networkService,
                    threadPool,
                    xContentRegistry(),
                    new AggregatingDispatcher(),
                    clusterSettings,
                    new SharedGroupFactory(settings),
                    TelemetryProvider.NOOP,
                    TLSConfig.noTLS(),
                    null,
                    randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
                )
            ) {
                BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start);
                assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage());
            }
        }
    }
    /**
     * A request URI longer than the configured max initial line length must be routed to
     * {@code dispatchBadRequest} with a {@link TooLongFrameException} cause and answered
     * with a 400 Bad Request carrying the dispatcher's error payload.
     */
    public void testBadRequest() throws InterruptedException {
        final AtomicReference<Throwable> causeReference = new AtomicReference<>();
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
                throw new AssertionError();
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                // Capture the cause for the post-request assertion below.
                causeReference.set(cause);
                try {
                    final ElasticsearchException e = new ElasticsearchException("you sent a bad request and you should feel bad");
                    channel.sendResponse(new RestResponse(channel, BAD_REQUEST, e));
                } catch (final IOException e) {
                    throw new AssertionError(e);
                }
            }
        };
        final Settings settings;
        final int maxInitialLineLength;
        final Setting<ByteSizeValue> httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
        // Randomly exercise both the default limit and an explicitly configured one.
        if (randomBoolean()) {
            maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();
            settings = createSettings();
        } else {
            maxInitialLineLength = randomIntBetween(1, 8192);
            settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build();
        }
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                settings,
                networkService,
                threadPool,
                xContentRegistry(),
                dispatcher,
                clusterSettings,
                new SharedGroupFactory(settings),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                null,
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                // The "/" prefix plus maxInitialLineLength filler guarantees the request line is over the limit.
                final String url = "/" + new String(new byte[maxInitialLineLength], StandardCharsets.UTF_8);
                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
                final FullHttpResponse response = client.send(remoteAddress.address(), request);
                try {
                    assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST));
                    assertThat(
                        new String(response.content().array(), StandardCharsets.UTF_8),
                        containsString("you sent a bad request and you should feel bad")
                    );
                } finally {
                    response.release();
                }
            }
        }
        assertNotNull(causeReference.get());
        assertThat(causeReference.get(), instanceOf(TooLongFrameException.class));
    }
    /** Runs the large-response scenario with client-requested response compression. */
    public void testLargeCompressedResponse() throws InterruptedException {
        testLargeResponse(true);
    }
    /** Runs the large-response scenario without response compression. */
    public void testLargeUncompressedResponse() throws InterruptedException {
        testLargeResponse(false);
    }
    /**
     * Serves a 4 MiB response and asserts that (a) the channel handler throttles writes to
     * writable channels only (via an injected outbound handler), (b) no "huge" pooled-buffer
     * allocations occur, and (c) the client receives the payload intact.
     */
    private void testLargeResponse(boolean compressed) throws InterruptedException {
        final String responseString = randomAlphaOfLength(4 * 1024 * 1024);
        final String url = "/thing";
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                if (url.equals(request.uri())) {
                    channel.sendResponse(new RestResponse(OK, responseString));
                } else {
                    logger.error("--> Unexpected successful uri [{}]", request.uri());
                    throw new AssertionError();
                }
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                logger.error(() -> "--> Unexpected bad request [" + FakeRestRequest.requestToString(channel.request()) + "]", cause);
                throw new AssertionError();
            }
        };
        final AtomicBoolean seenThrottledWrite = new AtomicBoolean(false);
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                Settings.EMPTY,
                networkService,
                threadPool,
                xContentRegistry(),
                dispatcher,
                clusterSettings,
                new SharedGroupFactory(Settings.EMPTY),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                null,
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            ) {
                @Override
                public ChannelHandler configureServerChannelHandler() {
                    return new HttpChannelHandler(
                        this,
                        handlingSettings,
                        TLSConfig.noTLS(),
                        null,
                        randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
                    ) {
                        @Override
                        protected void initChannel(Channel ch) throws Exception {
                            super.initChannel(ch);
                            // Outbound spy that fails the test if a write happens while the channel is unwritable.
                            ch.pipeline().addBefore("pipelining", "assert-throttling", new ChannelOutboundHandlerAdapter() {
                                private boolean seenNotWritable = false;
                                @Override
                                public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
                                    if (seenNotWritable) {
                                        // track that we saw a write after the channel became unwriteable on a previous write, so we can
                                        // later assert that we indeed saw throttled writes in this test
                                        seenThrottledWrite.set(true);
                                    }
                                    assertTrue("handler should throttle to only write into writable channels", ctx.channel().isWritable());
                                    super.write(ctx, msg, promise);
                                    if (ctx.channel().isWritable() == false) {
                                        seenNotWritable = true;
                                    }
                                }
                            });
                        }
                    };
                }
            }
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
                if (compressed) {
                    request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip"));
                }
                // Snapshot the huge-allocation counter so the delta can be asserted to be zero.
                long numOfHugeAllocations = getHugeAllocationCount();
                final FullHttpResponse response = client.send(remoteAddress.address(), request);
                try {
                    assertThat(getHugeAllocationCount(), equalTo(numOfHugeAllocations));
                    assertThat(response.status(), equalTo(HttpResponseStatus.OK));
                    byte[] bytes = new byte[response.content().readableBytes()];
                    response.content().readBytes(bytes);
                    assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString));
                    assertTrue(seenThrottledWrite.get());
                } finally {
                    response.release();
                }
            }
        }
    }
private long getHugeAllocationCount() {
long numOfHugAllocations = 0;
ByteBufAllocator allocator = NettyAllocator.getAllocator();
assert allocator instanceof NettyAllocator.NoDirectBuffers;
ByteBufAllocator delegate = ((NettyAllocator.NoDirectBuffers) allocator).getDelegate();
if (delegate instanceof PooledByteBufAllocator) {
PooledByteBufAllocatorMetric metric = ((PooledByteBufAllocator) delegate).metric();
numOfHugAllocations = metric.heapArenas().stream().mapToLong(PoolArenaMetric::numHugeAllocations).sum();
}
return numOfHugAllocations;
}
    /**
     * With CORS enabled for origin "elastic.co": a pre-flight OPTIONS from that origin must be
     * answered 200 with the CORS headers, while a request from a different origin must be
     * short-circuited with 403 Forbidden — neither should reach the dispatcher.
     */
    public void testCorsRequest() throws InterruptedException {
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
                throw new AssertionError();
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                logger.error(() -> "--> Unexpected bad request [" + FakeRestRequest.requestToString(channel.request()) + "]", cause);
                throw new AssertionError();
            }
        };
        final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true)
            .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "elastic.co")
            .build();
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                settings,
                networkService,
                threadPool,
                xContentRegistry(),
                dispatcher,
                randomClusterSettings(),
                new SharedGroupFactory(settings),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                null,
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            // Test pre-flight request
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/");
                request.headers().add(CorsHandler.ORIGIN, "elastic.co");
                request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST");
                final FullHttpResponse response = client.send(remoteAddress.address(), request);
                try {
                    assertThat(response.status(), equalTo(HttpResponseStatus.OK));
                    assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("elastic.co"));
                    assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN));
                    assertTrue(response.headers().contains(CorsHandler.DATE));
                } finally {
                    response.release();
                }
            }
            // Test short-circuited request
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
                request.headers().add(CorsHandler.ORIGIN, "elastic2.co");
                final FullHttpResponse response = client.send(remoteAddress.address(), request);
                try {
                    assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN));
                } finally {
                    response.release();
                }
            }
        }
    }
    /**
     * The channel accept-predicate deliberately pollutes the thread context; because the single
     * netty worker thread is reused across requests, this verifies that such tampering never
     * leaks into request dispatching, regardless of whether channels are accepted or rejected.
     */
    public void testChannelAcceptorCannotTamperThreadContext() throws Exception {
        HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                assertThreadContextNotTampered(threadContext);
                channel.sendResponse(new RestResponse(OK, RestResponse.TEXT_CONTENT_TYPE, new BytesArray("done")));
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                logger.error(() -> "--> Unexpected bad request [" + FakeRestRequest.requestToString(channel.request()) + "]", cause);
                throw new AssertionError();
            }
        };
        // there's only one netty worker thread that's reused across client requests
        Settings settings = createBuilderWithPort().put(Netty4Plugin.WORKER_COUNT.getKey(), 1)
            .put(Netty4Plugin.SETTING_HTTP_WORKER_COUNT.getKey(), 0)
            .build();
        AtomicBoolean acceptChannel = new AtomicBoolean();
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                settings,
                networkService,
                threadPool,
                xContentRegistry(),
                dispatcher,
                randomClusterSettings(),
                new SharedGroupFactory(settings),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                new AcceptChannelHandler.AcceptPredicate() {
                    @Override
                    public void setBoundAddress(BoundTransportAddress boundHttpTransportAddress) {}
                    @Override
                    public boolean test(String profile, InetSocketAddress peerAddress) {
                        // Intentionally dirty the context here; dispatch must still see it clean.
                        assertThreadContextNotTampered(threadPool.getThreadContext());
                        tamperThreadContext(threadPool.getThreadContext());
                        return acceptChannel.get();
                    }
                },
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            )
        ) {
            transport.start();
            int nRetries = randomIntBetween(7, 9);
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                for (int i = 0; i < nRetries; i++) {
                    acceptChannel.set(randomBoolean());
                    var responses = client.get(randomFrom(transport.boundAddress().boundAddresses()).address(), "/test/url");
                    try {
                        if (acceptChannel.get()) {
                            assertThat(responses, iterableWithSize(1));
                            assertThat(responses.iterator().next().status(), equalTo(HttpResponseStatus.OK));
                        } else {
                            // Rejected channels produce no response at all.
                            assertThat(responses, emptyIterable());
                        }
                    } finally {
                        for (FullHttpResponse response : responses) {
                            response.release();
                        }
                    }
                }
            }
        }
    }
    /**
     * A client that connects but never sends any bytes must have its channel closed by the
     * server once the configured HTTP read timeout elapses, without anything being dispatched.
     */
    public void testReadTimeout() throws Exception {
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
                throw new AssertionError("Should not have received a dispatched request");
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                logger.error(() -> "--> Unexpected bad request [" + FakeRestRequest.requestToString(channel.request()) + "]", cause);
                throw new AssertionError("Should not have received a dispatched request");
            }
        };
        // Short read timeout so the test completes quickly.
        Settings settings = createBuilderWithPort().put(
            HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(),
            new TimeValue(randomIntBetween(100, 300))
        ).build();
        NioEventLoopGroup group = new NioEventLoopGroup();
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                settings,
                networkService,
                threadPool,
                xContentRegistry(),
                dispatcher,
                randomClusterSettings(),
                new SharedGroupFactory(settings),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                null,
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            CountDownLatch channelClosedLatch = new CountDownLatch(1);
            // Raw client that connects and stays silent — triggers the server-side read timeout.
            Bootstrap clientBootstrap = new Bootstrap().option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator())
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        ch.pipeline().addLast(new ChannelHandlerAdapter() {
                        });
                    }
                })
                .group(group);
            ChannelFuture connect = clientBootstrap.connect(remoteAddress.address());
            connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown());
            assertTrue("Channel should be closed due to read timeout", channelClosedLatch.await(1, TimeUnit.MINUTES));
        } finally {
            group.shutdownGracefully().await();
        }
    }
    /**
     * A HEAD request to an endpoint that serves a chunked response must return 200 with an
     * empty body, and the chunk-producing code must never be invoked.
     */
    public void testHeadRequestToChunkedApi() throws InterruptedException {
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                try {
                    // The chunk supplier throws on use; for HEAD it must never be pulled.
                    channel.sendResponse(
                        RestResponse.chunked(OK, ChunkedRestResponseBodyPart.fromXContent(ignored -> Iterators.single((builder, params) -> {
                            throw new AssertionError("should not be called for HEAD REQUEST");
                        }), ToXContent.EMPTY_PARAMS, channel), null)
                    );
                } catch (IOException e) {
                    throw new AssertionError(e);
                }
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                throw new AssertionError();
            }
        };
        final Settings settings = createSettings();
        try (
            Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
                settings,
                networkService,
                threadPool,
                xContentRegistry(),
                dispatcher,
                clusterSettings,
                new SharedGroupFactory(settings),
                TelemetryProvider.NOOP,
                TLSConfig.noTLS(),
                null,
                randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                final String url = "/some-head-endpoint";
                final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.HEAD, url);
                final FullHttpResponse response = client.send(remoteAddress.address(), request);
                try {
                    assertThat(response.status(), equalTo(HttpResponseStatus.OK));
                    assertFalse(response.content().isReadable());
                } finally {
                    response.release();
                }
            }
        }
    }
    /**
     * Successful header validation: the validator sees the request unaltered, may stash data in
     * the thread context, and that validation context is restored (only) when the request is
     * dispatched — exercised across GET/POST/PUT/DELETE/PATCH.
     */
    public void testHttpHeadersSuccessfulValidation() throws InterruptedException {
        // Shared references let the dispatcher/validator assert against the per-iteration request.
        final AtomicReference<HttpMethod> httpMethodReference = new AtomicReference<>();
        final AtomicReference<String> urlReference = new AtomicReference<>();
        final AtomicReference<String> requestHeaderReference = new AtomicReference<>();
        final AtomicReference<String> requestHeaderValueReference = new AtomicReference<>();
        final AtomicReference<String> contextHeaderReference = new AtomicReference<>();
        final AtomicReference<String> contextHeaderValueReference = new AtomicReference<>();
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                assertThat(request.getHttpRequest().uri(), is(urlReference.get()));
                assertThat(request.getHttpRequest().header(requestHeaderReference.get()), is(requestHeaderValueReference.get()));
                assertThat(request.getHttpRequest().method(), is(translateRequestMethod(httpMethodReference.get())));
                // validation context is restored
                assertThat(threadPool.getThreadContext().getHeader(contextHeaderReference.get()), is(contextHeaderValueReference.get()));
                assertThat(threadPool.getThreadContext().getTransient(contextHeaderReference.get()), is(contextHeaderValueReference.get()));
                // return some response
                channel.sendResponse(new RestResponse(OK, RestResponse.TEXT_CONTENT_TYPE, new BytesArray("done")));
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                throw new AssertionError("A validated request should not dispatch as bad");
            }
        };
        final HttpValidator httpValidator = (httpRequest, channel, validationListener) -> {
            // assert that the validator sees the request unaltered
            assertThat(httpRequest.uri(), is(urlReference.get()));
            assertThat(httpRequest.headers().get(requestHeaderReference.get()), is(requestHeaderValueReference.get()));
            assertThat(httpRequest.method(), is(httpMethodReference.get()));
            // make validation alter the thread context
            contextHeaderReference.set(randomAlphaOfLengthBetween(4, 8));
            contextHeaderValueReference.set(randomAlphaOfLengthBetween(4, 8));
            threadPool.getThreadContext().putHeader(contextHeaderReference.get(), contextHeaderValueReference.get());
            threadPool.getThreadContext().putTransient(contextHeaderReference.get(), contextHeaderValueReference.get());
            // validate successfully
            validationListener.onResponse(null);
        };
        try (
            Netty4HttpServerTransport transport = getTestNetty4HttpServerTransport(
                dispatcher,
                httpValidator,
                (restRequest, threadContext) -> {
                    // assert the thread context does not yet contain anything that validation set in
                    assertThat(threadPool.getThreadContext().getHeader(contextHeaderReference.get()), nullValue());
                    assertThat(threadPool.getThreadContext().getTransient(contextHeaderReference.get()), nullValue());
                    ThreadContext.StoredContext storedAuthenticatedContext = HttpHeadersAuthenticatorUtils.extractAuthenticationContext(
                        restRequest.getHttpRequest()
                    );
                    assertThat(storedAuthenticatedContext, notNullValue());
                    // restore validation context
                    storedAuthenticatedContext.restore();
                    // assert that now, after restoring the validation context, it does contain what validation put in
                    assertThat(
                        threadPool.getThreadContext().getHeader(contextHeaderReference.get()),
                        is(contextHeaderValueReference.get())
                    );
                    assertThat(
                        threadPool.getThreadContext().getTransient(contextHeaderReference.get()),
                        is(contextHeaderValueReference.get())
                    );
                }
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            for (HttpMethod httpMethod : List.of(HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE, HttpMethod.PATCH)) {
                httpMethodReference.set(httpMethod);
                urlReference.set(
                    "/"
                        + randomAlphaOfLengthBetween(4, 8)
                        + "?X-"
                        + randomAlphaOfLengthBetween(4, 8)
                        + "="
                        + randomAlphaOfLengthBetween(4, 8)
                );
                requestHeaderReference.set("X-" + randomAlphaOfLengthBetween(4, 8));
                requestHeaderValueReference.set(randomAlphaOfLengthBetween(4, 8));
                try (Netty4HttpClient client = new Netty4HttpClient()) {
                    FullHttpRequest request = new DefaultFullHttpRequest(
                        HttpVersion.HTTP_1_1,
                        httpMethodReference.get(),
                        urlReference.get()
                    );
                    request.headers().set(requestHeaderReference.get(), requestHeaderValueReference.get());
                    FullHttpResponse response = client.send(remoteAddress.address(), request);
                    assertThat(response.status(), is(HttpResponseStatus.OK));
                }
            }
        }
    }
    /**
     * A request body twice the configured max content length must be rejected with
     * 413 Request Entity Too Large before validation or dispatch ever runs.
     */
    public void testLargeRequestIsNeverDispatched() throws Exception {
        final String uri = "/"
            + randomAlphaOfLengthBetween(4, 8)
            + "?X-"
            + randomAlphaOfLengthBetween(4, 8)
            + "="
            + randomAlphaOfLengthBetween(4, 8);
        final Settings settings = createBuilderWithPort().put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), "1mb")
            .build();
        final String requestString = randomAlphaOfLength(2 * 1024 * 1024); // request size is twice the limit
        // Every dispatcher/validation callback is a test failure — the request must never get that far.
        final HttpServerTransport.Dispatcher dispatcher = new AggregatingDispatcher() {
            @Override
            public void dispatchAggregatedRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                throw new AssertionError("Request dispatched but shouldn't");
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                throw new AssertionError("Request dispatched but shouldn't");
            }
        };
        try (
            Netty4HttpServerTransport transport = getTestNetty4HttpServerTransport(
                settings,
                dispatcher,
                (r, c, l) -> l.onResponse(null),
                (restRequest, threadContext) -> {
                    throw new AssertionError("Request dispatched but shouldn't");
                }
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                Collection<FullHttpResponse> response = client.post(remoteAddress.address(), List.of(Tuple.tuple(uri, requestString)));
                assertThat(response, hasSize(1));
                assertThat(response.stream().findFirst().get().status(), is(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE));
            }
        }
    }
    /**
     * Failed header validation: the request is routed to {@code dispatchBadRequest} wrapped in
     * an {@link HttpHeadersValidationException}, its body is dropped, and the client receives
     * the validator's error status (401 here) — exercised across all common HTTP methods.
     */
    public void testHttpHeadersFailedValidation() throws InterruptedException {
        final AtomicReference<HttpMethod> httpMethodReference = new AtomicReference<>();
        final AtomicReference<String> urlReference = new AtomicReference<>();
        final AtomicReference<String> headerReference = new AtomicReference<>();
        final AtomicReference<String> headerValueReference = new AtomicReference<>();
        final AtomicReference<Exception> validationResultExceptionReference = new AtomicReference<>();
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                throw new AssertionError("Request that failed validation should not be dispatched");
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                assertThat(cause, instanceOf(HttpHeadersValidationException.class));
                assertThat(((ElasticsearchWrapperException) cause).getCause(), is(validationResultExceptionReference.get()));
                assertThat(channel.request().getHttpRequest().uri(), is(urlReference.get()));
                assertThat(channel.request().getHttpRequest().header(headerReference.get()), is(headerValueReference.get()));
                assertThat(channel.request().getHttpRequest().method(), is(translateRequestMethod(httpMethodReference.get())));
                // assert content is dropped
                assertThat(channel.request().getHttpRequest().body().asFull().bytes().utf8ToString(), is(""));
                try {
                    channel.sendResponse(new RestResponse(channel, (Exception) ((ElasticsearchWrapperException) cause).getCause()));
                } catch (IOException e) {
                    throw new AssertionError(e);
                }
            }
        };
        final HttpValidator failureHeadersValidator = (httpRequest, channel, validationResultListener) -> {
            // assert that the validator sees the request unaltered
            assertThat(httpRequest.uri(), is(urlReference.get()));
            assertThat(httpRequest.headers().get(headerReference.get()), is(headerValueReference.get()));
            assertThat(httpRequest.method(), is(httpMethodReference.get()));
            // failed validation
            validationResultListener.onFailure(validationResultExceptionReference.get());
        };
        try (
            Netty4HttpServerTransport transport = getTestNetty4HttpServerTransport(
                dispatcher,
                failureHeadersValidator,
                (restRequest, threadContext) -> {
                    throw new AssertionError("Request that failed validation should not be dispatched");
                }
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            for (HttpMethod httpMethod : List.of(HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE, HttpMethod.PATCH)) {
                httpMethodReference.set(httpMethod);
                urlReference.set(
                    "/"
                        + randomAlphaOfLengthBetween(4, 8)
                        + "?X-"
                        + randomAlphaOfLengthBetween(4, 8)
                        + "="
                        + randomAlphaOfLengthBetween(4, 8)
                );
                validationResultExceptionReference.set(new ElasticsearchSecurityException("Boom", UNAUTHORIZED));
                try (Netty4HttpClient client = new Netty4HttpClient()) {
                    ByteBuf content = Unpooled.copiedBuffer(randomAlphaOfLengthBetween(1, 32), StandardCharsets.UTF_8);
                    FullHttpRequest request = new DefaultFullHttpRequest(
                        HttpVersion.HTTP_1_1,
                        httpMethodReference.get(),
                        urlReference.get(),
                        content
                    );
                    // submit the request with some header custom header
                    headerReference.set("X-" + randomAlphaOfLengthBetween(4, 8));
                    headerValueReference.set(randomAlphaOfLengthBetween(4, 8));
                    request.headers().set(headerReference.get(), headerValueReference.get());
                    FullHttpResponse response = client.send(remoteAddress.address(), request);
                    assertThat(response.status(), is(HttpResponseStatus.UNAUTHORIZED));
                }
            }
        }
    }
    /**
     * Fires a large batch of requests down a single worker/channel where validation randomly
     * passes ("X-Auth=OK") or fails ("X-Auth=NOK"), possibly completing asynchronously on the
     * event loop, and asserts every request is dispatched (or rejected) exactly once.
     */
    public void testMultipleValidationsOnTheSameChannel() throws InterruptedException {
        // ensure that there is a single channel active
        final Settings settings = createBuilderWithPort().put(Netty4Plugin.SETTING_HTTP_WORKER_COUNT.getKey(), 1).build();
        final Set<String> okURIs = ConcurrentHashMap.newKeySet();
        final Set<String> nokURIs = ConcurrentHashMap.newKeySet();
        final SetOnce<Channel> channelSetOnce = new SetOnce<>();
        final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
            @Override
            public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
                assertThat(request.uri(), in(okURIs));
                // assert validated request is dispatched
                okURIs.remove(request.uri());
                channel.sendResponse(new RestResponse(OK, RestResponse.TEXT_CONTENT_TYPE, new BytesArray("dispatch OK")));
            }
            @Override
            public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
                // assert unvalidated request is NOT dispatched
                assertThat(channel.request().uri(), in(nokURIs));
                nokURIs.remove(channel.request().uri());
                try {
                    channel.sendResponse(new RestResponse(channel, (Exception) ((ElasticsearchWrapperException) cause).getCause()));
                } catch (IOException e) {
                    throw new AssertionError(e);
                }
            }
        };
        final HttpValidator headersValidator = (httpPreRequest, channel, validationListener) -> {
            // assert all validations run on the same channel
            channelSetOnce.trySet(channel);
            assertThat(channelSetOnce.get(), is(channel));
            // some requests are validated while others are not
            if (httpPreRequest.uri().contains("X-Auth=OK")) {
                randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, channel.eventLoop()).execute(() -> validationListener.onResponse(null));
            } else if (httpPreRequest.uri().contains("X-Auth=NOK")) {
                randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, channel.eventLoop()).execute(
                    () -> validationListener.onFailure(new ElasticsearchSecurityException("Boom", UNAUTHORIZED))
                );
            } else {
                throw new AssertionError("Unrecognized URI");
            }
        };
        try (
            Netty4HttpServerTransport transport = getTestNetty4HttpServerTransport(
                settings,
                dispatcher,
                headersValidator,
                (restRequest, threadContext) -> {}
            )
        ) {
            transport.start();
            final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
            final int totalRequestCount = randomIntBetween(64, 128);
            // Build a shuffled mix of pass/fail URIs, tagged by Request-Id for uniqueness.
            for (int requestId = 0; requestId < totalRequestCount; requestId++) {
                String uri = "/" + randomAlphaOfLengthBetween(4, 8) + "?Request-Id=" + requestId;
                if (randomBoolean()) {
                    uri = uri + "&X-Auth=OK";
                    okURIs.add(uri);
                } else {
                    uri = uri + "&X-Auth=NOK";
                    nokURIs.add(uri);
                }
            }
            List<String> allURIs = new ArrayList<>();
            allURIs.addAll(okURIs);
            allURIs.addAll(nokURIs);
            Collections.shuffle(allURIs, getRandom());
            assertThat(allURIs.size(), is(totalRequestCount));
            try (Netty4HttpClient client = new Netty4HttpClient()) {
                client.get(remoteAddress.address(), allURIs.toArray(new String[0]));
                // assert all validations have been dispatched (or not) correctly
                assertThat(okURIs.size(), is(0));
                assertThat(nokURIs.size(), is(0));
            }
        }
    }
/** Variant of the respond-after-close scenario where the HTTP client cancels the in-flight request. */
public void testRespondAfterServiceCloseWithClientCancel() throws Exception {
    runRespondAfterServiceCloseTest(true);
}
/** Variant of the respond-after-close scenario where the server's shutdown grace period expires instead. */
public void testRespondAfterServiceCloseWithServerCancel() throws Exception {
    runRespondAfterServiceCloseTest(false);
}
/**
 * Shared body for the respond-after-close tests: starts a transport whose dispatcher defers
 * sending a (chunked, empty) response until after the transport has been closed, then checks
 * that the client observes the appropriate failure.
 *
 * @param clientCancel when {@code true} the client cancels the request (expect
 *                     {@link CancellationException}); when {@code false} the server is given a
 *                     1ms shutdown grace period so it force-closes the connection (expect
 *                     {@link ConnectionClosedException})
 */
private void runRespondAfterServiceCloseTest(boolean clientCancel) throws Exception {
    final String url = "/" + randomIdentifier();
    // released when the deferred response body is released, i.e. the response lifecycle completed
    final CountDownLatch responseReleasedLatch = new CountDownLatch(1);
    // completed by the test after transport.close(); the dispatcher sends its response only then
    final SubscribableListener<Void> transportClosedFuture = new SubscribableListener<>();
    // released once the dispatcher has received the request, so the test knows it is in flight
    final CountDownLatch handlingRequestLatch = new CountDownLatch(1);
    final HttpServerTransport.Dispatcher dispatcher = new AggregatingDispatcher() {
        @Override
        public void dispatchAggregatedRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
            assertEquals(request.uri(), url);
            final var response = RestResponse.chunked(
                OK,
                ChunkedRestResponseBodyPart.fromTextChunks(RestResponse.TEXT_CONTENT_TYPE, Collections.emptyIterator()),
                responseReleasedLatch::countDown
            );
            // defer the response until the transport has been closed by the test body
            transportClosedFuture.addListener(ActionListener.running(() -> channel.sendResponse(response)));
            handlingRequestLatch.countDown();
        }

        @Override
        public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
            fail(cause, "--> Unexpected bad request [%s]", FakeRestRequest.requestToString(channel.request()));
        }
    };
    try (
        Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
            // in the server-cancel case, a tiny grace period makes close() drop the connection quickly
            clientCancel
                ? Settings.EMPTY
                : Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), TimeValue.timeValueMillis(1)).build(),
            networkService,
            threadPool,
            xContentRegistry(),
            dispatcher,
            clusterSettings,
            new SharedGroupFactory(Settings.EMPTY),
            TelemetryProvider.NOOP,
            TLSConfig.noTLS(),
            null,
            // randomly exercise both the no-op validator and the no-validator code paths
            randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null)
        )
    ) {
        transport.start();
        final var address = randomFrom(transport.boundAddress().boundAddresses()).address();
        try (var client = RestClient.builder(new HttpHost(address.getAddress(), address.getPort())).build()) {
            final var responseExceptionFuture = new PlainActionFuture<Exception>();
            final var cancellable = client.performRequestAsync(
                new Request("GET", url),
                ActionTestUtils.wrapAsRestResponseListener(ActionTestUtils.assertNoSuccessListener(responseExceptionFuture::onResponse))
            );
            // wait until the dispatcher holds the request before closing anything
            safeAwait(handlingRequestLatch);
            if (clientCancel) {
                threadPool.generic().execute(cancellable::cancel);
            }
            // close the transport first, then allow the deferred response to be sent
            transport.close();
            transportClosedFuture.onResponse(null);
            safeAwait(responseReleasedLatch);
            final var responseException = safeGet(responseExceptionFuture);
            if (clientCancel) {
                assertThat(responseException, instanceOf(CancellationException.class));
            } else {
                assertThat(responseException, instanceOf(ConnectionClosedException.class));
            }
        }
    }
}
/**
 * Convenience overload that builds the transport with this test's default settings
 * (an ephemeral port from {@code getPortRange()}).
 */
private Netty4HttpServerTransport getTestNetty4HttpServerTransport(
    HttpServerTransport.Dispatcher dispatcher,
    HttpValidator httpValidator,
    BiConsumer<RestRequest, ThreadContext> populatePerRequestContext
) {
    final Settings defaultSettings = createSettings();
    return getTestNetty4HttpServerTransport(defaultSettings, dispatcher, httpValidator, populatePerRequestContext);
}
/**
 * Builds a {@link Netty4HttpServerTransport} wired with the test's shared network service,
 * thread pool and cluster settings, no TLS, and the given validator. The returned instance
 * overrides {@code populatePerRequestThreadContext} to delegate to the supplied
 * {@code populatePerRequestContext} hook so tests can inject per-request thread-context state.
 */
private Netty4HttpServerTransport getTestNetty4HttpServerTransport(
    Settings settings,
    HttpServerTransport.Dispatcher dispatcher,
    HttpValidator httpValidator,
    BiConsumer<RestRequest, ThreadContext> populatePerRequestContext
) {
    return new Netty4HttpServerTransport(
        settings,
        networkService,
        threadPool,
        xContentRegistry(),
        dispatcher,
        clusterSettings,
        new SharedGroupFactory(settings),
        TelemetryProvider.NOOP,
        TLSConfig.noTLS(),
        null,
        httpValidator
    ) {
        @Override
        protected void populatePerRequestThreadContext(RestRequest restRequest, ThreadContext threadContext) {
            populatePerRequestContext.accept(restRequest, threadContext);
        }
    };
}
/** Default transport settings: just an ephemeral HTTP port. */
private Settings createSettings() {
    final Settings.Builder builder = createBuilderWithPort();
    return builder.build();
}
/** Settings builder pre-populated with the test's HTTP port range. */
private Settings.Builder createBuilderWithPort() {
    final String httpPortKey = HttpTransportSettings.SETTING_HTTP_PORT.getKey();
    return Settings.builder().put(httpPortKey, getPortRange());
}
/**
 * Maps a Netty {@code HttpMethod} to the corresponding {@code RestRequest.Method}.
 * Only the methods exercised by these tests are supported.
 *
 * @throws IllegalArgumentException for any method other than GET/POST/PUT/DELETE/PATCH
 */
private static RestRequest.Method translateRequestMethod(HttpMethod method) {
    // Netty's HttpMethod exposes singleton constants, so identity comparison is safe here
    if (method == HttpMethod.GET) {
        return RestRequest.Method.GET;
    } else if (method == HttpMethod.POST) {
        return RestRequest.Method.POST;
    } else if (method == HttpMethod.PUT) {
        return RestRequest.Method.PUT;
    } else if (method == HttpMethod.DELETE) {
        return RestRequest.Method.DELETE;
    } else if (method == HttpMethod.PATCH) {
        return RestRequest.Method.PATCH;
    }
    throw new IllegalArgumentException("Unexpected http method: " + method);
}
/**
 * Randomly mutates the given thread context via up to three independent channels (request
 * header, transient header, response header), guaranteeing that at least one mutation happens.
 * The order of {@code randomBoolean()} draws is deliberately fixed so seeded test runs
 * remain reproducible.
 */
private static void tamperThreadContext(ThreadContext threadContext) {
    boolean mutated = false;
    if (randomBoolean()) {
        threadContext.putHeader(randomAlphaOfLength(16), "tampered with request header");
        mutated = true;
    }
    if (randomBoolean()) {
        threadContext.putTransient(randomAlphaOfLength(16), "tampered with transient request header");
        mutated = true;
    }
    // fall back to a response-header mutation whenever nothing has been tampered with yet
    if (randomBoolean() || mutated == false) {
        threadContext.addResponseHeader(randomAlphaOfLength(8), "tampered with response header");
    }
}
/**
 * Asserts that the thread context is still pristine (no headers or transients were added)
 * and that the assertion runs on a transport thread.
 */
private static void assertThreadContextNotTampered(ThreadContext threadContext) {
    if (threadContext.isDefaultContext() == false) {
        throw new AssertionError("tampered thread context");
    }
    Transports.assertTransportThread();
}
}
| Netty4HttpServerTransportTests |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/messaging/StompWebSocketIntegrationTests.java | {
"start": 12717,
"end": 13246
} | class ____ implements Filter {
private final Principal user;
private UserFilter(Principal user) {
this.user = user;
}
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
request = new HttpServletRequestWrapper((HttpServletRequest) request) {
@Override
public Principal getUserPrincipal() {
return user;
}
};
chain.doFilter(request, response);
}
}
@IntegrationTestController
static | UserFilter |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldHaveSameHourAs_create_Test.java | {
"start": 1171,
"end": 2162
} | class ____ {
@Test
void should_create_error_message_localtime() {
// GIVEN
ErrorMessageFactory factory = shouldHaveSameHourAs(LocalTime.of(12, 0), LocalTime.of(13, 0));
// WHEN
String errorMessage = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(errorMessage).isEqualTo("[Test] %nExpecting actual:%n 12:00%nto have same hour as:%n 13:00%nbut had not.".formatted());
}
@Test
void should_create_error_message_offset() {
// GIVEN
ErrorMessageFactory factory = shouldHaveSameHourAs(OffsetTime.of(12, 0, 0, 0, ZoneOffset.UTC),
OffsetTime.of(13, 0, 0, 0, ZoneOffset.UTC));
// WHEN
String errorMessage = factory.create(new TextDescription("Test"), STANDARD_REPRESENTATION);
// THEN
then(errorMessage).isEqualTo("[Test] %nExpecting actual:%n 12:00Z%nto have same hour as:%n 13:00Z%nbut had not.".formatted());
}
}
| ShouldHaveSameHourAs_create_Test |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/repository/SchemaResolveVisitorFactory.java | {
"start": 22245,
"end": 88234
} | class ____ extends SQLASTVisitorAdapter implements SchemaResolveVisitor {
private int options;
private SchemaRepository repository;
private Context context;
public SQLResolveVisitor(SchemaRepository repository, int options) {
this.repository = repository;
this.options = options;
}
public boolean visit(SQLSelectItem x) {
SQLExpr expr = x.getExpr();
if (expr instanceof SQLIdentifierExpr) {
resolveIdent(this, (SQLIdentifierExpr) expr);
return false;
}
if (expr instanceof SQLPropertyExpr) {
resolve(this, (SQLPropertyExpr) expr);
return false;
}
return true;
}
@Override
public boolean isEnabled(Option option) {
return (options & option.mask) != 0;
}
public int getOptions() {
return options;
}
@Override
public Context getContext() {
return context;
}
public Context createContext(SQLObject object) {
return this.context = new Context(object, context);
}
@Override
public void popContext() {
if (context != null) {
context = context.parent;
}
}
public SchemaRepository getRepository() {
return repository;
}
}
static void resolve(SchemaResolveVisitor visitor, SQLCreateTableStatement x) {
SchemaResolveVisitor.Context ctx = visitor.createContext(x);
SQLExprTableSource table = x.getTableSource();
ctx.setTableSource(table);
table.accept(visitor);
List<SQLTableElement> elements = x.getTableElementList();
for (int i = 0; i < elements.size(); i++) {
SQLTableElement e = elements.get(i);
if (e instanceof SQLColumnDefinition) {
SQLColumnDefinition columnn = (SQLColumnDefinition) e;
SQLName columnnName = columnn.getName();
if (columnnName instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identifierExpr = (SQLIdentifierExpr) columnnName;
identifierExpr.setResolvedTableSource(table);
identifierExpr.setResolvedColumn(columnn);
}
} else if (e instanceof SQLUniqueConstraint) {
List<SQLSelectOrderByItem> columns = ((SQLUniqueConstraint) e).getColumns();
for (SQLSelectOrderByItem orderByItem : columns) {
SQLExpr orderByItemExpr = orderByItem.getExpr();
if (orderByItemExpr instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identifierExpr = (SQLIdentifierExpr) orderByItemExpr;
identifierExpr.setResolvedTableSource(table);
SQLColumnDefinition column = x.findColumn(identifierExpr.nameHashCode64());
if (column != null) {
identifierExpr.setResolvedColumn(column);
}
}
}
} else {
e.accept(visitor);
}
}
SQLSelect select = x.getSelect();
if (select != null) {
visitor.visit(select);
}
SchemaRepository repository = visitor.getRepository();
if (repository != null) {
repository.acceptCreateTable(x);
}
visitor.popContext();
SQLExprTableSource like = x.getLike();
if (like != null) {
like.accept(visitor);
}
}
static void resolve(SchemaResolveVisitor visitor, SQLUpdateStatement x) {
SchemaResolveVisitor.Context ctx = visitor.createContext(x);
SQLWithSubqueryClause with = x.getWith();
if (with != null) {
with.accept(visitor);
}
SQLTableSource table = x.getTableSource();
SQLTableSource from = x.getFrom();
ctx.setTableSource(table);
ctx.setFrom(from);
table.accept(visitor);
if (from != null) {
from.accept(visitor);
}
List<SQLUpdateSetItem> items = x.getItems();
for (SQLUpdateSetItem item : items) {
SQLExpr column = item.getColumn();
if (column instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identifierExpr = (SQLIdentifierExpr) column;
identifierExpr.setResolvedTableSource(table);
visitor.visit(identifierExpr);
} else if (column instanceof SQLListExpr) {
SQLListExpr columnGroup = (SQLListExpr) column;
for (SQLExpr columnGroupItem : columnGroup.getItems()) {
if (columnGroupItem instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identifierExpr = (SQLIdentifierExpr) columnGroupItem;
identifierExpr.setResolvedTableSource(table);
visitor.visit(identifierExpr);
} else {
columnGroupItem.accept(visitor);
}
}
} else {
column.accept(visitor);
}
SQLExpr value = item.getValue();
if (value != null) {
value.accept(visitor);
}
}
SQLExpr where = x.getWhere();
if (where != null) {
where.accept(visitor);
}
SQLOrderBy orderBy = x.getOrderBy();
if (orderBy != null) {
orderBy.accept(visitor);
}
for (SQLExpr sqlExpr : x.getReturning()) {
sqlExpr.accept(visitor);
}
visitor.popContext();
}
static void resolve(SchemaResolveVisitor visitor, SQLDeleteStatement x) {
SchemaResolveVisitor.Context ctx = visitor.createContext(x);
SQLWithSubqueryClause with = x.getWith();
if (with != null) {
visitor.visit(with);
}
SQLTableSource table = x.getTableSource();
SQLTableSource from = x.getFrom();
if (from == null) {
from = x.getUsing();
}
if (table == null && from != null) {
table = from;
from = null;
}
if (from != null) {
ctx.setFrom(from);
from.accept(visitor);
}
if (table != null) {
if (from != null && table instanceof SQLExprTableSource) {
SQLExpr tableExpr = ((SQLExprTableSource) table).getExpr();
if (tableExpr instanceof SQLPropertyExpr
&& ((SQLPropertyExpr) tableExpr).getName().equals("*")) {
String alias = ((SQLPropertyExpr) tableExpr).getOwnernName();
SQLTableSource refTableSource = from.findTableSource(alias);
if (refTableSource != null) {
((SQLPropertyExpr) tableExpr).setResolvedTableSource(refTableSource);
}
}
}
table.accept(visitor);
ctx.setTableSource(table);
}
SQLExpr where = x.getWhere();
if (where != null) {
where.accept(visitor);
}
visitor.popContext();
}
static void resolve(SchemaResolveVisitor visitor, SQLInsertStatement x) {
SchemaResolveVisitor.Context ctx = visitor.createContext(x);
SQLWithSubqueryClause with = x.getWith();
if (with != null) {
visitor.visit(with);
}
SQLTableSource table = x.getTableSource();
ctx.setTableSource(table);
if (table != null) {
table.accept(visitor);
}
for (SQLExpr column : x.getColumns()) {
column.accept(visitor);
}
if (x instanceof HiveInsertStatement) {
for (SQLAssignItem item : ((HiveInsertStatement) x).getPartitions()) {
item.accept(visitor);
}
}
for (SQLInsertStatement.ValuesClause valuesClause : x.getValuesList()) {
valuesClause.accept(visitor);
}
SQLSelect query = x.getQuery();
if (query != null) {
visitor.visit(query);
}
if (x instanceof OracleInsertStatement) {
SQLObject returning = ((OracleInsertStatement) x).getReturning();
if (returning != null) {
returning.accept(visitor);
}
}
visitor.popContext();
}
static void resolveIdent(SchemaResolveVisitor visitor, SQLIdentifierExpr x) {
SchemaResolveVisitor.Context ctx = visitor.getContext();
if (ctx == null) {
return;
}
String ident = x.getName();
long hash = x.nameHashCode64();
SQLTableSource tableSource = null;
if ((hash == FnvHash.Constants.LEVEL || hash == FnvHash.Constants.CONNECT_BY_ISCYCLE)
&& ctx.object instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock queryBlock = (SQLSelectQueryBlock) ctx.object;
if (queryBlock.getStartWith() != null
|| queryBlock.getConnectBy() != null) {
return;
}
}
SQLTableSource ctxTable = ctx.getTableSource();
if (ctxTable instanceof SQLJoinTableSource) {
SQLJoinTableSource join = (SQLJoinTableSource) ctxTable;
tableSource = join.findTableSourceWithColumn(hash, ident, visitor.getOptions());
if (tableSource == null) {
final SQLTableSource left = join.getLeft(), right = join.getRight();
if (left instanceof SQLSubqueryTableSource
&& right instanceof SQLExprTableSource) {
SQLSelect leftSelect = ((SQLSubqueryTableSource) left).getSelect();
if (leftSelect.getQuery() instanceof SQLSelectQueryBlock) {
boolean hasAllColumn = ((SQLSelectQueryBlock) leftSelect.getQuery()).selectItemHasAllColumn();
if (!hasAllColumn) {
tableSource = right;
}
}
} else if (right instanceof SQLSubqueryTableSource
&& left instanceof SQLExprTableSource) {
SQLSelect rightSelect = ((SQLSubqueryTableSource) right).getSelect();
if (rightSelect.getQuery() instanceof SQLSelectQueryBlock) {
boolean hasAllColumn = ((SQLSelectQueryBlock) rightSelect.getQuery()).selectItemHasAllColumn();
if (!hasAllColumn) {
tableSource = left;
}
}
} else if (left instanceof SQLExprTableSource && right instanceof SQLExprTableSource) {
SQLExprTableSource leftExprTableSource = (SQLExprTableSource) left;
SQLExprTableSource rightExprTableSource = (SQLExprTableSource) right;
if (leftExprTableSource.getSchemaObject() != null
&& rightExprTableSource.getSchemaObject() == null) {
tableSource = rightExprTableSource;
} else if (rightExprTableSource.getSchemaObject() != null
&& leftExprTableSource.getSchemaObject() == null) {
tableSource = leftExprTableSource;
}
}
}
} else if (ctxTable instanceof SQLSubqueryTableSource) {
tableSource = ctxTable.findTableSourceWithColumn(hash, ident, visitor.getOptions());
} else if (ctxTable instanceof SQLLateralViewTableSource) {
tableSource = ctxTable.findTableSourceWithColumn(hash, ident, visitor.getOptions());
if (tableSource == null) {
tableSource = ((SQLLateralViewTableSource) ctxTable).getTableSource();
}
} else {
for (SchemaResolveVisitor.Context parentCtx = ctx;
parentCtx != null;
parentCtx = parentCtx.parent) {
SQLDeclareItem declareItem = parentCtx.findDeclare(hash);
if (declareItem != null) {
x.setResolvedDeclareItem(declareItem);
return;
}
if (parentCtx.object instanceof SQLBlockStatement) {
SQLBlockStatement block = (SQLBlockStatement) parentCtx.object;
SQLParameter parameter = block.findParameter(hash);
if (parameter != null) {
x.setResolvedParameter(parameter);
return;
}
} else if (parentCtx.object instanceof SQLCreateProcedureStatement) {
SQLCreateProcedureStatement createProc = (SQLCreateProcedureStatement) parentCtx.object;
SQLParameter parameter = createProc.findParameter(hash);
if (parameter != null) {
x.setResolvedParameter(parameter);
return;
}
}
}
tableSource = ctxTable;
if (tableSource instanceof SQLExprTableSource) {
SchemaObject table = ((SQLExprTableSource) tableSource).getSchemaObject();
if (table != null) {
if (table.findColumn(hash) == null) {
SQLCreateTableStatement createStmt = null;
{
SQLStatement smt = table.getStatement();
if (smt instanceof SQLCreateTableStatement) {
createStmt = (SQLCreateTableStatement) smt;
}
}
if (createStmt != null && createStmt.getTableElementList().size() > 0) {
tableSource = null; // maybe parent
}
}
}
}
}
if (tableSource instanceof SQLExprTableSource) {
SQLExpr expr = ((SQLExprTableSource) tableSource).getExpr();
if (expr instanceof SQLMethodInvokeExpr) {
SQLMethodInvokeExpr func = (SQLMethodInvokeExpr) expr;
if (func.methodNameHashCode64() == FnvHash.Constants.ANN) {
expr = func.getArguments().get(0);
}
}
if (expr instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identExpr = (SQLIdentifierExpr) expr;
long identHash = identExpr.nameHashCode64();
tableSource = unwrapAlias(ctx, tableSource, identHash);
}
}
if (tableSource != null) {
x.setResolvedTableSource(tableSource);
SQLObject column = tableSource.findColumn(hash);
if (column == null) {
column = tableSource.resolveColumn(hash);
}
if (column != null) {
x.setResolvedColumn(column);
}
if (ctxTable instanceof SQLJoinTableSource) {
String alias = tableSource.computeAlias();
if (alias == null || tableSource instanceof SQLWithSubqueryClause.Entry) {
return;
}
if (visitor.isEnabled(SchemaResolveVisitor.Option.ResolveIdentifierAlias)) {
SQLPropertyExpr propertyExpr = new SQLPropertyExpr(new SQLIdentifierExpr(alias), ident, hash);
propertyExpr.setResolvedColumn(x.getResolvedColumn());
propertyExpr.setResolvedTableSource(x.getResolvedTableSource());
SQLUtils.replaceInParent(x, propertyExpr);
}
}
}
if (x.getResolvedColumn() == null
&& x.getResolvedTableSource() == null) {
for (SchemaResolveVisitor.Context parentCtx = ctx;
parentCtx != null;
parentCtx = parentCtx.parent) {
SQLDeclareItem declareItem = parentCtx.findDeclare(hash);
if (declareItem != null) {
x.setResolvedDeclareItem(declareItem);
return;
}
if (parentCtx.object instanceof SQLBlockStatement) {
SQLBlockStatement block = (SQLBlockStatement) parentCtx.object;
SQLParameter parameter = block.findParameter(hash);
if (parameter != null) {
x.setResolvedParameter(parameter);
return;
}
} else if (parentCtx.object instanceof SQLCreateProcedureStatement) {
SQLCreateProcedureStatement createProc = (SQLCreateProcedureStatement) parentCtx.object;
SQLParameter parameter = createProc.findParameter(hash);
if (parameter != null) {
x.setResolvedParameter(parameter);
return;
}
}
}
}
if (x.getResolvedColumnObject() == null && ctx.object instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock queryBlock = (SQLSelectQueryBlock) ctx.object;
boolean having = false;
for (SQLObject current = x, parent = x.getParent(); parent != null; current = parent, parent = parent.getParent()) {
if (parent instanceof SQLSelectGroupByClause && parent.getParent() == queryBlock) {
SQLSelectGroupByClause groupBy = (SQLSelectGroupByClause) parent;
if (current == groupBy.getHaving()) {
having = true;
}
break;
}
}
if (having) {
SQLSelectItem selectItem = queryBlock.findSelectItem(x.hashCode64());
if (selectItem != null) {
x.setResolvedColumn(selectItem);
}
}
}
}
static void resolve(SchemaResolveVisitor visitor, SQLPropertyExpr x) {
SchemaResolveVisitor.Context ctx = visitor.getContext();
if (ctx == null) {
return;
}
long owner_hash = 0;
{
SQLExpr ownerObj = x.getOwner();
if (ownerObj instanceof SQLIdentifierExpr) {
SQLIdentifierExpr owner = (SQLIdentifierExpr) ownerObj;
owner_hash = owner.nameHashCode64();
} else if (ownerObj instanceof SQLPropertyExpr) {
owner_hash = ((SQLPropertyExpr) ownerObj).hashCode64();
}
}
SQLTableSource tableSource = null;
SQLTableSource ctxTable = ctx.getTableSource();
if (ctxTable != null) {
tableSource = ctxTable.findTableSource(owner_hash);
}
if (tableSource == null) {
SQLTableSource ctxFrom = ctx.getFrom();
if (ctxFrom != null) {
tableSource = ctxFrom.findTableSource(owner_hash);
}
}
if (tableSource == null) {
for (SchemaResolveVisitor.Context parentCtx = ctx;
parentCtx != null;
parentCtx = parentCtx.parent) {
SQLTableSource parentCtxTable = parentCtx.getTableSource();
if (parentCtxTable != null) {
tableSource = parentCtxTable.findTableSource(owner_hash);
if (tableSource == null) {
SQLTableSource ctxFrom = parentCtx.getFrom();
if (ctxFrom != null) {
tableSource = ctxFrom.findTableSource(owner_hash);
}
}
if (tableSource != null) {
break;
}
} else {
if (parentCtx.object instanceof SQLBlockStatement) {
SQLBlockStatement block = (SQLBlockStatement) parentCtx.object;
SQLParameter parameter = block.findParameter(owner_hash);
if (parameter != null) {
x.setResolvedOwnerObject(parameter);
return;
}
} else if (parentCtx.object instanceof SQLMergeStatement) {
SQLMergeStatement mergeStatement = (SQLMergeStatement) parentCtx.object;
SQLTableSource into = mergeStatement.getInto();
if (into instanceof SQLSubqueryTableSource
&& into.aliasHashCode64() == owner_hash) {
x.setResolvedOwnerObject(into);
}
}
SQLDeclareItem declareItem = parentCtx.findDeclare(owner_hash);
if (declareItem != null) {
SQLObject resolvedObject = declareItem.getResolvedObject();
if (resolvedObject instanceof SQLCreateProcedureStatement
|| resolvedObject instanceof SQLCreateFunctionStatement
|| resolvedObject instanceof SQLTableSource) {
x.setResolvedOwnerObject(resolvedObject);
}
break;
}
}
}
}
if (tableSource != null) {
x.setResolvedTableSource(tableSource);
SQLObject column = tableSource.resolveColum(
x.nameHashCode64());
if (column instanceof SQLColumnDefinition) {
x.setResolvedColumn((SQLColumnDefinition) column);
} else if (column instanceof SQLSelectItem) {
x.setResolvedColumn((SQLSelectItem) column);
}
}
}
static void resolve(SchemaResolveVisitor visitor, SQLBinaryOpExpr x) {
final SQLBinaryOperator op = x.getOperator();
final SQLExpr left = x.getLeft();
if ((op == SQLBinaryOperator.BooleanAnd || op == SQLBinaryOperator.BooleanOr)
&& left instanceof SQLBinaryOpExpr
&& ((SQLBinaryOpExpr) left).getOperator() == op) {
List<SQLExpr> groupList = SQLBinaryOpExpr.split(x, op);
for (int i = 0; i < groupList.size(); i++) {
SQLExpr item = groupList.get(i);
item.accept(visitor);
}
return;
}
if (left != null) {
if (left instanceof SQLBinaryOpExpr) {
resolve(visitor, (SQLBinaryOpExpr) left);
} else {
left.accept(visitor);
}
}
SQLExpr right = x.getRight();
if (right != null) {
right.accept(visitor);
}
}
static SQLTableSource unwrapAlias(SchemaResolveVisitor.Context ctx, SQLTableSource tableSource, long identHash) {
if (ctx == null) {
return tableSource;
}
if (ctx.object instanceof SQLDeleteStatement
&& (ctx.getTableSource() == null || tableSource == ctx.getTableSource())
&& ctx.getFrom() != null) {
SQLTableSource found = ctx.getFrom().findTableSource(identHash);
if (found != null) {
return found;
}
}
for (SchemaResolveVisitor.Context parentCtx = ctx;
parentCtx != null;
parentCtx = parentCtx.parent) {
SQLWithSubqueryClause with = null;
if (parentCtx.object instanceof SQLSelect) {
SQLSelect select = (SQLSelect) parentCtx.object;
with = select.getWithSubQuery();
} else if (parentCtx.object instanceof SQLDeleteStatement) {
SQLDeleteStatement delete = (SQLDeleteStatement) parentCtx.object;
with = delete.getWith();
} else if (parentCtx.object instanceof SQLInsertStatement) {
SQLInsertStatement insertStmt = (SQLInsertStatement) parentCtx.object;
with = insertStmt.getWith();
} else if (parentCtx.object instanceof SQLUpdateStatement) {
SQLUpdateStatement updateStmt = (SQLUpdateStatement) parentCtx.object;
with = updateStmt.getWith();
}
if (with != null) {
SQLWithSubqueryClause.Entry entry = with.findEntry(identHash);
if (entry != null) {
return entry;
}
}
}
return tableSource;
}
static void resolve(SchemaResolveVisitor visitor, SQLSelectQueryBlock x) {
SchemaResolveVisitor.Context ctx = visitor.createContext(x);
if (ctx != null && ctx.level >= 32) {
return;
}
SQLTableSource from = x.getFrom();
if (from != null) {
ctx.setTableSource(from);
Class fromClass = from.getClass();
if (fromClass == SQLExprTableSource.class) {
visitor.visit((SQLExprTableSource) from);
} else {
from.accept(visitor);
}
} else if (x.getParent() != null && x.getParent().getParent() instanceof HiveInsert
&& x.getParent().getParent().getParent() instanceof HiveMultiInsertStatement) {
HiveMultiInsertStatement insert = (HiveMultiInsertStatement) x.getParent().getParent().getParent();
if (insert.getFrom() instanceof SQLExprTableSource) {
from = insert.getFrom();
ctx.setTableSource(from);
}
}
List<SQLSelectItem> selectList = x.getSelectList();
List<SQLSelectItem> columns = new ArrayList<SQLSelectItem>();
for (int i = selectList.size() - 1; i >= 0; i--) {
SQLSelectItem selectItem = selectList.get(i);
SQLExpr expr = selectItem.getExpr();
if (expr instanceof SQLAllColumnExpr) {
SQLAllColumnExpr allColumnExpr = (SQLAllColumnExpr) expr;
SQLExpr owner = allColumnExpr.getOwner();
SQLTableSource resolvedTableSource = from;
if (owner instanceof SQLIdentifierExpr) {
String ownerName = ((SQLIdentifierExpr) owner).getName();
resolvedTableSource = x.findTableSource(ownerName);
}
allColumnExpr.setResolvedTableSource(resolvedTableSource);
visitor.visit(allColumnExpr);
if (visitor.isEnabled(SchemaResolveVisitor.Option.ResolveAllColumn)) {
extractColumns(visitor, resolvedTableSource, null, columns);
}
} else if (expr instanceof SQLPropertyExpr) {
SQLPropertyExpr propertyExpr = (SQLPropertyExpr) expr;
visitor.visit(propertyExpr);
String ownerName = propertyExpr.getOwnernName();
if (propertyExpr.getName().equals("*")) {
if (visitor.isEnabled(SchemaResolveVisitor.Option.ResolveAllColumn)) {
SQLTableSource tableSource = x.findTableSource(ownerName);
extractColumns(visitor, tableSource, ownerName, columns);
}
}
SQLColumnDefinition column = propertyExpr.getResolvedColumn();
if (column != null) {
continue;
}
SQLTableSource tableSource = x.findTableSource(propertyExpr.getOwnernName());
if (tableSource != null) {
column = tableSource.findColumn(propertyExpr.nameHashCode64());
if (column != null) {
propertyExpr.setResolvedColumn(column);
}
}
} else if (expr instanceof SQLAllColumnExpr) {
SQLAllColumnExpr allColumnExpr = (SQLAllColumnExpr) expr;
SQLExpr owner = allColumnExpr.getOwner();
if (owner instanceof SQLIdentifierExpr) {
SQLIdentifierExpr ownerIdent = (SQLIdentifierExpr) owner;
String ownerName = ownerIdent.getName();
if (visitor.isEnabled(SchemaResolveVisitor.Option.ResolveAllColumn)) {
SQLTableSource tableSource = x.findTableSource(ownerName);
extractColumns(visitor, tableSource, ownerName, columns);
}
}
} else if (expr instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identExpr = (SQLIdentifierExpr) expr;
visitor.visit(identExpr);
long name_hash = identExpr.nameHashCode64();
SQLColumnDefinition column = identExpr.getResolvedColumn();
if (column != null) {
continue;
}
if (from == null) {
continue;
}
column = from.findColumn(name_hash);
if (column != null) {
identExpr.setResolvedColumn(column);
}
} else {
expr.accept(visitor);
}
if (columns.size() > 0) {
for (SQLSelectItem column : columns) {
column.setParent(x);
column.getExpr().accept(visitor);
}
selectList.remove(i);
selectList.addAll(i, columns);
columns.clear();
}
}
SQLExprTableSource into = x.getInto();
if (into != null) {
visitor.visit(into);
}
SQLExpr where = x.getWhere();
if (where != null) {
if (where instanceof SQLBinaryOpExpr) {
SQLBinaryOpExpr binaryOpExpr = (SQLBinaryOpExpr) where;
resolveExpr(visitor, binaryOpExpr.getLeft());
resolveExpr(visitor, binaryOpExpr.getRight());
} else if (where instanceof SQLBinaryOpExprGroup) {
SQLBinaryOpExprGroup binaryOpExprGroup = (SQLBinaryOpExprGroup) where;
for (SQLExpr item : binaryOpExprGroup.getItems()) {
if (item instanceof SQLBinaryOpExpr) {
SQLBinaryOpExpr binaryOpExpr = (SQLBinaryOpExpr) item;
resolveExpr(visitor, binaryOpExpr.getLeft());
resolveExpr(visitor, binaryOpExpr.getRight());
} else {
item.accept(visitor);
}
}
} else {
where.accept(visitor);
}
}
SQLExpr startWith = x.getStartWith();
if (startWith != null) {
startWith.accept(visitor);
}
SQLExpr connectBy = x.getConnectBy();
if (connectBy != null) {
connectBy.accept(visitor);
}
SQLSelectGroupByClause groupBy = x.getGroupBy();
if (groupBy != null) {
groupBy.accept(visitor);
}
List<SQLWindow> windows = x.getWindows();
if (windows != null) {
for (SQLWindow window : windows) {
window.accept(visitor);
}
}
SQLOrderBy orderBy = x.getOrderBy();
if (orderBy != null) {
for (SQLSelectOrderByItem orderByItem : orderBy.getItems()) {
SQLExpr orderByItemExpr = orderByItem.getExpr();
if (orderByItemExpr instanceof SQLIdentifierExpr) {
SQLIdentifierExpr orderByItemIdentExpr = (SQLIdentifierExpr) orderByItemExpr;
long hash = orderByItemIdentExpr.nameHashCode64();
SQLSelectItem selectItem = x.findSelectItem(hash);
if (selectItem != null) {
orderByItem.setResolvedSelectItem(selectItem);
SQLExpr selectItemExpr = selectItem.getExpr();
if (selectItemExpr instanceof SQLIdentifierExpr) {
orderByItemIdentExpr.setResolvedTableSource(((SQLIdentifierExpr) selectItemExpr).getResolvedTableSource());
orderByItemIdentExpr.setResolvedColumn(((SQLIdentifierExpr) selectItemExpr).getResolvedColumn());
} else if (selectItemExpr instanceof SQLPropertyExpr) {
orderByItemIdentExpr.setResolvedTableSource(((SQLPropertyExpr) selectItemExpr).getResolvedTableSource());
orderByItemIdentExpr.setResolvedColumn(((SQLPropertyExpr) selectItemExpr).getResolvedColumn());
}
continue;
}
}
orderByItemExpr.accept(visitor);
}
}
int forUpdateOfSize = x.getForUpdateOfSize();
if (forUpdateOfSize > 0) {
for (SQLExpr sqlExpr : x.getForUpdateOf()) {
sqlExpr.accept(visitor);
}
}
List<SQLSelectOrderByItem> distributeBy = x.getDistributeBy();
if (distributeBy != null) {
for (SQLSelectOrderByItem item : distributeBy) {
item.accept(visitor);
}
}
List<SQLSelectOrderByItem> sortBy = x.getSortBy();
if (sortBy != null) {
for (SQLSelectOrderByItem item : sortBy) {
item.accept(visitor);
}
}
visitor.popContext();
}
/**
 * Expands a {@code SELECT *} against the given table source by appending one
 * {@link SQLSelectItem} per exposed column to {@code columns}.
 *
 * @param visitor   resolver supplying the {@link SchemaRepository} and contexts
 * @param from      the table source whose columns should be expanded
 * @param ownerName explicit owner used to qualify each column, or {@code null}
 * @param columns   output list the generated select items are appended to
 */
static void extractColumns(SchemaResolveVisitor visitor,
                           SQLTableSource from,
                           String ownerName,
                           List<SQLSelectItem> columns) {
    if (from instanceof SQLExprTableSource) {
        SQLExpr expr = ((SQLExprTableSource) from).getExpr();
        SchemaRepository repository = visitor.getRepository();
        if (repository == null) {
            // Without a repository there is no table metadata to expand from.
            return;
        }
        String alias = from.getAlias();
        SchemaObject table = repository.findTable((SQLExprTableSource) from);
        if (table != null) {
            // Known table: emit one item per column definition of its CREATE TABLE.
            SQLCreateTableStatement createTableStmt = (SQLCreateTableStatement) table.getStatement();
            for (SQLTableElement e : createTableStmt.getTableElementList()) {
                if (!(e instanceof SQLColumnDefinition)) {
                    continue;
                }
                SQLColumnDefinition column = (SQLColumnDefinition) e;
                if (alias != null) {
                    // Qualify with the table alias when one is present.
                    SQLPropertyExpr name = new SQLPropertyExpr(alias, column.getName().getSimpleName());
                    name.setResolvedColumn(column);
                    columns.add(new SQLSelectItem(name));
                } else if (ownerName != null) {
                    SQLPropertyExpr name = new SQLPropertyExpr(ownerName, column.getName().getSimpleName());
                    name.setResolvedColumn(column);
                    columns.add(new SQLSelectItem(name));
                } else if (from.getParent() instanceof SQLJoinTableSource
                        && expr instanceof SQLName) {
                    // Inside a join, qualify with the table name to avoid ambiguity.
                    // (The original also re-checked `from instanceof SQLExprTableSource`,
                    // which is always true on this branch.)
                    String tableName = expr.toString();
                    SQLPropertyExpr name = new SQLPropertyExpr(tableName, column.getName().getSimpleName());
                    name.setResolvedColumn(column);
                    columns.add(new SQLSelectItem(name));
                } else {
                    SQLIdentifierExpr name = (SQLIdentifierExpr) column.getName().clone();
                    name.setResolvedColumn(column);
                    columns.add(new SQLSelectItem(name));
                }
            }
            return;
        }
        if (expr instanceof SQLIdentifierExpr) {
            SQLTableSource resolvedTableSource = ((SQLIdentifierExpr) expr).getResolvedTableSource();
            if (resolvedTableSource instanceof SQLWithSubqueryClause.Entry) {
                // The identifier refers to a CTE: expand the aliases of its first query block.
                SQLSelect select = ((SQLWithSubqueryClause.Entry) resolvedTableSource).getSubQuery();
                SQLSelectQueryBlock firstQueryBlock = select.getFirstQueryBlock();
                if (firstQueryBlock != null) {
                    for (SQLSelectItem item : firstQueryBlock.getSelectList()) {
                        String itemAlias = item.computeAlias();
                        if (itemAlias != null) {
                            SQLIdentifierExpr columnExpr = new SQLIdentifierExpr(itemAlias);
                            columnExpr.setResolvedColumn(item);
                            columns.add(new SQLSelectItem(columnExpr));
                        }
                    }
                }
            }
        }
    } else if (from instanceof SQLJoinTableSource) {
        // A join exposes the union of both sides' columns.
        SQLJoinTableSource join = (SQLJoinTableSource) from;
        extractColumns(visitor, join.getLeft(), ownerName, columns);
        extractColumns(visitor, join.getRight(), ownerName, columns);
    } else if (from instanceof SQLSubqueryTableSource) {
        addSelectListAliases(((SQLSubqueryTableSource) from).getSelect().getQueryBlock(), columns);
    } else if (from instanceof SQLUnionQueryTableSource) {
        addSelectListAliases(((SQLUnionQueryTableSource) from).getUnion().getFirstQueryBlock(), columns);
    }
}

/**
 * Appends one identifier select item per entry of {@code queryBlock}'s select list.
 * The whole expansion is skipped if any unaliased item is not a plain name,
 * because no reliable column name can be computed for it.
 */
private static void addSelectListAliases(SQLSelectQueryBlock queryBlock, List<SQLSelectItem> columns) {
    if (queryBlock == null) {
        return;
    }
    final List<SQLSelectItem> selectList = queryBlock.getSelectList();
    for (SQLSelectItem item : selectList) {
        if (item.getAlias() != null) {
            continue;
        }
        if (!(item.getExpr() instanceof SQLName)) {
            return; // skip: no usable column name
        }
    }
    for (SQLSelectItem item : selectList) {
        columns.add(new SQLSelectItem(new SQLIdentifierExpr(item.computeAlias())));
    }
}
/**
 * Resolves a bare {@code *} expression to the table source it expands over.
 * A {@code *} nested inside a table source is left untouched, and joins are
 * skipped because the owning table is ambiguous.
 */
static void resolve(SchemaResolveVisitor visitor, SQLAllColumnExpr x) {
    SQLTableSource resolved = x.getResolvedTableSource();
    if (resolved == null) {
        // Walk up to the owning query block; bail out if a table source is hit first.
        SQLSelectQueryBlock owner = null;
        for (SQLObject p = x.getParent(); p != null; p = p.getParent()) {
            if (p instanceof SQLTableSource) {
                return;
            }
            if (p instanceof SQLSelectQueryBlock) {
                owner = (SQLSelectQueryBlock) p;
                break;
            }
        }
        if (owner == null) {
            return;
        }
        SQLTableSource from = owner.getFrom();
        if (from == null || from instanceof SQLJoinTableSource) {
            return;
        }
        x.setResolvedTableSource(from);
        resolved = from;
    }
    if (!(resolved instanceof SQLExprTableSource)) {
        return;
    }
    // Follow an identifier table source through to its already-resolved target.
    SQLExpr tableExpr = ((SQLExprTableSource) resolved).getExpr();
    if (tableExpr instanceof SQLIdentifierExpr) {
        SQLTableSource target = ((SQLIdentifierExpr) tableExpr).getResolvedTableSource();
        if (target != null) {
            x.setResolvedTableSource(target);
        }
    }
}
/**
 * Resolves every sub-expression of a method invocation and, when the
 * repository declares a return type for the function, records it on the call.
 */
static void resolve(SchemaResolveVisitor v, SQLMethodInvokeExpr x) {
    // resolveExpr is null-safe, so the optional parts can be passed straight through.
    resolveExpr(v, x.getOwner());
    for (SQLExpr argument : x.getArguments()) {
        resolveExpr(v, argument);
    }
    resolveExpr(v, x.getFrom());
    resolveExpr(v, x.getUsing());
    resolveExpr(v, x.getFor());
    SchemaRepository repo = v.getRepository();
    if (repo != null) {
        // NOTE: "findFuntionReturnType" is the repository's actual API name (sic).
        SQLDataType returnType = repo.findFuntionReturnType(x.methodNameHashCode64());
        if (returnType != null) {
            x.setResolvedReturnDataType(returnType);
        }
    }
}
/**
 * Resolves a whole SELECT: its WITH clause, its query body, and the outer
 * ORDER BY. ORDER BY identifiers that match a select-list item are bound to
 * that item instead of being visited as free columns.
 */
static void resolve(SchemaResolveVisitor visitor, SQLSelect x) {
SchemaResolveVisitor.Context ctx = visitor.createContext(x);
// Resolve CTEs first so the query body can reference them.
SQLWithSubqueryClause with = x.getWithSubQuery();
if (with != null) {
visitor.visit(with);
}
SQLSelectQuery query = x.getQuery();
if (query != null) {
if (query instanceof SQLSelectQueryBlock) {
try {
visitor.visit((SQLSelectQueryBlock) query);
// Deeply nested queries can blow the stack; resolution is best-effort,
// so the overflow is deliberately swallowed rather than propagated.
} catch (StackOverflowError ignored) {
// ignore
}
} else {
query.accept(visitor);
}
}
SQLSelectQueryBlock queryBlock = x.getFirstQueryBlock();
SQLOrderBy orderBy = x.getOrderBy();
if (orderBy != null) {
for (SQLSelectOrderByItem orderByItem : orderBy.getItems()) {
SQLExpr orderByItemExpr = orderByItem.getExpr();
if (orderByItemExpr instanceof SQLIdentifierExpr) {
SQLIdentifierExpr orderByItemIdentExpr = (SQLIdentifierExpr) orderByItemExpr;
long hash = orderByItemIdentExpr.nameHashCode64();
SQLSelectItem selectItem = null;
if (queryBlock != null) {
selectItem = queryBlock.findSelectItem(hash);
}
if (selectItem != null) {
// The ORDER BY name refers to a select-list item: copy that item's
// resolution onto the identifier instead of re-resolving it.
orderByItem.setResolvedSelectItem(selectItem);
SQLExpr selectItemExpr = selectItem.getExpr();
if (selectItemExpr instanceof SQLIdentifierExpr) {
orderByItemIdentExpr.setResolvedTableSource(((SQLIdentifierExpr) selectItemExpr).getResolvedTableSource());
orderByItemIdentExpr.setResolvedColumn(((SQLIdentifierExpr) selectItemExpr).getResolvedColumn());
} else if (selectItemExpr instanceof SQLPropertyExpr) {
orderByItemIdentExpr.setResolvedTableSource(((SQLPropertyExpr) selectItemExpr).getResolvedTableSource());
orderByItemIdentExpr.setResolvedColumn(((SQLPropertyExpr) selectItemExpr).getResolvedColumn());
}
// Already bound; do not visit the expression again.
continue;
}
}
orderByItemExpr.accept(visitor);
}
}
visitor.popContext();
}
/**
 * Resolves each CTE entry of a WITH clause and registers its alias in the
 * current context so later references can resolve back to the entry.
 */
static void resolve(SchemaResolveVisitor visitor, SQLWithSubqueryClause x) {
    final SchemaResolveVisitor.Context ctx = visitor.getContext();
    for (SQLWithSubqueryClause.Entry entry : x.getEntries()) {
        SQLSelect subQuery = entry.getSubQuery();
        if (subQuery != null) {
            visitor.visit(subQuery);
            long aliasHash = entry.aliasHashCode64();
            if (ctx != null && aliasHash != 0) {
                // Make "WITH name AS (...)" visible to the rest of the statement.
                ctx.addTableSource(aliasHash, entry);
            }
        } else if (entry.getExpr() != null) {
            entry.getExpr().accept(visitor);
        } else {
            SQLStatement returning = entry.getReturningStatement();
            if (returning != null) {
                returning.accept(visitor);
            }
        }
    }
}
/**
 * Resolves a table-source expression: plain names are matched against the
 * current contexts and then the schema repository; ANN(...) calls are unwrapped
 * so their feature column can be resolved; subqueries are resolved recursively.
 */
static void resolve(SchemaResolveVisitor visitor, SQLExprTableSource x) {
SQLExpr expr = x.getExpr();
SQLExpr annFeature = null;
if (expr instanceof SQLMethodInvokeExpr) {
SQLMethodInvokeExpr func = (SQLMethodInvokeExpr) expr;
// ANN(table, featureColumn, ...): resolve against the first argument and
// remember the feature column for later binding.
if (func.methodNameHashCode64() == FnvHash.Constants.ANN) {
expr = func.getArguments().get(0);
annFeature = func.getArguments().get(1);
if (annFeature instanceof SQLIdentifierExpr) {
((SQLIdentifierExpr) annFeature).setResolvedTableSource(x);
} else if (annFeature instanceof SQLPropertyExpr) {
((SQLPropertyExpr) annFeature).setResolvedTableSource(x);
}
}
}
if (expr instanceof SQLName || expr instanceof SQLAllColumnExpr) {
if (x.getSchemaObject() != null) {
// Already resolved earlier; nothing to do.
return;
}
// Pick the identifier to look up: the name itself, or the owner of a
// qualified name / qualified "*".
SQLIdentifierExpr identifierExpr = null;
if (expr instanceof SQLIdentifierExpr) {
identifierExpr = (SQLIdentifierExpr) expr;
} else if (expr instanceof SQLPropertyExpr) {
SQLExpr owner = ((SQLPropertyExpr) expr).getOwner();
if (owner instanceof SQLIdentifierExpr) {
identifierExpr = (SQLIdentifierExpr) owner;
}
} else if (expr instanceof SQLAllColumnExpr) {
SQLExpr owner = ((SQLAllColumnExpr) expr).getOwner();
if (owner instanceof SQLIdentifierExpr) {
identifierExpr = (SQLIdentifierExpr) owner;
}
}
if (identifierExpr != null) {
checkParameter(visitor, identifierExpr);
// First try aliases / CTEs visible in the enclosing contexts.
SQLTableSource tableSource = unwrapAlias(visitor.getContext(), null, identifierExpr.nameHashCode64());
if (tableSource == null && x.getParent() instanceof HiveMultiInsertStatement) {
// Hive multi-insert keeps its WITH clause on the statement itself.
SQLWithSubqueryClause with = ((HiveMultiInsertStatement) x.getParent()).getWith();
if (with != null) {
SQLWithSubqueryClause.Entry entry = with.findEntry(identifierExpr.nameHashCode64());
tableSource = entry;
}
}
if (tableSource != null) {
identifierExpr.setResolvedTableSource(tableSource);
return;
}
}
// Not a local alias: fall back to the schema repository.
SchemaRepository repository = visitor.getRepository();
if (repository != null) {
SchemaObject table = repository.findTable((SQLName) expr);
if (table != null) {
x.setSchemaObject(table);
if (annFeature != null) {
// Bind the ANN feature column against the resolved table's columns.
if (annFeature instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identExpr = (SQLIdentifierExpr) annFeature;
SQLColumnDefinition column = table.findColumn(identExpr.nameHashCode64());
if (column != null) {
identExpr.setResolvedColumn(column);
}
}
}
return;
}
SchemaObject view = repository.findView((SQLName) expr);
if (view != null) {
x.setSchemaObject(view);
return;
}
}
return;
}
if (expr instanceof SQLMethodInvokeExpr) {
// Table function (non-ANN, or ANN over a non-name): resolve the call itself.
visitor.visit((SQLMethodInvokeExpr) expr);
return;
}
if (expr instanceof SQLQueryExpr) {
// Inline subquery as table source.
SQLSelect select =
((SQLQueryExpr) expr)
.getSubQuery();
visitor.visit(select);
SQLSelectQueryBlock queryBlock = select.getQueryBlock();
if (queryBlock != null && annFeature instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identExpr = (SQLIdentifierExpr) annFeature;
// NOTE: "resolveColum" is the query block's actual API name (sic).
SQLObject columnDef = queryBlock.resolveColum(identExpr.nameHashCode64());
if (columnDef instanceof SQLColumnDefinition) {
identExpr.setResolvedColumn((SQLColumnDefinition) columnDef);
} else if (columnDef instanceof SQLSelectItem) {
identExpr.setResolvedColumn((SQLSelectItem) columnDef);
}
}
//if (queryBlock.findColumn())
return;
}
expr.accept(visitor);
}
/**
 * Resolves an ALTER TABLE statement: the altered table is installed as the
 * current context so every alter item resolves against it.
 */
static void resolve(SchemaResolveVisitor visitor, SQLAlterTableStatement x) {
    SchemaResolveVisitor.Context ctx = visitor.createContext(x);
    ctx.setTableSource(x.getTableSource());
    for (SQLAlterTableItem alterItem : x.getItems()) {
        alterItem.accept(visitor);
    }
    visitor.popContext();
}
/**
 * Resolves a MERGE statement: the INTO target becomes the context table, the
 * USING source becomes the context FROM, and each WHEN clause's columns are
 * bound directly to the target table.
 */
static void resolve(SchemaResolveVisitor visitor, SQLMergeStatement x) {
SchemaResolveVisitor.Context ctx = visitor.createContext(x);
SQLTableSource into = x.getInto();
if (into instanceof SQLExprTableSource) {
ctx.setTableSource(into);
} else {
into.accept(visitor);
}
SQLTableSource using = x.getUsing();
if (using != null) {
using.accept(visitor);
ctx.setFrom(using);
}
SQLExpr on = x.getOn();
if (on != null) {
on.accept(visitor);
}
List<SQLMergeStatement.When> whens = x.getWhens();
for (SQLMergeStatement.When when : whens) {
SQLName by = when.getBy();
if (by != null) {
by.accept(visitor);
}
if (when instanceof SQLMergeStatement.WhenUpdate) {
SQLMergeStatement.WhenUpdate updateClause = (SQLMergeStatement.WhenUpdate) when;
for (SQLUpdateSetItem item : updateClause.getItems()) {
SQLExpr column = item.getColumn();
// SET columns always belong to the merge target; simple names are bound
// directly and only non-name column expressions are visited.
if (column instanceof SQLIdentifierExpr) {
((SQLIdentifierExpr) column).setResolvedTableSource(into);
} else if (column instanceof SQLPropertyExpr) {
((SQLPropertyExpr) column).setResolvedTableSource(into);
} else {
column.accept(visitor);
}
SQLExpr value = item.getValue();
if (value != null) {
value.accept(visitor);
}
}
} else if (when instanceof SQLMergeStatement.WhenInsert) {
SQLMergeStatement.WhenInsert insertClause = (SQLMergeStatement.WhenInsert) when;
for (SQLExpr column : insertClause.getColumns()) {
if (column instanceof SQLIdentifierExpr) {
((SQLIdentifierExpr) column).setResolvedTableSource(into);
} else if (column instanceof SQLPropertyExpr) {
((SQLPropertyExpr) column).setResolvedTableSource(into);
}
// NOTE(review): unlike the update branch, insert columns are ALSO visited
// after being bound — presumably intentional, but worth confirming.
column.accept(visitor);
}
for (SQLExpr value : insertClause.getValues()) {
value.accept(visitor);
}
}
SQLExpr where = when.getWhere();
if (where != null) {
where.accept(visitor);
}
}
visitor.popContext();
}
/**
 * Resolves a CREATE FUNCTION: declares the function's own name (so references
 * to it resolve), then resolves its parameters and body in a fresh context.
 */
static void resolve(SchemaResolveVisitor visitor, SQLCreateFunctionStatement x) {
    SchemaResolveVisitor.Context ctx = visitor.createContext(x);
    SQLDeclareItem selfDeclare = new SQLDeclareItem(x.getName().clone(), null);
    selfDeclare.setResolvedObject(x);
    // Prefer the enclosing scope for the declaration; fall back to our own.
    SchemaResolveVisitor.Context parentCtx = visitor.getContext();
    SchemaResolveVisitor.Context declareTarget = parentCtx != null ? parentCtx : ctx;
    declareTarget.declare(selfDeclare);
    for (SQLParameter functionParameter : x.getParameters()) {
        functionParameter.accept(visitor);
    }
    SQLStatement body = x.getBlock();
    if (body != null) {
        body.accept(visitor);
    }
    visitor.popContext();
}
/**
 * Resolves a CREATE PROCEDURE: registers the procedure name as a declaration,
 * then resolves parameters and body within a new context.
 */
static void resolve(SchemaResolveVisitor visitor, SQLCreateProcedureStatement x) {
    SchemaResolveVisitor.Context ctx = visitor.createContext(x);
    SQLDeclareItem procDeclare = new SQLDeclareItem(x.getName().clone(), null);
    procDeclare.setResolvedObject(x);
    // Declare in the enclosing scope when one exists, otherwise locally.
    SchemaResolveVisitor.Context enclosing = visitor.getContext();
    if (enclosing == null) {
        ctx.declare(procDeclare);
    } else {
        enclosing.declare(procDeclare);
    }
    for (SQLParameter procParameter : x.getParameters()) {
        procParameter.accept(visitor);
    }
    SQLStatement body = x.getBlock();
    if (body != null) {
        body.accept(visitor);
    }
    visitor.popContext();
}
/**
 * Resolves an IF statement (condition, THEN body, ELSEIF branches, ELSE branch)
 * inside its own context. Always returns {@code false}: children were already
 * visited here, so the caller must not descend again.
 */
static boolean resolve(SchemaResolveVisitor visitor, SQLIfStatement x) {
    visitor.createContext(x);
    SQLExpr cond = x.getCondition();
    if (cond != null) {
        cond.accept(visitor);
    }
    for (SQLStatement bodyStmt : x.getStatements()) {
        bodyStmt.accept(visitor);
    }
    for (SQLIfStatement.ElseIf elseIfItem : x.getElseIfList()) {
        elseIfItem.accept(visitor);
    }
    SQLIfStatement.Else elseItem = x.getElseItem();
    if (elseItem != null) {
        elseItem.accept(visitor);
    }
    visitor.popContext();
    return false;
}
/**
 * Resolves a BEGIN...END block: its parameters, its statements, and its
 * exception handler, all inside a dedicated scope.
 */
static void resolve(SchemaResolveVisitor visitor, SQLBlockStatement x) {
    visitor.createContext(x);
    for (SQLParameter blockParameter : x.getParameters()) {
        visitor.visit(blockParameter);
    }
    for (SQLStatement bodyStmt : x.getStatementList()) {
        bodyStmt.accept(visitor);
    }
    SQLStatement exceptionHandler = x.getException();
    if (exceptionHandler != null) {
        exceptionHandler.accept(visitor);
    }
    visitor.popContext();
}
/**
 * Resolves a parameter declaration: binds its name, then resolves its default
 * value. A query-typed default is resolved as if it were an aliased subquery
 * table source named after the parameter.
 */
static void resolve(SchemaResolveVisitor visitor, SQLParameter x) {
    SQLName paramName = x.getName();
    if (paramName instanceof SQLIdentifierExpr) {
        ((SQLIdentifierExpr) paramName).setResolvedParameter(x);
    }
    SQLExpr defaultValue = x.getDefaultValue();
    if (defaultValue == null) {
        return;
    }
    boolean pushed = false;
    if (defaultValue instanceof SQLQueryExpr) {
        SchemaResolveVisitor.Context ctx = visitor.createContext(x);
        pushed = true;
        SQLSubqueryTableSource subQuerySource = new SQLSubqueryTableSource(((SQLQueryExpr) defaultValue).getSubQuery());
        subQuerySource.setParent(x);
        subQuerySource.setAlias(x.getName().getSimpleName());
        ctx.setTableSource(subQuerySource);
    }
    defaultValue.accept(visitor);
    if (pushed) {
        visitor.popContext();
    }
}
/**
 * Resolves a DECLARE item: registers it in the current scope (when one exists)
 * and binds its identifier back to the declaration.
 */
static void resolve(SchemaResolveVisitor visitor, SQLDeclareItem x) {
    SchemaResolveVisitor.Context currentCtx = visitor.getContext();
    if (currentCtx != null) {
        currentCtx.declare(x);
    }
    SQLName declaredName = x.getName();
    if (declaredName instanceof SQLIdentifierExpr) {
        ((SQLIdentifierExpr) declaredName).setResolvedDeclareItem(x);
    }
}
/**
 * Resolves an OVER(...) window specification. ORDER BY is resolved only when
 * no OF clause is present; PARTITION BY expressions are always resolved.
 */
static void resolve(SchemaResolveVisitor visitor, SQLOver x) {
    if (x.getOf() == null) {
        SQLOrderBy orderBy = x.getOrderBy();
        if (orderBy != null) {
            orderBy.accept(visitor);
        }
    }
    List<SQLExpr> partitionBy = x.getPartitionBy();
    if (partitionBy != null) {
        for (SQLExpr partitionExpr : partitionBy) {
            partitionExpr.accept(visitor);
        }
    }
}
/**
 * Tries to resolve an identifier as a parameter, declared variable, CTE entry,
 * or view by walking the context chain outward.
 *
 * @return true if the identifier was bound to a parameter or CTE entry;
 *         false otherwise (even when a declare item or view was bound — see note)
 */
private static boolean checkParameter(SchemaResolveVisitor visitor, SQLIdentifierExpr x) {
if (x.getResolvedParameter() != null) {
return true;
}
SchemaResolveVisitor.Context ctx = visitor.getContext();
if (ctx == null) {
return false;
}
long hash = x.hashCode64();
// Walk from the innermost context outward; first match wins.
for (SchemaResolveVisitor.Context parentCtx = ctx;
parentCtx != null;
parentCtx = parentCtx.parent) {
if (parentCtx.object instanceof SQLBlockStatement) {
SQLBlockStatement block = (SQLBlockStatement) parentCtx.object;
SQLParameter parameter = block.findParameter(hash);
if (parameter != null) {
x.setResolvedParameter(parameter);
return true;
}
}
if (parentCtx.object instanceof SQLCreateProcedureStatement) {
SQLCreateProcedureStatement createProc = (SQLCreateProcedureStatement) parentCtx.object;
SQLParameter parameter = createProc.findParameter(hash);
if (parameter != null) {
x.setResolvedParameter(parameter);
return true;
}
}
if (parentCtx.object instanceof SQLSelect) {
SQLSelect select = (SQLSelect) parentCtx.object;
SQLWithSubqueryClause with = select.getWithSubQuery();
if (with != null) {
SQLWithSubqueryClause.Entry entry = with.findEntry(hash);
if (entry != null) {
// The name is a CTE reference rather than a parameter.
x.setResolvedTableSource(entry);
return true;
}
}
SchemaRepository repo = visitor.getRepository();
if (repo != null) {
SchemaObject view = repo.findView(x);
if (view != null && view.getStatement() instanceof SQLCreateViewStatement) {
// Record the view as owner but keep scanning outer scopes.
x.setResolvedOwnerObject(view.getStatement());
}
}
}
SQLDeclareItem declareItem = parentCtx.findDeclare(hash);
if (declareItem != null) {
x.setResolvedDeclareItem(declareItem);
// NOTE(review): a matched declare item binds the identifier but the method
// still returns false below — presumably intentional ("not a parameter"),
// worth confirming.
break;
}
}
return false;
}
/**
 * Resolves a REPLACE statement: target table becomes the context, then the
 * column list and any REPLACE...SELECT source query are resolved.
 */
static void resolve(SchemaResolveVisitor visitor, SQLReplaceStatement x) {
    SchemaResolveVisitor.Context ctx = visitor.createContext(x);
    SQLExprTableSource target = x.getTableSource();
    ctx.setTableSource(target);
    visitor.visit(target);
    for (SQLExpr columnExpr : x.getColumns()) {
        columnExpr.accept(visitor);
    }
    SQLQueryExpr query = x.getQuery();
    if (query != null) {
        visitor.visit(query.getSubQuery());
    }
    visitor.popContext();
}
/**
 * Resolves a FETCH statement: the cursor name plus every INTO target variable.
 */
static void resolve(SchemaResolveVisitor visitor, SQLFetchStatement x) {
    resolveExpr(visitor, x.getCursorName());
    List<SQLExpr> intoList = x.getInto();
    for (int i = 0; i < intoList.size(); i++) {
        resolveExpr(visitor, intoList.get(i));
    }
}
/**
 * Resolves a FOREIGN KEY constraint: binds the referencing columns to the
 * owning table (CREATE TABLE or ALTER TABLE) and, when the repository knows the
 * referenced table, binds the referenced columns to its column definitions.
 */
static void resolve(SchemaResolveVisitor visitor, SQLForeignKeyConstraint x) {
SchemaRepository repository = visitor.getRepository();
SQLObject parent = x.getParent();
if (parent instanceof SQLCreateTableStatement) {
SQLCreateTableStatement createTableStmt = (SQLCreateTableStatement) parent;
SQLTableSource table = createTableStmt.getTableSource();
for (SQLName item : x.getReferencingColumns()) {
// NOTE(review): unchecked cast — a qualified (SQLPropertyExpr) column name
// here would throw ClassCastException; presumably the parser only produces
// simple identifiers in this position. Worth confirming.
SQLIdentifierExpr columnName = (SQLIdentifierExpr) item;
columnName.setResolvedTableSource(table);
SQLColumnDefinition column = createTableStmt.findColumn(columnName.nameHashCode64());
if (column != null) {
columnName.setResolvedColumn(column);
}
}
} else if (parent instanceof SQLAlterTableAddConstraint) {
SQLAlterTableStatement stmt = (SQLAlterTableStatement) parent.getParent();
SQLTableSource table = stmt.getTableSource();
for (SQLName item : x.getReferencingColumns()) {
SQLIdentifierExpr columnName = (SQLIdentifierExpr) item;
columnName.setResolvedTableSource(table);
}
}
if (repository == null) {
// Referenced-table resolution needs schema metadata; stop here without it.
return;
}
SQLExprTableSource table = x.getReferencedTable();
for (SQLName item : x.getReferencedColumns()) {
SQLIdentifierExpr columnName = (SQLIdentifierExpr) item;
columnName.setResolvedTableSource(table);
}
SQLName tableName = table.getName();
SchemaObject tableObject = repository.findTable(tableName);
if (tableObject == null) {
return;
}
SQLStatement tableStmt = tableObject.getStatement();
if (tableStmt instanceof SQLCreateTableStatement) {
// Bind each referenced column to its definition in the referenced table.
SQLCreateTableStatement refCreateTableStmt = (SQLCreateTableStatement) tableStmt;
for (SQLName item : x.getReferencedColumns()) {
SQLIdentifierExpr columnName = (SQLIdentifierExpr) item;
SQLColumnDefinition column = refCreateTableStmt.findColumn(columnName.nameHashCode64());
if (column != null) {
columnName.setResolvedColumn(column);
}
}
}
}
/**
 * Resolves a CREATE VIEW by resolving its underlying query.
 */
static void resolve(SchemaResolveVisitor visitor, SQLCreateViewStatement x) {
    SQLSelect definition = x.getSubQuery();
    definition.accept(visitor);
}
// for performance
/**
 * Resolves an expression with fast paths: identifiers are dispatched directly,
 * integer/char literals are skipped entirely, everything else goes through the
 * normal visitor. Null-safe.
 */
static void resolveExpr(SchemaResolveVisitor visitor, SQLExpr x) {
    if (x == null) {
        return;
    }
    Class<?> exprClass = x.getClass();
    if (exprClass == SQLIdentifierExpr.class) {
        visitor.visit((SQLIdentifierExpr) x);
    } else if (exprClass != SQLIntegerExpr.class && exprClass != SQLCharExpr.class) {
        x.accept(visitor);
    }
    // Integer and char literals need no resolution and are deliberately skipped.
}
/**
 * Resolves a UNION query. Left-deep chains of the same operator are flattened
 * into a list and visited iteratively, avoiding deep recursion on queries like
 * {@code a UNION b UNION c UNION ...}.
 */
static void resolveUnion(SchemaResolveVisitor visitor, SQLUnionQuery x) {
SQLUnionOperator operator = x.getOperator();
List<SQLSelectQuery> relations = x.getRelations();
if (relations.size() > 2) {
// Already stored flat: just visit each relation.
for (SQLSelectQuery relation : relations) {
relation.accept(visitor);
}
return;
}
SQLSelectQuery left = x.getLeft();
SQLSelectQuery right = x.getRight();
boolean bracket = x.isParenthesized() && !(x.getParent() instanceof SQLUnionQueryTableSource);
// Flatten only when no parentheses/ORDER BY force a grouping and the left
// child is a union with the same operator.
if ((!bracket)
&& left instanceof SQLUnionQuery
&& ((SQLUnionQuery) left).getOperator() == operator
&& !right.isParenthesized()
&& x.getOrderBy() == null) {
SQLUnionQuery leftUnion = (SQLUnionQuery) left;
List<SQLSelectQuery> rights = new ArrayList<SQLSelectQuery>();
rights.add(right);
if (leftUnion.getRelations().size() > 2) {
rights.addAll(leftUnion.getRelations());
} else {
// Descend the left spine, collecting right children, until the chain
// breaks; then add the final right and left leaves.
for (; ; ) {
SQLSelectQuery leftLeft = leftUnion.getLeft();
SQLSelectQuery leftRight = leftUnion.getRight();
if ((!leftUnion.isParenthesized())
&& leftUnion.getOrderBy() == null
&& (!leftLeft.isParenthesized())
&& (!leftRight.isParenthesized())
&& leftLeft instanceof SQLUnionQuery
&& ((SQLUnionQuery) leftLeft).getOperator() == operator) {
rights.add(leftRight);
leftUnion = (SQLUnionQuery) leftLeft;
continue;
} else {
rights.add(leftRight);
rights.add(leftLeft);
}
break;
}
}
// rights was collected right-to-left, so iterate in reverse to visit the
// branches in their original source order.
for (int i = rights.size() - 1; i >= 0; i--) {
SQLSelectQuery item = rights.get(i);
item.accept(visitor);
}
return;
}
// Not flattenable: visit both sides recursively.
if (left != null) {
left.accept(visitor);
}
if (right != null) {
right.accept(visitor);
}
}
}
| SQLResolveVisitor |
java | apache__dubbo | dubbo-compatible/src/test/java/org/apache/dubbo/cache/MyCacheFactory.java | {
"start": 979,
"end": 1130
} | class ____ extends AbstractCacheFactory {
@Override
protected Cache createCache(URL url) {
return new MyCache(url);
}
}
| MyCacheFactory |
java | quarkusio__quarkus | extensions/cache/deployment/src/test/java/io/quarkus/cache/test/runtime/ExplicitSimpleKeyCacheTest.java | {
"start": 668,
"end": 3942
} | class ____ {
private static final long KEY_1 = 123L;
private static final long KEY_2 = 456L;
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest().withApplicationRoot(jar -> jar.addClass(CachedService.class));
@Inject
CachedService cachedService;
@Test
public void testAllCacheAnnotations() {
// In most of the cached service methods calls below, a changing second argument will be passed to the methods.
// The fact that it changes each time should not have any effect on the cache because it is not part of the cache key.
// STEP 1
// Action: @CacheResult-annotated method call.
// Expected effect: method invoked and result cached.
// Verified by: STEP 2.
String value1 = cachedService.cachedMethod(KEY_1, new Object());
// STEP 2
// Action: same call as STEP 1.
// Expected effect: method not invoked and result coming from the cache.
// Verified by: same object reference between STEPS 1 and 2 results.
String value2 = cachedService.cachedMethod(KEY_1, new Object());
assertTrue(value1 == value2);
// STEP 3
// Action: same call as STEP 2 with a new key.
// Expected effect: method invoked and result cached.
// Verified by: different objects references between STEPS 2 and 3 results.
String value3 = cachedService.cachedMethod(KEY_2, new Object());
assertTrue(value2 != value3);
// STEP 4
// Action: cache entry invalidation.
// Expected effect: STEP 2 cache entry removed.
// Verified by: STEP 5.
cachedService.invalidate(KEY_1, new Object());
// STEP 5
// Action: same call as STEP 2.
// Expected effect: method invoked because of STEP 4 and result cached.
// Verified by: different objects references between STEPS 2 and 5 results.
String value5 = cachedService.cachedMethod(KEY_1, new Object());
assertTrue(value2 != value5);
// STEP 6
// Action: same call as STEP 3.
// Expected effect: method not invoked and result coming from the cache.
// Verified by: same object reference between STEPS 3 and 6 results.
String value6 = cachedService.cachedMethod(KEY_2, new Object());
assertTrue(value3 == value6);
// STEP 7
// Action: full cache invalidation.
// Expected effect: empty cache.
// Verified by: STEPS 8 and 9.
cachedService.invalidateAll();
// STEP 8
// Action: same call as STEP 5.
// Expected effect: method invoked because of STEP 7 and result cached.
// Verified by: different objects references between STEPS 5 and 8 results.
String value8 = cachedService.cachedMethod(KEY_1, new Object());
assertTrue(value5 != value8);
// STEP 9
// Action: same call as STEP 6.
// Expected effect: method invoked because of STEP 7 and result cached.
// Verified by: different objects references between STEPS 6 and 9 results.
String value9 = cachedService.cachedMethod(KEY_2, new Object());
assertTrue(value6 != value9);
}
@Dependent
static | ExplicitSimpleKeyCacheTest |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/RescaleCheckpointManuallyITCase.java | {
"start": 17348,
"end": 19080
} | class ____
extends RichFlatMapFunction<Integer, Tuple2<Integer, Integer>>
implements CheckpointedFunction {
private static final long serialVersionUID = 1L;
private transient ValueState<Integer> counter;
private transient ValueState<Integer> sum;
private final int numberElements;
public SubtaskIndexFlatMapper(int numberElements) {
this.numberElements = numberElements;
}
@Override
public void flatMap(Integer value, Collector<Tuple2<Integer, Integer>> out)
throws Exception {
Integer counterValue = counter.value();
int count = counterValue == null ? 1 : counterValue + 1;
counter.update(count);
Integer sumValue = sum.value();
int s = sumValue == null ? value : sumValue + value;
sum.update(s);
if (count == numberElements) {
out.collect(
Tuple2.of(getRuntimeContext().getTaskInfo().getIndexOfThisSubtask(), s));
}
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
// all managed, nothing to do.
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
counter =
context.getKeyedStateStore()
.getState(new ValueStateDescriptor<>("counter", Integer.class));
sum =
context.getKeyedStateStore()
.getState(new ValueStateDescriptor<>("sum", Integer.class));
}
}
private static | SubtaskIndexFlatMapper |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java | {
"start": 1476,
"end": 1534
} | class ____ {@link TargetApplicationsNamespace}.
*/
public | for |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/interceptor/InterceptorStrategyNotOrderedTest.java | {
"start": 2167,
"end": 2870
} | class ____ implements InterceptStrategy {
@Override
public Processor wrapProcessorInInterceptors(
CamelContext context, NamedNode definition, final Processor target, Processor nextTarget) {
Processor answer = new Processor() {
public void process(Exchange exchange) throws Exception {
String order = exchange.getIn().getHeader("order", "", String.class);
order = order + "foo";
exchange.getIn().setHeader("order", order);
target.process(exchange);
}
};
return answer;
}
}
private static | FooInterceptStrategy |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/MiloServerEndpointBuilderFactory.java | {
"start": 11172,
"end": 11489
} | interface ____
extends
AdvancedMiloServerEndpointConsumerBuilder,
AdvancedMiloServerEndpointProducerBuilder {
default MiloServerEndpointBuilder basic() {
return (MiloServerEndpointBuilder) this;
}
}
public | AdvancedMiloServerEndpointBuilder |
java | resilience4j__resilience4j | resilience4j-core/src/test/java/io/github/resilience4j/core/registry/CompositeRegistryEventConsumerTest.java | {
"start": 815,
"end": 2875
} | class ____ {
@Test
public void testCompositeRegistryEventConsumer() {
List<RegistryEventConsumer<String>> consumers = new ArrayList<>();
TestRegistryEventConsumer registryEventConsumer1 = new TestRegistryEventConsumer();
TestRegistryEventConsumer registryEventConsumer2 = new TestRegistryEventConsumer();
consumers.add(registryEventConsumer1);
consumers.add(registryEventConsumer2);
CompositeRegistryEventConsumer<String> compositeRegistryEventConsumer = new CompositeRegistryEventConsumer<>(
consumers);
TestRegistry testRegistry = new TestRegistry(compositeRegistryEventConsumer);
String addedEntry1 = testRegistry.computeIfAbsent("name", () -> "entry1");
assertThat(addedEntry1).isEqualTo("entry1");
String addedEntry2 = testRegistry.computeIfAbsent("name2", () -> "entry2");
assertThat(addedEntry2).isEqualTo("entry2");
Optional<String> removedEntry = testRegistry.remove("name");
assertThat(removedEntry).isNotEmpty().hasValue("entry1");
Optional<String> replacedEntry = testRegistry.replace("name2", "entry3");
assertThat(replacedEntry).isNotEmpty().hasValue("entry2");
assertConsumer(registryEventConsumer1);
assertConsumer(registryEventConsumer2);
}
public void assertConsumer(TestRegistryEventConsumer consumer) {
assertThat(consumer.addedEvents).hasSize(2);
assertThat(consumer.removedEvents).hasSize(1);
assertThat(consumer.replacedEvents).hasSize(1);
assertThat(consumer.addedEvents).extracting("addedEntry")
.containsExactly("entry1", "entry2");
assertThat(consumer.removedEvents).extracting("removedEntry")
.containsExactly("entry1");
assertThat(consumer.replacedEvents).extracting("oldEntry")
.containsExactly("entry2");
assertThat(consumer.replacedEvents).extracting("newEntry")
.containsExactly("entry3");
}
private static | CompositeRegistryEventConsumerTest |
java | mapstruct__mapstruct | core/src/main/java/org/mapstruct/Mapping.java | {
"start": 1457,
"end": 2011
} | interface ____ {
* HumanDto toHumanDto(Human human)
* }
* </code></pre>
* <pre><code class='java'>
* // generates:
* @Override
* public HumanDto toHumanDto(Human human) {
* humanDto.setFullName( human.getFullName() );
* // ...
* }
* </code></pre>
*
* <p><strong>Example 2:</strong> Mapping properties with different names</p>
* <pre><code class='java'>
* // We need map Human.companyName to HumanDto.company
* // we can use @Mapping with parameters {@link #source()} and {@link #target()}
* @Mapper
* public | HumanMapper |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/Range.java | {
"start": 2720,
"end": 6721
} | class ____ extends ScalarFunction implements TranslationAware.SingleValueTranslationAware {
private static final Logger logger = LogManager.getLogger(Range.class);
private final Expression value, lower, upper;
private final boolean includeLower, includeUpper;
private final ZoneId zoneId;
public Range(Source src, Expression value, Expression lower, boolean inclLower, Expression upper, boolean inclUpper, ZoneId zoneId) {
super(src, asList(value, lower, upper));
this.value = value;
this.lower = lower;
this.upper = upper;
this.includeLower = inclLower;
this.includeUpper = inclUpper;
this.zoneId = zoneId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public String getWriteableName() {
throw new UnsupportedOperationException();
}
@Override
protected NodeInfo<Range> info() {
return NodeInfo.create(this, Range::new, value, lower, includeLower, upper, includeUpper, zoneId);
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new Range(source(), newChildren.get(0), newChildren.get(1), includeLower, newChildren.get(2), includeUpper, zoneId);
}
public Expression value() {
return value;
}
public Expression lower() {
return lower;
}
public Expression upper() {
return upper;
}
public boolean includeLower() {
return includeLower;
}
public boolean includeUpper() {
return includeUpper;
}
public ZoneId zoneId() {
return zoneId;
}
/**
* In case that the range is empty due to foldable, invalid bounds, but the bounds themselves are not yet folded, the optimizer will
* need two passes to fold this.
* That's because we shouldn't perform folding when trying to determine foldability.
*/
@Override
public boolean foldable() {
// NB: this is likely dead code. See note in areBoundariesInvalid
if (lower.foldable() && upper.foldable()) {
if (value().foldable()) {
return true;
}
// We cannot fold the bounds here; but if they're already literals, we can check if the range is always empty.
if (lower() instanceof Literal l && upper() instanceof Literal u) {
return areBoundariesInvalid(l.value(), u.value());
}
}
return false;
}
@Override
public Object fold(FoldContext ctx) {
// NB: this is likely dead code. See note in areBoundariesInvalid
Object lowerValue = lower.fold(ctx);
Object upperValue = upper.fold(ctx);
if (areBoundariesInvalid(lowerValue, upperValue)) {
return Boolean.FALSE;
}
Object val = value.fold(ctx);
Integer lowerCompare = BinaryComparison.compare(lower.fold(ctx), val);
Integer upperCompare = BinaryComparison.compare(val, upper().fold(ctx));
boolean lowerComparsion = lowerCompare == null ? false : (includeLower ? lowerCompare <= 0 : lowerCompare < 0);
boolean upperComparsion = upperCompare == null ? false : (includeUpper ? upperCompare <= 0 : upperCompare < 0);
return lowerComparsion && upperComparsion;
}
/**
* Check whether the boundaries are invalid ( upper < lower) or not.
* If they are, the value does not have to be evaluated.
*/
protected boolean areBoundariesInvalid(Object lowerValue, Object upperValue) {
/*
NB: I am reasonably sure this code is dead. It can only be reached from foldable(), and as far as I can tell
we never fold ranges. There's no ES|QL syntax for ranges, so they can never be created by the parser. The
PropagateEquals optimizer rule can in theory create ranges, but only from existing ranges. The fact that this
| Range |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessorTests.java | {
"start": 152306,
"end": 152493
} | class ____ {
@Autowired
private FactoryBean<?> factoryBean;
public final FactoryBean<?> getFactoryBean() {
return this.factoryBean;
}
}
public static | FactoryBeanDependentBean |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/uri/UriAssert_hasParameter_String_Test.java | {
"start": 782,
"end": 1146
} | class ____ extends UriAssertBaseTest {
private final String name = "article";
@Override
protected UriAssert invoke_api_method() {
return assertions.hasParameter(name);
}
@Override
protected void verify_internal_effects() {
verify(uris).assertHasParameter(getInfo(assertions), getActual(assertions), name);
}
}
| UriAssert_hasParameter_String_Test |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/watermark/LongWatermarkCombiner.java | {
"start": 1932,
"end": 6709
} | class ____ implements WatermarkCombiner {
private final WatermarkCombinationPolicy combinationPolicy;
/** The number of upstream input channels. */
private final int numberOfInputChannels;
/** A bitset to record whether the watermark has been received from each channel. */
private final BitSet hasReceiveWatermarks;
/** Channel index to {@link LongWatermarkElement}. */
private final Map<Integer, LongWatermarkElement> channelWatermarks;
/** A heap-based priority queue to help find the minimum/maximum watermark. */
private final HeapPriorityQueue<LongWatermarkElement> orderedChannelWatermarks;
/** The comparator to compare the watermark value of two {@link LongWatermarkElement}s. */
private final HeapPriorityQueue.PriorityComparator<LongWatermarkElement> watermarkComparator;
/**
* Send only the watermark that differs from the previous sent one, we need to record the
* previous sent watermark value and whether is the first time sending.
*/
private final LongWatermarkElement previousEmitWatermarkElement = new LongWatermarkElement(-1);
private boolean isFirstTimeEmit = true;
public LongWatermarkCombiner(
WatermarkCombinationPolicy combinationPolicy, int numberOfInputChannels) {
checkState(
combinationPolicy.getWatermarkCombinationFunction()
instanceof
WatermarkCombinationFunction.NumericWatermarkCombinationFunction);
this.combinationPolicy = combinationPolicy;
this.numberOfInputChannels = numberOfInputChannels;
this.hasReceiveWatermarks = new BitSet(numberOfInputChannels);
this.channelWatermarks = new HashMap<>(numberOfInputChannels);
// according to the combination strategy create the comparator and the {@link
// LongWatermarkElement} initial value
long initValue;
if (combinationPolicy.getWatermarkCombinationFunction()
== WatermarkCombinationFunction.NumericWatermarkCombinationFunction.MIN) {
this.watermarkComparator =
(left, right) ->
Long.compare(left.getWatermarkValue(), right.getWatermarkValue());
initValue = Long.MAX_VALUE;
} else {
this.watermarkComparator =
(left, right) ->
Long.compare(right.getWatermarkValue(), left.getWatermarkValue());
initValue = Long.MIN_VALUE;
}
// init the watermark elements of {@code #channelWatermarks} and {@code
// #orderedChannelWatermarks}
this.orderedChannelWatermarks =
new HeapPriorityQueue<>(watermarkComparator, numberOfInputChannels);
for (int i = 0; i < numberOfInputChannels; i++) {
LongWatermarkElement watermarkElement = new LongWatermarkElement(initValue);
channelWatermarks.put(i, watermarkElement);
orderedChannelWatermarks.add(watermarkElement);
}
}
@Override
public void combineWatermark(
Watermark watermark, int channelIndex, Consumer<Watermark> watermarkEmitter) {
checkState(watermark instanceof LongWatermark);
hasReceiveWatermarks.set(channelIndex);
// Update the watermark for the current channel
channelWatermarks
.get(channelIndex)
.setWatermarkValue(((LongWatermark) watermark).getValue());
orderedChannelWatermarks.adjustModifiedElement(channelWatermarks.get(channelIndex));
if (combinationPolicy.isCombineWaitForAllChannels()
&& hasReceiveWatermarks.cardinality() < numberOfInputChannels) {
// Not all watermarks have been received yet
return;
}
// the combined watermark should be the first node of {@code orderedChannelWatermarks}
if (shouldEmitWatermark(orderedChannelWatermarks.peek())) {
// send the combined watermark to downstream
watermarkEmitter.accept(
new LongWatermark(
orderedChannelWatermarks.peek().getWatermarkValue(),
watermark.getIdentifier()));
previousEmitWatermarkElement.setWatermarkValue(
orderedChannelWatermarks.peek().getWatermarkValue());
}
}
private boolean shouldEmitWatermark(LongWatermarkElement combinedWatermarkElement) {
if (isFirstTimeEmit) {
isFirstTimeEmit = false;
return true;
}
return watermarkComparator.comparePriority(
combinedWatermarkElement, previousEmitWatermarkElement)
!= 0;
}
/**
* This | LongWatermarkCombiner |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/typesafe/DataNamespaceMessageBundleFailureTest.java | {
"start": 599,
"end": 2103
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(Hellos.class, Item.class, OtherItem.class, GoodByes.class)
.addAsResource(new StringAsset(
"hello=Hallo {data:item.unknownProperty}!"),
"messages/msg_de.properties"))
.assertException(t -> {
Throwable e = t;
TemplateException te = null;
while (e != null) {
if (e instanceof TemplateException) {
te = (TemplateException) e;
break;
}
e = e.getCause();
}
assertNotNull(te);
assertTrue(te.getMessage().contains(
"Property/method [unknownProperty] not found on class [io.quarkus.qute.deployment.typesafe.Item] nor handled by an extension method"),
te.getMessage());
assertTrue(te.getMessage().contains(
"Property/method [missingProperty] not found on class [io.quarkus.qute.deployment.typesafe.Item] nor handled by an extension method"),
te.getMessage());
});
@Test
public void testValidation() {
fail();
}
@MessageBundle(value = DEFAULT_NAME)
public | DataNamespaceMessageBundleFailureTest |
java | apache__camel | components/camel-test/camel-test-main-junit5/src/test/java/org/apache/camel/test/main/junit5/legacy/ReplaceRouteFromTest.java | {
"start": 1687,
"end": 2132
} | class
____.addConfiguration(MyConfiguration.class);
}
@Test
void shouldReplaceTheFromEndpoint() throws Exception {
MockEndpoint mock = context.getEndpoint("mock:out", MockEndpoint.class);
mock.expectedBodiesReceived("Hello Will!");
String result = template.requestBody("direct:foo", null, String.class);
mock.assertIsSatisfied();
assertEquals("Hello Will!", result);
}
}
| configuration |
java | apache__spark | common/network-common/src/main/java/org/apache/spark/network/client/StreamCallback.java | {
"start": 1282,
"end": 1682
} | interface ____ {
/** Called upon receipt of stream data. */
void onData(String streamId, ByteBuffer buf) throws IOException;
/** Called when all data from the stream has been received. */
void onComplete(String streamId) throws IOException;
/** Called if there's an error reading data from the stream. */
void onFailure(String streamId, Throwable cause) throws IOException;
}
| StreamCallback |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupport.java | {
"start": 5883,
"end": 6154
} | class ____ the configuration behind the MVC Java config.
* It is typically imported by adding {@link EnableWebMvc @EnableWebMvc} to an
* application {@link Configuration @Configuration} class. An alternative more
* advanced option is to extend directly from this | providing |
java | google__auto | value/src/main/java/com/google/auto/value/processor/BuilderMethodClassifier.java | {
"start": 32936,
"end": 34523
} | class ____ a method {@code abstract String getBar()} then an
* abstract method in its builder with the same signature will query the {@code bar} property.
*/
abstract Optional<String> propertyForBuilderGetter(ExecutableElement method);
/**
* Checks for failed JavaBean usage when a method that looks like a setter doesn't actually match
* anything, and emits a compiler Note if detected. A frequent source of problems is where the
* JavaBeans conventions have been followed for most but not all getters. Then AutoValue considers
* that they haven't been followed at all, so you might have a property called getFoo where you
* thought it was called just foo, and you might not understand why your setter called setFoo is
* rejected (it would have to be called setGetFoo).
*
* <p>This is not relevant for AutoBuilder, which uses parameter names rather than getters. The
* parameter names are unambiguously the same as the property names.
*/
abstract void checkForFailedJavaBean(ExecutableElement rejectedSetter);
/**
* A string describing what sort of Auto this is, {@code "AutoValue"} or {@code "AutoBuilder"}.
*/
abstract String autoWhat();
/**
* A string describing what a builder getter must match: a property method for AutoValue, a
* parameter for AutoBuilder.
*/
abstract String getterMustMatch();
/**
* A string describing what a property builder for property {@code foo} must match, {@code foo()
* or getFoo()} for AutoValue, {@code foo} for AutoBuilder.
*/
abstract String fooBuilderMustMatch();
}
| has |
java | google__dagger | javatests/dagger/internal/codegen/ComponentValidationTest.java | {
"start": 6324,
"end": 6860
} | interface ____ {}");
CompilerTests.daggerCompiler(componentFile)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining("interface");
});
}
@Test public void nonModuleModule() {
Source componentFile =
CompilerTests.javaSource(
"test.NotAComponent",
"package test;",
"",
"import dagger.Component;",
"",
"@Component(modules = Object.class)",
" | NotAComponent |
java | resilience4j__resilience4j | resilience4j-spring/src/test/java/io/github/resilience4j/retry/configure/RetryRecoveryTest.java | {
"start": 1252,
"end": 3427
} | class ____ {
@Autowired
@Qualifier("retryDummyService")
TestDummyService testDummyService;
@Test
public void testRecovery() {
assertThat(testDummyService.sync()).isEqualTo("recovered");
}
@Test
public void testAsyncRecovery() throws Exception {
assertThat(testDummyService.async().toCompletableFuture().get(5, TimeUnit.SECONDS))
.isEqualTo("recovered");
}
@Test
public void testMonoRecovery() {
assertThat(testDummyService.mono("test").block()).isEqualTo("test");
}
@Test
public void testFluxRecovery() {
assertThat(testDummyService.flux().blockFirst()).isEqualTo("recovered");
}
@Test
public void testObservableRecovery() {
assertThat(testDummyService.observable().blockingFirst()).isEqualTo("recovered");
}
@Test
public void testSingleRecovery() {
assertThat(testDummyService.single().blockingGet()).isEqualTo("recovered");
}
@Test
public void testCompletableRecovery() {
assertThat(testDummyService.completable().blockingGet()).isNull();
}
@Test
public void testMaybeRecovery() {
assertThat(testDummyService.maybe().blockingGet()).isEqualTo("recovered");
}
@Test
public void testFlowableRecovery() {
assertThat(testDummyService.flowable().blockingFirst()).isEqualTo("recovered");
}
@Test
public void testRx3ObservableRecovery() {
assertThat(testDummyService.rx3Observable().blockingFirst()).isEqualTo("recovered");
}
@Test
public void testRx3SingleRecovery() {
assertThat(testDummyService.rx3Single().blockingGet()).isEqualTo("recovered");
}
@Test
public void testRx3CompletableRecovery() {
assertThat(testDummyService.rx3Completable().blockingAwait(2, TimeUnit.SECONDS)).isTrue();
}
@Test
public void testRx3MaybeRecovery() {
assertThat(testDummyService.rx3Maybe().blockingGet()).isEqualTo("recovered");
}
@Test
public void testRx3FlowableRecovery() {
assertThat(testDummyService.rx3Flowable().blockingFirst()).isEqualTo("recovered");
}
}
| RetryRecoveryTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java | {
"start": 1322,
"end": 1550
} | interface ____ be used by HA frameworks to manage the service.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public | could |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/messaging/WebSocketStompClientIntegrationTests.java | {
"start": 5452,
"end": 7418
} | class ____ extends StompSessionHandlerAdapter {
private final String topic;
private final Object payload;
private final List<String> received = new ArrayList<>();
public TestHandler(String topic, Object payload) {
this.topic = topic;
this.payload = payload;
}
public List<String> getReceived() {
return this.received;
}
@Override
public void afterConnected(StompSession session, StompHeaders connectedHeaders) {
session.subscribe(this.topic, new StompFrameHandler() {
@Override
public Type getPayloadType(StompHeaders headers) {
return String.class;
}
@Override
public void handleFrame(StompHeaders headers, @Nullable Object payload) {
received.add((String) payload);
}
});
try {
// Delay send since server processes concurrently
// Ideally order should be preserved or receipts supported (simple broker)
Thread.sleep(500);
}
catch (InterruptedException ex) {
logger.error(ex);
}
session.send(this.topic, this.payload);
}
public boolean awaitForMessageCount(int expected, long millisToWait) throws InterruptedException {
if (logger.isDebugEnabled()) {
logger.debug("Awaiting for message count: " + expected);
}
long startTime = System.currentTimeMillis();
while (this.received.size() < expected) {
Thread.sleep(500);
if ((System.currentTimeMillis() - startTime) > millisToWait) {
return false;
}
}
return true;
}
@Override
public void handleException(StompSession session, StompCommand command,
StompHeaders headers, byte[] payload, Throwable ex) {
logger.error(command + " " + headers, ex);
}
@Override
public void handleFrame(StompHeaders headers, @Nullable Object payload) {
logger.error("STOMP error frame " + headers + " payload=" + payload);
}
@Override
public void handleTransportError(StompSession session, Throwable exception) {
logger.error(exception);
}
}
}
| TestHandler |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinOperatorTestCase.java | {
"start": 1948,
"end": 8307
} | interface ____ {
IndexableField newPointField();
IndexableField newDocValuesField();
void assertPage(Page page);
AggregatorFunction newAggregatorFunction(DriverContext context);
void assertMinValue(Block block, boolean exactResult);
}
protected abstract NumberTypeTest getNumberTypeTest();
protected abstract LuceneMinFactory.NumberType getNumberType();
protected static final String FIELD_NAME = "field";
private final Directory directory = newDirectory();
private IndexReader reader;
@After
public void closeIndex() throws IOException {
IOUtils.close(reader, directory);
}
@Override
protected LuceneMinFactory simple(SimpleOptions options) {
return simple(getNumberTypeTest(), randomFrom(DataPartitioning.values()), between(1, 10_000), 100);
}
private LuceneMinFactory simple(NumberTypeTest numberTypeTest, DataPartitioning dataPartitioning, int numDocs, int limit) {
final boolean enableShortcut = randomBoolean();
final boolean enableMultiValue = randomBoolean();
final int commitEvery = Math.max(1, numDocs / 10);
try (
RandomIndexWriter writer = new RandomIndexWriter(
random(),
directory,
newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)
)
) {
for (int d = 0; d < numDocs; d++) {
final var numValues = enableMultiValue ? randomIntBetween(1, 5) : 1;
final var doc = new Document();
for (int i = 0; i < numValues; i++) {
if (enableShortcut) {
doc.add(numberTypeTest.newPointField());
} else {
doc.add(numberTypeTest.newDocValuesField());
}
}
writer.addDocument(doc);
if (d % commitEvery == 0) {
writer.commit();
}
}
reader = writer.getReader();
} catch (IOException e) {
throw new RuntimeException(e);
}
final ShardContext ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0);
final Query query;
if (enableShortcut && randomBoolean()) {
query = new MatchAllDocsQuery();
} else {
query = SortedNumericDocValuesField.newSlowRangeQuery(FIELD_NAME, Long.MIN_VALUE, Long.MAX_VALUE);
}
return new LuceneMinFactory(
new IndexedByShardIdFromSingleton<>(ctx),
c -> List.of(new LuceneSliceQueue.QueryAndTags(query, List.of())),
dataPartitioning,
between(1, 8),
FIELD_NAME,
getNumberType(),
limit
);
}
public void testSimple() {
testSimple(this::driverContext);
}
public void testSimpleWithCranky() {
try {
testSimple(this::crankyDriverContext);
logger.info("cranky didn't break");
} catch (CircuitBreakingException e) {
logger.info("broken", e);
assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE));
}
}
private void testSimple(Supplier<DriverContext> contexts) {
int size = between(1_000, 20_000);
int limit = randomBoolean() ? between(10, size) : Integer.MAX_VALUE;
testMin(contexts, size, limit);
}
public void testEmpty() {
testEmpty(this::driverContext);
}
public void testEmptyWithCranky() {
try {
testEmpty(this::crankyDriverContext);
logger.info("cranky didn't break");
} catch (CircuitBreakingException e) {
logger.info("broken", e);
assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE));
}
}
private void testEmpty(Supplier<DriverContext> contexts) {
int limit = randomBoolean() ? between(10, 10000) : Integer.MAX_VALUE;
testMin(contexts, 0, limit);
}
private void testMin(Supplier<DriverContext> contexts, int size, int limit) {
DataPartitioning dataPartitioning = randomFrom(DataPartitioning.values());
NumberTypeTest numberTypeTest = getNumberTypeTest();
LuceneMinFactory factory = simple(numberTypeTest, dataPartitioning, size, limit);
List<Page> results = new CopyOnWriteArrayList<>();
List<Driver> drivers = new ArrayList<>();
int taskConcurrency = between(1, 8);
for (int i = 0; i < taskConcurrency; i++) {
DriverContext ctx = contexts.get();
drivers.add(TestDriverFactory.create(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add)));
}
OperatorTestCase.runDriver(drivers);
assertThat(results.size(), lessThanOrEqualTo(taskConcurrency));
try (AggregatorFunction aggregatorFunction = numberTypeTest.newAggregatorFunction(contexts.get())) {
for (Page page : results) {
assertThat(page.getPositionCount(), is(1)); // one row
assertThat(page.getBlockCount(), is(2)); // two blocks
numberTypeTest.assertPage(page);
aggregatorFunction.addIntermediateInput(page);
}
final Block[] result = new Block[1];
try {
aggregatorFunction.evaluateFinal(result, 0, contexts.get());
if (result[0].areAllValuesNull() == false) {
boolean exactResult = size <= limit;
numberTypeTest.assertMinValue(result[0], exactResult);
}
} finally {
Releasables.close(result);
}
}
}
@Override
protected final Matcher<String> expectedToStringOfSimple() {
return matchesRegex("LuceneMinMaxOperator\\[shards = \\[test], maxPageSize = \\d+, remainingDocs=100]");
}
@Override
protected final Matcher<String> expectedDescriptionOfSimple() {
return matchesRegex(
"LuceneMinOperator\\[type = "
+ getNumberType().name()
+ ", dataPartitioning = (AUTO|DOC|SHARD|SEGMENT), fieldName = "
+ FIELD_NAME
+ ", limit = 100]"
);
}
}
| NumberTypeTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java | {
"start": 2758,
"end": 18621
} | class ____ extends TransportLocalClusterStateAction<ClusterStateRequest, ClusterStateResponse> {
private static final Logger logger = LogManager.getLogger(TransportClusterStateAction.class);
private final ProjectResolver projectResolver;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final ThreadPool threadPool;
@Inject
public TransportClusterStateAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
ProjectResolver projectResolver,
Client client
) {
super(
ClusterStateAction.NAME,
actionFilters,
transportService.getTaskManager(),
clusterService,
threadPool.executor(ThreadPool.Names.MANAGEMENT)
);
this.projectResolver = projectResolver;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.threadPool = threadPool;
// construct to register with TransportService
new TransportRemoteClusterStateAction(transportService, threadPool, actionFilters, client);
}
@Override
protected ClusterBlockException checkBlock(ClusterStateRequest request, ClusterState state) {
// cluster state calls are done also on a fully blocked cluster to figure out what is going
// on in the cluster. For example, which nodes have joined yet the recovery has not yet kicked
// in, we need to make sure we allow those calls
// return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
return null;
}
@Override
protected void localClusterStateOperation(
Task task,
final ClusterStateRequest request,
final ClusterState state,
final ActionListener<ClusterStateResponse> listener
) throws IOException {
assert task instanceof CancellableTask : task + " not cancellable";
final CancellableTask cancellableTask = (CancellableTask) task;
final Predicate<ClusterState> acceptableClusterStatePredicate = request.waitForMetadataVersion() == null
? Predicates.always()
: clusterState -> clusterState.metadata().version() >= request.waitForMetadataVersion();
if (cancellableTask.notifyIfCancelled(listener)) {
return;
}
if (acceptableClusterStatePredicate.test(state)) {
ActionListener.completeWith(listener, () -> buildResponse(request, state));
} else {
assert acceptableClusterStatePredicate.test(state) == false;
new ClusterStateObserver(state, clusterService, request.waitForTimeout(), logger, threadPool.getThreadContext())
.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState newState) {
if (cancellableTask.notifyIfCancelled(listener)) {
return;
}
if (acceptableClusterStatePredicate.test(newState)) {
executor.execute(ActionRunnable.supply(listener, () -> buildResponse(request, newState)));
} else {
listener.onFailure(
new NotMasterException(
"master stepped down waiting for metadata version " + request.waitForMetadataVersion()
)
);
}
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
ActionListener.run(listener, l -> {
if (cancellableTask.notifyIfCancelled(l) == false) {
l.onResponse(new ClusterStateResponse(state.getClusterName(), null, true));
}
});
}
}, clusterState -> cancellableTask.isCancelled() || acceptableClusterStatePredicate.test(clusterState));
}
}
private ClusterState filterClusterState(final ClusterState inputState) {
final Collection<ProjectId> projectIds = projectResolver.getProjectIds(inputState);
final Metadata metadata = inputState.metadata();
if (projectIds.containsAll(metadata.projects().keySet())
&& projectIds.containsAll(inputState.globalRoutingTable().routingTables().keySet())) {
// no filtering required - everything in the cluster state is within the set of projects
return inputState;
}
final Metadata.Builder mdBuilder = Metadata.builder(inputState.metadata());
final GlobalRoutingTable.Builder rtBuilder = GlobalRoutingTable.builder(inputState.globalRoutingTable());
final ProjectStateRegistry.Builder psBuilder = ProjectStateRegistry.builder(inputState);
for (var projectId : metadata.projects().keySet()) {
if (projectIds.contains(projectId) == false) {
mdBuilder.removeProject(projectId);
rtBuilder.removeProject(projectId);
psBuilder.removeProject(projectId);
}
}
return ClusterState.builder(inputState)
.metadata(mdBuilder.build())
.routingTable(rtBuilder.build())
.putCustom(ProjectStateRegistry.TYPE, psBuilder.build())
.build();
}
@SuppressForbidden(reason = "exposing ClusterState#compatibilityVersions requires reading them")
private static Map<String, CompatibilityVersions> getCompatibilityVersions(ClusterState clusterState) {
return clusterState.compatibilityVersions();
}
@SuppressForbidden(reason = "exposing ClusterState#clusterFeatures requires reading them")
private static Map<String, Set<String>> getClusterFeatures(ClusterState clusterState) {
return clusterState.clusterFeatures().nodeFeatures();
}
private ClusterStateResponse buildResponse(final ClusterStateRequest request, final ClusterState rawState) {
final ClusterState filteredState = filterClusterState(rawState);
ThreadPool.assertCurrentThreadPool(ThreadPool.Names.MANAGEMENT); // too heavy to construct & serialize cluster state without forking
if (request.blocks() == false) {
final var blockException = filteredState.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
if (blockException != null) {
// There's a METADATA_READ block in place, but we aren't returning it to the caller, and yet the caller needs to know that
// this block exists (e.g. it's the STATE_NOT_RECOVERED_BLOCK, so the rest of the state is known to be incomplete). Thus we
// must fail the request:
throw blockException;
}
}
logger.trace("Serving cluster state request using version {}", filteredState.version());
ClusterState.Builder builder = ClusterState.builder(filteredState.getClusterName());
builder.version(filteredState.version());
builder.stateUUID(filteredState.stateUUID());
if (request.nodes()) {
builder.nodes(filteredState.nodes());
builder.nodeIdsToCompatibilityVersions(getCompatibilityVersions(filteredState));
builder.nodeFeatures(getClusterFeatures(filteredState));
}
if (request.routingTable()) {
if (request.indices().length > 0) {
final GlobalRoutingTable.Builder globalRoutingTableBuilder = GlobalRoutingTable.builder(filteredState.globalRoutingTable())
.clear();
for (ProjectMetadata project : filteredState.metadata().projects().values()) {
RoutingTable projectRouting = filteredState.routingTable(project.id());
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
String[] indices = indexNameExpressionResolver.concreteIndexNames(project, request);
for (String filteredIndex : indices) {
if (projectRouting.hasIndex(filteredIndex)) {
routingTableBuilder.add(projectRouting.getIndicesRouting().get(filteredIndex));
}
}
globalRoutingTableBuilder.put(project.id(), routingTableBuilder);
}
builder.routingTable(globalRoutingTableBuilder.build());
} else {
builder.routingTable(filteredState.globalRoutingTable());
}
} else {
builder.routingTable(GlobalRoutingTable.builder().build());
}
if (request.blocks()) {
builder.blocks(filteredState.blocks());
}
Metadata.Builder mdBuilder = Metadata.builder();
mdBuilder.clusterUUID(filteredState.metadata().clusterUUID());
mdBuilder.coordinationMetadata(filteredState.coordinationMetadata());
if (request.metadata()) {
// filter out metadata that shouldn't be returned by the API
final BiPredicate<String, Metadata.MetadataCustom<?>> notApi = (ignore, custom) -> custom.context()
.contains(Metadata.XContentContext.API) == false;
if (request.indices().length > 0) {
// if the request specified index names, then we don't want the whole metadata, just the version and projects (which will
// be filtered (below) to only include the relevant indices)
mdBuilder.version(filteredState.metadata().version());
} else {
// If there are no requested indices, then we want all the metadata, except for customs that aren't exposed via the API
mdBuilder = Metadata.builder(filteredState.metadata());
mdBuilder.removeCustomIf(notApi);
if (projectResolver.supportsMultipleProjects() && request.multiproject() == false) {
ProjectStateRegistry projectStateRegistry = ProjectStateRegistry.get(filteredState);
if (projectStateRegistry.size() > 1) {
throw new Metadata.MultiProjectPendingException(
"There are multiple projects " + projectStateRegistry.knownProjects()
);
}
var reservedStateMetadata = new HashMap<>(filteredState.metadata().reservedStateMetadata());
var singleProjectReservedStateMetadata = projectStateRegistry.reservedStateMetadata(projectResolver.getProjectId());
singleProjectReservedStateMetadata.forEach(
(key, value) -> reservedStateMetadata.merge(key, value, this::mergeReservedStateMetadata)
);
mdBuilder.put(reservedStateMetadata);
}
}
for (ProjectMetadata project : filteredState.metadata().projects().values()) {
ProjectMetadata.Builder pBuilder;
if (request.indices().length > 0) {
// if the request specified index names, then only include the project-id and indices
pBuilder = ProjectMetadata.builder(project.id());
String[] indices = indexNameExpressionResolver.concreteIndexNames(project, request);
for (String filteredIndex : indices) {
// If the requested index is part of a data stream then that data stream should also be included:
IndexAbstraction indexAbstraction = project.getIndicesLookup().get(filteredIndex);
if (indexAbstraction.getParentDataStream() != null) {
DataStream dataStream = indexAbstraction.getParentDataStream();
// Also the IMD of other backing indices need to be included, otherwise the cluster state api
// can't create a valid cluster state instance:
for (Index backingIndex : dataStream.getIndices()) {
pBuilder.put(project.index(backingIndex), false);
}
pBuilder.put(dataStream);
} else {
IndexMetadata indexMetadata = project.index(filteredIndex);
if (indexMetadata != null) {
pBuilder.put(indexMetadata, false);
}
}
}
} else {
// if the request did not specify index names, then include everything from the project except non-API customs
pBuilder = ProjectMetadata.builder(project);
pBuilder.removeCustomIf(notApi);
}
mdBuilder.put(pBuilder);
}
} else {
for (ProjectId project : filteredState.metadata().projects().keySet()) {
// Request doesn't want to retrieve metadata, so we just fill in empty projects
// (because we can't have a truly empty Metadata)
mdBuilder.put(ProjectMetadata.builder(project));
}
}
builder.metadata(mdBuilder);
if (request.customs()) {
for (Map.Entry<String, ClusterState.Custom> custom : filteredState.customs().entrySet()) {
if (custom.getValue().isPrivate() == false) {
builder.putCustom(custom.getKey(), custom.getValue());
}
}
}
return new ClusterStateResponse(filteredState.getClusterName(), builder.build(), false);
}
private ReservedStateMetadata mergeReservedStateMetadata(
ReservedStateMetadata clusterReservedMetadata,
ReservedStateMetadata projectReservedMetadata
) {
if (Objects.equals(clusterReservedMetadata.version(), projectReservedMetadata.version()) == false) {
logger.info(
"Reserved state metadata version is different for Metadata ({}) and the requested project ({})",
clusterReservedMetadata.version(),
projectReservedMetadata.version()
);
}
ReservedStateMetadata.Builder builder = ReservedStateMetadata.builder(clusterReservedMetadata.namespace())
.version(Math.max(clusterReservedMetadata.version(), projectReservedMetadata.version()));
for (ReservedStateHandlerMetadata handler : clusterReservedMetadata.handlers().values()) {
builder.putHandler(handler);
}
for (Map.Entry<String, ReservedStateHandlerMetadata> handlerEntry : projectReservedMetadata.handlers().entrySet()) {
assert clusterReservedMetadata.handlers().containsKey(handlerEntry.getKey()) == false
: "Duplicate of handler: " + handlerEntry.getKey();
builder.putHandler(handlerEntry.getValue());
}
if (projectReservedMetadata.errorMetadata() != null) {
builder.errorMetadata(projectReservedMetadata.errorMetadata());
} else if (clusterReservedMetadata.errorMetadata() != null) {
builder.errorMetadata(clusterReservedMetadata.errorMetadata());
}
return builder.build();
}
}
| TransportClusterStateAction |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/Cache.java | {
"start": 3736,
"end": 6711
} | interface ____ an immediate "hard" removal outside any
* current transaction and/or locking scheme.
* <p>
* The {@link org.hibernate.annotations.Cache} annotation also specifies a
* {@link org.hibernate.annotations.CacheConcurrencyStrategy}, a policy governing
* access to the second-level cache by concurrent transactions. Either:
* <ul>
* <li>{@linkplain org.hibernate.annotations.CacheConcurrencyStrategy#READ_ONLY
* read-only access} for immutable data,
* <li>{@linkplain org.hibernate.annotations.CacheConcurrencyStrategy#NONSTRICT_READ_WRITE
* read/write access with no locking}, when concurrent updates are
* extremely improbable,
* <li>{@linkplain org.hibernate.annotations.CacheConcurrencyStrategy#READ_WRITE
* read/write access using soft locks} when concurrent updates are possible
* but not common, or
* <li>{@linkplain org.hibernate.annotations.CacheConcurrencyStrategy#TRANSACTIONAL
* transactional access} when concurrent updates are frequent.
* </ul>
* <p>
* It's important to always explicitly specify an appropriate policy, taking into
* account the expected patterns of data access, most importantly, the frequency
* of updates.
* <p>
* Query result sets may also be stored in the second-level cache. A query is made
* eligible for caching by calling
* {@link org.hibernate.query.SelectionQuery#setCacheable(boolean)}, and may be
* assigned to a region of the second-level cache by calling
* {@link org.hibernate.query.SelectionQuery#setCacheRegion(String)}. It's very
* important to understand that any entity instance in a query result set is cached
* by its id. If the entity itself is not {@linkplain org.hibernate.annotations.Cache
* cacheable}, or if the instance is not available in the second-level cache at the
* time a result set is retrieved from the cache, then the state of the entity must
* be read from the database. <em>This negates the benefits of caching the result
* set.</em> It's therefore very important to carefully "match" the caching policies
* of a query and the entities it returns.
* <p>
* Hibernate does not itself contain a high-quality implementation of a second-level
* cache backend with expiry, persistence, and replication, and depends on a plug-in
* implementation of {@link org.hibernate.cache.spi.RegionFactory} to integrate a
* backend storage mechanism. Therefore, the second-level cache is completely disabled
* by default, unless {@value org.hibernate.cfg.AvailableSettings#CACHE_REGION_FACTORY}
* is explicitly specified. For convenience, the second-level cache may also be enabled
* or disabled using {@value org.hibernate.cfg.AvailableSettings#USE_SECOND_LEVEL_CACHE}.
*
* @author Steve Ebersole
*
* @see org.hibernate.annotations.Cache
* @see org.hibernate.annotations.CacheConcurrencyStrategy
* @see org.hibernate.cfg.AvailableSettings#CACHE_REGION_FACTORY
* @see org.hibernate.cfg.AvailableSettings#USE_SECOND_LEVEL_CACHE
*/
public | causes |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/TestingDeclarativeSlotPoolBuilder.java | {
"start": 1981,
"end": 9889
} | class ____ {
private Consumer<ResourceCounter> increaseResourceRequirementsByConsumer = ignored -> {};
private Consumer<ResourceCounter> decreaseResourceRequirementsByConsumer = ignored -> {};
private Supplier<Collection<ResourceRequirement>> getResourceRequirementsSupplier =
Collections::emptyList;
private QuadFunction<
Collection<? extends SlotOffer>,
TaskManagerLocation,
TaskManagerGateway,
Long,
Collection<SlotOffer>>
offerSlotsFunction =
(ignoredA, ignoredB, ignoredC, ignoredD) -> Collections.emptyList();
private Supplier<Collection<PhysicalSlot>> getFreeSlotsInformationSupplier =
Collections::emptyList;
private Supplier<Collection<? extends SlotInfo>> getAllSlotsInformationSupplier =
Collections::emptyList;
private Supplier<FreeSlotTracker> getFreeSlotTrackerSupplier =
() -> TestingFreeSlotTracker.newBuilder().build();
private BiFunction<ResourceID, Exception, ResourceCounter> releaseSlotsFunction =
(ignoredA, ignoredB) -> ResourceCounter.empty();
private BiFunction<AllocationID, Exception, ResourceCounter> releaseSlotFunction =
(ignoredA, ignoredB) -> ResourceCounter.empty();
private BiFunction<AllocationID, ResourceProfile, PhysicalSlot> reserveFreeSlotFunction =
(ignoredA, ignoredB) -> null;
private TriFunction<AllocationID, Throwable, Long, ResourceCounter> freeReservedSlotFunction =
(ignoredA, ignoredB, ignoredC) -> ResourceCounter.empty();
private Function<ResourceID, Boolean> containsSlotsFunction = ignored -> false;
private LongConsumer returnIdleSlotsConsumer = ignored -> {};
private Consumer<ResourceCounter> setResourceRequirementsConsumer = ignored -> {};
private Function<AllocationID, Boolean> containsFreeSlotFunction = ignored -> false;
private QuadFunction<
Collection<? extends SlotOffer>,
TaskManagerLocation,
TaskManagerGateway,
Long,
Collection<SlotOffer>>
registerSlotsFunction =
(slotOffers, ignoredB, ignoredC, ignoredD) -> new ArrayList<>(slotOffers);
private Supplier<Map<ResourceID, LoadingWeight>> taskExecutorsLoadingWeightSupplier =
HashMap::new;
public TestingDeclarativeSlotPoolBuilder setIncreaseResourceRequirementsByConsumer(
Consumer<ResourceCounter> increaseResourceRequirementsByConsumer) {
this.increaseResourceRequirementsByConsumer = increaseResourceRequirementsByConsumer;
return this;
}
public TestingDeclarativeSlotPoolBuilder setDecreaseResourceRequirementsByConsumer(
Consumer<ResourceCounter> decreaseResourceRequirementsByConsumer) {
this.decreaseResourceRequirementsByConsumer = decreaseResourceRequirementsByConsumer;
return this;
}
public TestingDeclarativeSlotPoolBuilder setSetResourceRequirementsConsumer(
Consumer<ResourceCounter> setResourceRequirementsConsumer) {
this.setResourceRequirementsConsumer = setResourceRequirementsConsumer;
return this;
}
public TestingDeclarativeSlotPoolBuilder setGetResourceRequirementsSupplier(
Supplier<Collection<ResourceRequirement>> getResourceRequirementsSupplier) {
this.getResourceRequirementsSupplier = getResourceRequirementsSupplier;
return this;
}
public TestingDeclarativeSlotPoolBuilder setOfferSlotsFunction(
QuadFunction<
Collection<? extends SlotOffer>,
TaskManagerLocation,
TaskManagerGateway,
Long,
Collection<SlotOffer>>
offerSlotsFunction) {
this.offerSlotsFunction = offerSlotsFunction;
return this;
}
public TestingDeclarativeSlotPoolBuilder setRegisterSlotsFunction(
QuadFunction<
Collection<? extends SlotOffer>,
TaskManagerLocation,
TaskManagerGateway,
Long,
Collection<SlotOffer>>
registerSlotsFunction) {
this.registerSlotsFunction = registerSlotsFunction;
return this;
}
public TestingDeclarativeSlotPoolBuilder setGetFreeSlotsInformationSupplier(
Supplier<Collection<PhysicalSlot>> getFreeSlotsInformationSupplier) {
this.getFreeSlotsInformationSupplier = getFreeSlotsInformationSupplier;
return this;
}
public TestingDeclarativeSlotPoolBuilder setGetFreeSlotTrackerSupplier(
Supplier<FreeSlotTracker> getFreeSlotTrackerSupplier) {
this.getFreeSlotTrackerSupplier = getFreeSlotTrackerSupplier;
return this;
}
public TestingDeclarativeSlotPoolBuilder setGetAllSlotsInformationSupplier(
Supplier<Collection<? extends SlotInfo>> getAllSlotsInformationSupplier) {
this.getAllSlotsInformationSupplier = getAllSlotsInformationSupplier;
return this;
}
public TestingDeclarativeSlotPoolBuilder setReleaseSlotsFunction(
BiFunction<ResourceID, Exception, ResourceCounter> failSlotsConsumer) {
this.releaseSlotsFunction = failSlotsConsumer;
return this;
}
public TestingDeclarativeSlotPoolBuilder setReleaseSlotFunction(
BiFunction<AllocationID, Exception, ResourceCounter> failSlotConsumer) {
this.releaseSlotFunction = failSlotConsumer;
return this;
}
public TestingDeclarativeSlotPoolBuilder setReserveFreeSlotFunction(
BiFunction<AllocationID, ResourceProfile, PhysicalSlot>
allocateFreeSlotForResourceFunction) {
this.reserveFreeSlotFunction = allocateFreeSlotForResourceFunction;
return this;
}
public TestingDeclarativeSlotPoolBuilder setFreeReservedSlotFunction(
TriFunction<AllocationID, Throwable, Long, ResourceCounter> freeReservedSlotFunction) {
this.freeReservedSlotFunction = freeReservedSlotFunction;
return this;
}
public TestingDeclarativeSlotPoolBuilder setContainsSlotsFunction(
Function<ResourceID, Boolean> containsSlotsFunction) {
this.containsSlotsFunction = containsSlotsFunction;
return this;
}
public TestingDeclarativeSlotPoolBuilder setContainsFreeSlotFunction(
Function<AllocationID, Boolean> containsFreeSlotFunction) {
this.containsFreeSlotFunction = containsFreeSlotFunction;
return this;
}
public TestingDeclarativeSlotPoolBuilder setReturnIdleSlotsConsumer(
LongConsumer returnIdleSlotsConsumer) {
this.returnIdleSlotsConsumer = returnIdleSlotsConsumer;
return this;
}
public TestingDeclarativeSlotPool build() {
return new TestingDeclarativeSlotPool(
increaseResourceRequirementsByConsumer,
decreaseResourceRequirementsByConsumer,
getResourceRequirementsSupplier,
offerSlotsFunction,
registerSlotsFunction,
getFreeSlotsInformationSupplier,
getFreeSlotTrackerSupplier,
getAllSlotsInformationSupplier,
releaseSlotsFunction,
releaseSlotFunction,
reserveFreeSlotFunction,
freeReservedSlotFunction,
containsSlotsFunction,
containsFreeSlotFunction,
returnIdleSlotsConsumer,
setResourceRequirementsConsumer,
taskExecutorsLoadingWeightSupplier);
}
}
| TestingDeclarativeSlotPoolBuilder |
java | quarkusio__quarkus | test-framework/google-cloud-functions/src/main/java/io/quarkus/google/cloud/functions/test/FunctionType.java | {
"start": 118,
"end": 1905
} | enum ____ {
/** A function that implements <code>com.google.cloud.functions.HttpFunction</code>. **/
HTTP("io.quarkus.gcp.functions.QuarkusHttpFunction", "http", "quarkus.google-cloud-functions.function"),
/** A function that implements <code>com.google.cloud.functions.BackgroundFunction</code>. **/
BACKGROUND("io.quarkus.gcp.functions.QuarkusBackgroundFunction", "event", "quarkus.google-cloud-functions.function"),
/** A function that implements <code>com.google.cloud.functions.RawBackgroundFunction</code>. **/
RAW_BACKGROUND("io.quarkus.gcp.functions.QuarkusBackgroundFunction", "event", "quarkus.google-cloud-functions.function"),
/** A function that implements <code>com.google.cloud.functions.CloudEventsFunction</code>. **/
CLOUD_EVENTS("io.quarkus.gcp.functions.QuarkusCloudEventsFunction", "cloudevent",
"quarkus.google-cloud-functions.function"),
/** A Funqy background function. **/
FUNQY_BACKGROUND("io.quarkus.funqy.gcp.functions.FunqyBackgroundFunction", "event", "quarkus.funqy.export"),
/** A Funqy cloud events function. **/
FUNQY_CLOUD_EVENTS("io.quarkus.funqy.gcp.functions.FunqyCloudEventsFunction", "cloudevent", "quarkus.funqy.export");
private final String target;
private final String signatureType;
private final String functionProperty;
FunctionType(String target, String signatureType, String functionProperty) {
this.target = target;
this.signatureType = signatureType;
this.functionProperty = functionProperty;
}
public String getTarget() {
return target;
}
public String getSignatureType() {
return signatureType;
}
public String getFunctionProperty() {
return functionProperty;
}
}
| FunctionType |
java | grpc__grpc-java | auth/src/main/java/io/grpc/auth/MoreCallCredentials.java | {
"start": 1046,
"end": 1401
} | class ____ {@code CallCredentials} and pass the instance to other code, but the
* instance can't be called directly from code expecting stable behavior. See {@link
* CallCredentials}.
*/
public static CallCredentials from(Credentials creds) {
return new GoogleAuthLibraryCallCredentials(creds);
}
private MoreCallCredentials() {
}
}
| name |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoGridTiler.java | {
"start": 546,
"end": 1485
} | class ____ {
protected final int precision;
public GeoGridTiler(int precision) {
this.precision = precision;
}
/**
* returns the precision of this tiler
*/
public int precision() {
return precision;
}
/**
* encodes a single point to its long-encoded bucket key value.
*
* @param x the x-coordinate
* @param y the y-coordinate
*/
public abstract long encode(double x, double y);
/**
*
* @param docValues the array of long-encoded bucket keys to fill
* @param geoValue the input shape
*
* @return the number of cells the geoValue intersects
*/
public abstract int setValues(GeoShapeCellValues docValues, GeoShapeValues.GeoShapeValue geoValue) throws IOException;
/** Maximum number of cells that can be created by this tiler */
protected abstract long getMaxCells();
}
| GeoGridTiler |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/filter/headers/TransferEncodingNormalizationHeadersFilterIntegrationTests.java | {
"start": 5399,
"end": 5684
} | class ____ {
private String message;
public Message(@JsonProperty("message") String message) {
this.message = message;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
}
public static | Message |
java | google__guava | android/guava/src/com/google/common/collect/Sets.java | {
"start": 5479,
"end": 11719
} | enum ____ order, not encounter order.
*
* @since 33.2.0 (available since 21.0 in guava-jre)
*/
@IgnoreJRERequirement // Users will use this only if they're already using streams.
public static <E extends Enum<E>> Collector<E, ?, ImmutableSet<E>> toImmutableEnumSet() {
return CollectCollectors.toImmutableEnumSet();
}
/**
* Returns a new, <i>mutable</i> {@code EnumSet} instance containing the given elements in their
* natural order. This method behaves identically to {@link EnumSet#copyOf(Collection)}, but also
* accepts non-{@code Collection} iterables and empty iterables.
*/
public static <E extends Enum<E>> EnumSet<E> newEnumSet(
Iterable<E> iterable, Class<E> elementType) {
EnumSet<E> set = EnumSet.noneOf(elementType);
Iterables.addAll(set, iterable);
return set;
}
// HashSet
/**
* Creates a <i>mutable</i>, initially empty {@code HashSet} instance.
*
* <p><b>Note:</b> if mutability is not required, use {@link ImmutableSet#of()} instead. If {@code
* E} is an {@link Enum} type, use {@link EnumSet#noneOf} instead. Otherwise, strongly consider
* using a {@code LinkedHashSet} instead, at the cost of increased memory footprint, to get
* deterministic iteration behavior.
*
* <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
* use the {@code HashSet} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> HashSet<E> newHashSet() {
return new HashSet<>();
}
/**
* Creates a <i>mutable</i> {@code HashSet} instance initially containing the given elements.
*
* <p><b>Note:</b> if elements are non-null and won't be added or removed after this point, use
* {@link ImmutableSet#of()} or {@link ImmutableSet#copyOf(Object[])} instead. If {@code E} is an
* {@link Enum} type, use {@link EnumSet#of(Enum, Enum[])} instead. Otherwise, strongly consider
* using a {@code LinkedHashSet} instead, at the cost of increased memory footprint, to get
* deterministic iteration behavior.
*
* <p>This method is just a small convenience, either for {@code newHashSet(}{@link Arrays#asList
* asList}{@code (...))}, or for creating an empty set then calling {@link Collections#addAll}.
* This method is not actually very useful and will likely be deprecated in the future.
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> HashSet<E> newHashSet(E... elements) {
HashSet<E> set = newHashSetWithExpectedSize(elements.length);
Collections.addAll(set, elements);
return set;
}
/**
* Creates a <i>mutable</i> {@code HashSet} instance containing the given elements. A very thin
* convenience for creating an empty set then calling {@link Collection#addAll} or {@link
* Iterables#addAll}.
*
* <p><b>Note:</b> if mutability is not required and the elements are non-null, use {@link
* ImmutableSet#copyOf(Iterable)} instead. (Or, change {@code elements} to be a {@link
* FluentIterable} and call {@code elements.toSet()}.)
*
* <p><b>Note:</b> if {@code E} is an {@link Enum} type, use {@link #newEnumSet(Iterable, Class)}
* instead.
*
* <p><b>Note:</b> if {@code elements} is a {@link Collection}, you don't need this method.
* Instead, use the {@code HashSet} constructor directly, taking advantage of <a
* href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
* syntax</a>.
*
* <p>Overall, this method is not very useful and will likely be deprecated in the future.
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> HashSet<E> newHashSet(Iterable<? extends E> elements) {
return (elements instanceof Collection)
? new HashSet<E>((Collection<? extends E>) elements)
: newHashSet(elements.iterator());
}
/**
* Creates a <i>mutable</i> {@code HashSet} instance containing the given elements. A very thin
* convenience for creating an empty set and then calling {@link Iterators#addAll}.
*
* <p><b>Note:</b> if mutability is not required and the elements are non-null, use {@link
* ImmutableSet#copyOf(Iterator)} instead.
*
* <p><b>Note:</b> if {@code E} is an {@link Enum} type, you should create an {@link EnumSet}
* instead.
*
* <p>Overall, this method is not very useful and will likely be deprecated in the future.
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> HashSet<E> newHashSet(Iterator<? extends E> elements) {
HashSet<E> set = new HashSet<>();
Iterators.addAll(set, elements);
return set;
}
/**
* Returns a new hash set using the smallest initial table size that can hold {@code expectedSize}
* elements without resizing. Note that this is not what {@link HashSet#HashSet(int)} does, but it
* is what most users want and expect it to do.
*
* <p>This behavior can't be broadly guaranteed, but has been tested with OpenJDK 1.7 and 1.8.
*
* @param expectedSize the number of elements you expect to add to the returned set
* @return a new, empty hash set with enough capacity to hold {@code expectedSize} elements
* without resizing
* @throws IllegalArgumentException if {@code expectedSize} is negative
*/
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> HashSet<E> newHashSetWithExpectedSize(
int expectedSize) {
return new HashSet<>(Maps.capacity(expectedSize));
}
/**
* Creates a thread-safe set backed by a hash map. The set is backed by a {@link
* ConcurrentHashMap} instance, and thus carries the same concurrency guarantees.
*
* <p>Unlike {@code HashSet}, this | definition |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IdentifierNameTest.java | {
"start": 15910,
"end": 16473
} | class ____ {
void unnamed() {
try (var _ = new Scanner("discarded")) {
Function<String, String> f = _ -> "bar";
String _ = f.apply("foo");
} catch (Exception _) {
}
}
}
""")
.setArgs("--enable-preview", "--release", Integer.toString(Runtime.version().feature()))
.doTest();
}
@Test
public void bindingVariables() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | elastic__elasticsearch | libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/script/ScriptServiceBridge.java | {
"start": 6331,
"end": 7088
} | class ____ implements ProjectResolver {
public static final LockdownOnlyDefaultProjectIdResolver INSTANCE = new LockdownOnlyDefaultProjectIdResolver();
@Override
public ProjectId getProjectId() {
return ProjectId.DEFAULT;
}
@Override
public <E extends Exception> void executeOnProject(ProjectId projectId, CheckedRunnable<E> body) throws E {
if (projectId.equals(ProjectId.DEFAULT)) {
body.run();
} else {
throw new IllegalArgumentException("Cannot execute on a project other than [" + ProjectId.DEFAULT + "]");
}
}
}
}
}
| LockdownOnlyDefaultProjectIdResolver |
java | spring-projects__spring-security | oauth2/oauth2-jose/src/test/java/org/springframework/security/oauth2/jwt/ReactiveJwtDecodersTests.java | {
"start": 1675,
"end": 13256
} | class ____ {
/**
* Contains those parameters required to construct a ReactiveJwtDecoder as well as any
* required parameters
*/
// @formatter:off
private static final String DEFAULT_RESPONSE_TEMPLATE = "{\n"
+ " \"authorization_endpoint\": \"https://example.com/o/oauth2/v2/auth\", \n"
+ " \"id_token_signing_alg_values_supported\": [\n"
+ " \"RS256\"\n"
+ " ], \n"
+ " \"issuer\": \"%s\", \n"
+ " \"jwks_uri\": \"%s/.well-known/jwks.json\", \n"
+ " \"response_types_supported\": [\n"
+ " \"code\", \n"
+ " \"token\", \n"
+ " \"id_token\", \n"
+ " \"code token\", \n"
+ " \"code id_token\", \n"
+ " \"token id_token\", \n"
+ " \"code token id_token\", \n"
+ " \"none\"\n"
+ " ], \n"
+ " \"subject_types_supported\": [\n"
+ " \"public\"\n"
+ " ], \n"
+ " \"token_endpoint\": \"https://example.com/oauth2/v4/token\"\n"
+ "}";
// @formatter:on
private static final String JWK_SET = "{\"keys\":[{\"kty\":\"RSA\",\"e\":\"AQAB\",\"use\":\"sig\",\"kid\":\"one\",\"n\":\"oXJ8OyOv_eRnce4akdanR4KYRfnC2zLV4uYNQpcFn6oHL0dj7D6kxQmsXoYgJV8ZVDn71KGmuLvolxsDncc2UrhyMBY6DVQVgMSVYaPCTgW76iYEKGgzTEw5IBRQL9w3SRJWd3VJTZZQjkXef48Ocz06PGF3lhbz4t5UEZtdF4rIe7u-977QwHuh7yRPBQ3sII-cVoOUMgaXB9SHcGF2iZCtPzL_IffDUcfhLQteGebhW8A6eUHgpD5A1PQ-JCw_G7UOzZAjjDjtNM2eqm8j-Ms_gqnm4MiCZ4E-9pDN77CAAPVN7kuX6ejs9KBXpk01z48i9fORYk9u7rAkh1HuQw\"}]}";
private static final String ISSUER_MISMATCH = "eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczpcL1wvd3Jvbmdpc3N1ZXIiLCJleHAiOjQ2ODcyNTYwNDl9.Ax8LMI6rhB9Pv_CE3kFi1JPuLj9gZycifWrLeDpkObWEEVAsIls9zAhNFyJlG-Oo7up6_mDhZgeRfyKnpSF5GhKJtXJDCzwg0ZDVUE6rS0QadSxsMMGbl7c4y0lG_7TfLX2iWeNJukJj_oSW9KzW4FsBp1BoocWjrreesqQU3fZHbikH-c_Fs2TsAIpHnxflyEzfOFWpJ8D4DtzHXqfvieMwpy42xsPZK3LR84zlasf0Ne1tC_hLHvyHRdAXwn0CMoKxc7-8j0r9Mq8kAzUsPn9If7bMLqGkxUcTPdk5x7opAUajDZx95SXHLmtztNtBa2S6EfPJXuPKG6tM5Wq5Ug";
private static final String OIDC_METADATA_PATH = "/.well-known/openid-configuration";
private static final String OAUTH_METADATA_PATH = "/.well-known/oauth-authorization-server";
private MockWebServer server;
private String issuer;
@BeforeEach
public void setup() throws Exception {
this.server = new MockWebServer();
this.server.start();
this.issuer = createIssuerFromServer();
this.issuer += "path";
}
@AfterEach
public void cleanup() throws Exception {
this.server.shutdown();
}
@Test
public void issuerWhenResponseIsTypicalThenReturnedDecoderValidatesIssuer() {
prepareConfigurationResponse();
ReactiveJwtDecoder decoder = ReactiveJwtDecoders.fromOidcIssuerLocation(this.issuer);
// @formatter:off
assertThatExceptionOfType(JwtValidationException.class)
.isThrownBy(() -> decoder.decode(ISSUER_MISMATCH).block())
.withMessageContaining("The iss claim is not valid");
// @formatter:on
}
@Test
public void issuerWhenOidcFallbackResponseIsTypicalThenReturnedDecoderValidatesIssuer() {
prepareConfigurationResponseOidc();
ReactiveJwtDecoder decoder = ReactiveJwtDecoders.fromIssuerLocation(this.issuer);
// @formatter:off
assertThatExceptionOfType(JwtValidationException.class)
.isThrownBy(() -> decoder.decode(ISSUER_MISMATCH).block())
.withMessageContaining("The iss claim is not valid");
// @formatter:on
}
@Test
public void issuerWhenOAuth2ResponseIsTypicalThenReturnedDecoderValidatesIssuer() {
prepareConfigurationResponseOAuth2();
ReactiveJwtDecoder decoder = ReactiveJwtDecoders.fromIssuerLocation(this.issuer);
// @formatter:off
assertThatExceptionOfType(JwtValidationException.class)
.isThrownBy(() -> decoder.decode(ISSUER_MISMATCH).block())
.withMessageContaining("The iss claim is not valid");
// @formatter:on
}
@Test
public void issuerWhenResponseIsNonCompliantThenThrowsRuntimeException() {
prepareConfigurationResponse("{ \"missing_required_keys\" : \"and_values\" }");
// @formatter:off
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> ReactiveJwtDecoders.fromOidcIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenOidcFallbackResponseIsNonCompliantThenThrowsRuntimeException() {
prepareConfigurationResponseOidc("{ \"missing_required_keys\" : \"and_values\" }");
// @formatter:off
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenOAuth2ResponseIsNonCompliantThenThrowsRuntimeException() {
prepareConfigurationResponseOAuth2("{ \"missing_required_keys\" : \"and_values\" }");
// @formatter:off
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer));
// @formatter:on
}
// gh-7512
@Test
public void issuerWhenResponseDoesNotContainJwksUriThenThrowsIllegalArgumentException() {
prepareConfigurationResponse(this.buildResponseWithMissingJwksUri());
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> ReactiveJwtDecoders.fromOidcIssuerLocation(this.issuer))
.withMessage("The public JWK set URI must not be null");
// @formatter:on
}
// gh-7512
@Test
public void issuerWhenOidcFallbackResponseDoesNotContainJwksUriThenThrowsIllegalArgumentException() {
prepareConfigurationResponseOidc(this.buildResponseWithMissingJwksUri());
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer))
.withMessage("The public JWK set URI must not be null");
// @formatter:on
}
// gh-7512
@Test
public void issuerWhenOAuth2ResponseDoesNotContainJwksUriThenThrowsIllegalArgumentException() {
prepareConfigurationResponseOAuth2(this.buildResponseWithMissingJwksUri());
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer))
.withMessage("The public JWK set URI must not be null");
// @formatter:on
}
@Test
public void issuerWhenResponseIsMalformedThenThrowsRuntimeException() {
prepareConfigurationResponse("malformed");
// @formatter:off
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> ReactiveJwtDecoders.fromOidcIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenOidcFallbackResponseIsMalformedThenThrowsRuntimeException() {
prepareConfigurationResponseOidc("malformed");
// @formatter:off
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenOAuth2ResponseIsMalformedThenThrowsRuntimeException() {
prepareConfigurationResponseOAuth2("malformed");
// @formatter:off
assertThatExceptionOfType(RuntimeException.class)
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenRespondingIssuerMismatchesRequestedIssuerThenThrowsIllegalStateException() {
prepareConfigurationResponse(String.format(DEFAULT_RESPONSE_TEMPLATE, this.issuer + "/wrong", this.issuer));
// @formatter:off
assertThatIllegalStateException()
.isThrownBy(() -> ReactiveJwtDecoders.fromOidcIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenOidcFallbackRespondingIssuerMismatchesRequestedIssuerThenThrowsIllegalStateException() {
prepareConfigurationResponseOidc(String.format(DEFAULT_RESPONSE_TEMPLATE, this.issuer + "/wrong", this.issuer));
// @formatter:off
assertThatIllegalStateException()
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenOAuth2RespondingIssuerMismatchesRequestedIssuerThenThrowsIllegalStateException() {
prepareConfigurationResponseOAuth2(
String.format(DEFAULT_RESPONSE_TEMPLATE, this.issuer + "/wrong", this.issuer));
// @formatter:off
assertThatIllegalStateException()
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation(this.issuer));
// @formatter:on
}
@Test
public void issuerWhenRequestedIssuerIsUnresponsiveThenThrowsIllegalArgumentException() throws Exception {
this.server.shutdown();
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> ReactiveJwtDecoders.fromOidcIssuerLocation("https://issuer"));
// @formatter:on
}
@Test
public void issuerWhenOidcFallbackRequestedIssuerIsUnresponsiveThenThrowsIllegalArgumentException()
throws Exception {
this.server.shutdown();
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> ReactiveJwtDecoders.fromIssuerLocation("https://issuer"));
// @formatter:on
}
private void prepareConfigurationResponse() {
String body = String.format(DEFAULT_RESPONSE_TEMPLATE, this.issuer, this.issuer);
prepareConfigurationResponse(body);
}
private void prepareConfigurationResponse(String body) {
this.server.enqueue(response(body));
this.server.enqueue(response(JWK_SET));
}
private void prepareConfigurationResponseOidc() {
String body = String.format(DEFAULT_RESPONSE_TEMPLATE, this.issuer, this.issuer);
prepareConfigurationResponseOidc(body);
}
private void prepareConfigurationResponseOidc(String body) {
Map<String, MockResponse> responses = new HashMap<>();
responses.put(oidc(), response(body));
responses.put(jwks(), response(JWK_SET));
prepareConfigurationResponses(responses);
}
private void prepareConfigurationResponseOAuth2() {
String body = String.format(DEFAULT_RESPONSE_TEMPLATE, this.issuer, this.issuer);
prepareConfigurationResponseOAuth2(body);
}
private void prepareConfigurationResponseOAuth2(String body) {
Map<String, MockResponse> responses = new HashMap<>();
responses.put(oauth(), response(body));
responses.put(jwks(), response(JWK_SET));
prepareConfigurationResponses(responses);
}
private void prepareConfigurationResponses(Map<String, MockResponse> responses) {
Dispatcher dispatcher = new Dispatcher() {
@Override
public MockResponse dispatch(RecordedRequest request) {
// @formatter:off
return Optional.of(request)
.map(RecordedRequest::getRequestUrl)
.map(HttpUrl::toString)
.map(responses::get)
.orElse(new MockResponse().setResponseCode(404));
// @formatter:on
}
};
this.server.setDispatcher(dispatcher);
}
private String createIssuerFromServer() {
return this.server.url("").toString();
}
private String oidc() {
URI uri = URI.create(this.issuer);
// @formatter:off
return UriComponentsBuilder.fromUri(uri)
.replacePath(uri.getPath() + OIDC_METADATA_PATH)
.toUriString();
// @formatter:on
}
private String oauth() {
URI uri = URI.create(this.issuer);
// @formatter:off
return UriComponentsBuilder.fromUri(uri)
.replacePath(OAUTH_METADATA_PATH + uri.getPath())
.toUriString();
// @formatter:on
}
private String jwks() {
return this.issuer + "/.well-known/jwks.json";
}
private MockResponse response(String body) {
// @formatter:off
return new MockResponse().setBody(body)
.setHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE);
// @formatter:on
}
public String buildResponseWithMissingJwksUri() {
JsonMapper mapper = new JsonMapper();
Map<String, Object> response = mapper.readValue(DEFAULT_RESPONSE_TEMPLATE,
new TypeReference<Map<String, Object>>() {
});
response.remove("jwks_uri");
return mapper.writeValueAsString(response);
}
}
| ReactiveJwtDecodersTests |
java | spring-projects__spring-security | core/src/test/java/org/springframework/security/core/context/SecurityContextHolderThreadLocalAccessorTests.java | {
"start": 1125,
"end": 3300
} | class ____ {
private SecurityContextHolderThreadLocalAccessor threadLocalAccessor;
@BeforeEach
public void setUp() {
this.threadLocalAccessor = new SecurityContextHolderThreadLocalAccessor();
}
@AfterEach
public void tearDown() {
this.threadLocalAccessor.setValue();
}
@Test
public void keyAlwaysReturnsSecurityContextClassName() {
assertThat(this.threadLocalAccessor.key()).isEqualTo(SecurityContext.class.getName());
}
@Test
public void getValueWhenSecurityContextHolderNotSetThenReturnsNull() {
assertThat(this.threadLocalAccessor.getValue()).isNull();
}
@Test
public void getValueWhenSecurityContextHolderSetThenReturnsSecurityContext() {
SecurityContext securityContext = SecurityContextHolder.createEmptyContext();
securityContext.setAuthentication(new TestingAuthenticationToken("user", "password"));
SecurityContextHolder.setContext(securityContext);
assertThat(this.threadLocalAccessor.getValue()).isSameAs(securityContext);
}
@Test
public void setValueWhenSecurityContextThenSetsSecurityContextHolder() {
SecurityContext securityContext = SecurityContextHolder.createEmptyContext();
securityContext.setAuthentication(new TestingAuthenticationToken("user", "password"));
this.threadLocalAccessor.setValue(securityContext);
assertThat(SecurityContextHolder.getContext()).isSameAs(securityContext);
}
@Test
public void setValueWhenNullThenThrowsIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.threadLocalAccessor.setValue(null))
.withMessage("securityContext cannot be null");
// @formatter:on
}
@Test
public void setValueWhenSecurityContextSetThenClearsSecurityContextHolder() {
SecurityContext securityContext = SecurityContextHolder.createEmptyContext();
securityContext.setAuthentication(new TestingAuthenticationToken("user", "password"));
SecurityContextHolder.setContext(securityContext);
this.threadLocalAccessor.setValue();
SecurityContext emptyContext = SecurityContextHolder.createEmptyContext();
assertThat(SecurityContextHolder.getContext()).isEqualTo(emptyContext);
}
}
| SecurityContextHolderThreadLocalAccessorTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolPB.java | {
"start": 1495,
"end": 1592
} | interface ____ extends
GenericRefreshProtocolService.BlockingInterface {
}
| GenericRefreshProtocolPB |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/LocalCachedMapCacheOptions.java | {
"start": 1233,
"end": 1780
} | enum ____ {
/**
* No reconnect handling.
*/
NONE,
/**
* Clear local cache if map instance disconnected.
*/
CLEAR,
/**
* Store invalidated entry hash in invalidation log for 10 minutes.
* Cache keys for stored invalidated entry hashes will be removed
* if LocalCachedMap instance has been disconnected less than 10 minutes
* or whole local cache will be cleaned otherwise.
*/
LOAD
}
public | ReconnectionStrategy |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/selectkey/SelectKeyTest.java | {
"start": 1343,
"end": 12684
} | class ____ {
protected static SqlSessionFactory sqlSessionFactory;
@BeforeEach
void setUp() throws Exception {
try (Reader reader = Resources.getResourceAsReader("org/apache/ibatis/submitted/selectkey/MapperConfig.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
sqlSessionFactory.getConfiguration().addMapper(AnnotatedMapper.class);
}
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/selectkey/CreateDB.sql");
}
@Test
void selectKey() throws Exception {
// this test checks to make sure that we can have select keys with the same
// insert id in different namespaces
String resource = "org/apache/ibatis/submitted/selectkey/MapperConfig.xml";
Reader reader = Resources.getResourceAsReader(resource);
SqlSessionFactoryBuilder builder = new SqlSessionFactoryBuilder();
SqlSessionFactory sqlMapper = builder.build(reader);
assertNotNull(sqlMapper);
}
@Test
void insertTable1() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Map<String, Object> parms = new HashMap<>();
parms.put("name", "Fred");
int rows = sqlSession.insert("org.apache.ibatis.submitted.selectkey.Table1.insert", parms);
assertEquals(1, rows);
assertEquals(11, parms.get("id"));
}
}
@Test
void insertTable2() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Map<String, Object> parms = new HashMap<>();
parms.put("name", "Fred");
int rows = sqlSession.insert("org.apache.ibatis.submitted.selectkey.Table2.insert", parms);
assertEquals(1, rows);
assertEquals(22, parms.get("id"));
}
}
@Test
void seleckKeyReturnsNoData() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Map<String, String> parms = new HashMap<>();
parms.put("name", "Fred");
Assertions.assertThrows(PersistenceException.class,
() -> sqlSession.insert("org.apache.ibatis.submitted.selectkey.Table2.insertNoValuesInSelectKey", parms));
}
}
@Test
void seleckKeyReturnsTooManyData() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Map<String, String> parms = new HashMap<>();
parms.put("name", "Fred");
sqlSession.insert("org.apache.ibatis.submitted.selectkey.Table2.insertTooManyValuesInSelectKey", parms);
Assertions.assertThrows(PersistenceException.class, () -> sqlSession
.insert("org.apache.ibatis.submitted.selectkey.Table2.insertTooManyValuesInSelectKey", parms));
}
}
@Test
void annotatedInsertTable2() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
}
}
@Test
void annotatedInsertTable2WithGeneratedKey() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithGeneratedKey(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
}
}
@Test
@Disabled("HSQLDB is not returning the generated column after the update")
void annotatedUpdateTable2WithGeneratedKey() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithGeneratedKey(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
name.setName("Wilma");
rows = mapper.updateTable2WithGeneratedKey(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("Wilma_fred", name.getGeneratedName());
}
}
@Test
@Disabled("HSQLDB is not returning the generated column after the update")
void annotatedUpdateTable2WithGeneratedKeyXml() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithGeneratedKeyXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
name.setName("Wilma");
rows = mapper.updateTable2WithGeneratedKeyXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("Wilma_fred", name.getGeneratedName());
}
}
@Test
void annotatedInsertTable2WithGeneratedKeyXml() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithGeneratedKeyXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
}
}
@Test
void annotatedInsertTable2WithSelectKeyWithKeyMap() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyMap(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
}
}
@Test
void annotatedUpdateTable2WithSelectKeyWithKeyMap() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyMap(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
name.setName("Wilma");
rows = mapper.updateTable2WithSelectKeyWithKeyMap(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("Wilma_fred", name.getGeneratedName());
}
}
@Test
void annotatedInsertTable2WithSelectKeyWithKeyMapXml() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyMapXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
}
}
@Test
void annotatedUpdateTable2WithSelectKeyWithKeyMapXml() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyMapXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
name.setName("Wilma");
rows = mapper.updateTable2WithSelectKeyWithKeyMapXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("Wilma_fred", name.getGeneratedName());
}
}
@Test
void annotatedInsertTable2WithSelectKeyWithKeyObject() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyObject(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
}
}
@Test
void annotatedUpdateTable2WithSelectKeyWithKeyObject() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyObject(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
name.setName("Wilma");
rows = mapper.updateTable2WithSelectKeyWithKeyObject(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("Wilma_fred", name.getGeneratedName());
}
}
@Test
void annotatedUpdateTable2WithSelectKeyWithKeyObjectXml() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyObjectXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
name.setName("Wilma");
rows = mapper.updateTable2WithSelectKeyWithKeyObjectXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("Wilma_fred", name.getGeneratedName());
}
}
@Test
void annotatedInsertTable2WithSelectKeyWithKeyObjectXml() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable2WithSelectKeyWithKeyObjectXml(name);
assertEquals(1, rows);
assertEquals(22, name.getNameId());
assertEquals("barney_fred", name.getGeneratedName());
}
}
@Test
void annotatedInsertTable3() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable3(name);
assertEquals(1, rows);
assertEquals(33, name.getNameId());
}
}
@Test
void annotatedInsertTable32() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("barney");
AnnotatedMapper mapper = sqlSession.getMapper(AnnotatedMapper.class);
int rows = mapper.insertTable3_2(name);
assertEquals(1, rows);
assertEquals(33, name.getNameId());
}
}
@Test
void seleckKeyWithWrongKeyProperty() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Name name = new Name();
name.setName("Kyoto");
Assertions.assertThrows(PersistenceException.class,
() -> sqlSession.insert("org.apache.ibatis.submitted.selectkey.Table2.insertWrongKeyProperty", name));
}
}
}
| SelectKeyTest |
java | grpc__grpc-java | api/src/main/java/io/grpc/MethodDescriptor.java | {
"start": 5941,
"end": 15224
} | interface ____<T> extends ReflectableMarshaller<T> {
/**
* An instance of the expected message type, typically used as a schema and helper for producing
* other message instances. The {@code null} value may be a special value for the marshaller
* (like the equivalent of {@link Void}), so it is a valid return value. {@code null} does
* <em>not</em> mean "unsupported" or "unknown".
*
* <p>It is generally expected this would return the same instance each invocation, but it is
* not a requirement.
*/
@Nullable
public T getMessagePrototype();
}
/**
* Creates a new {@code MethodDescriptor}.
*
* @param type the call type of this method
* @param fullMethodName the fully qualified name of this method
* @param requestMarshaller the marshaller used to encode and decode requests
* @param responseMarshaller the marshaller used to encode and decode responses
* @since 1.0.0
* @deprecated use {@link #newBuilder()}.
*/
@Deprecated
public static <RequestT, ResponseT> MethodDescriptor<RequestT, ResponseT> create(
MethodType type, String fullMethodName,
Marshaller<RequestT> requestMarshaller,
Marshaller<ResponseT> responseMarshaller) {
return new MethodDescriptor<>(
type, fullMethodName, requestMarshaller, responseMarshaller, null, false, false, false);
}
private MethodDescriptor(
MethodType type,
String fullMethodName,
Marshaller<ReqT> requestMarshaller,
Marshaller<RespT> responseMarshaller,
Object schemaDescriptor,
boolean idempotent,
boolean safe,
boolean sampledToLocalTracing) {
assert !safe || idempotent : "safe should imply idempotent";
this.type = Preconditions.checkNotNull(type, "type");
this.fullMethodName = Preconditions.checkNotNull(fullMethodName, "fullMethodName");
this.serviceName = extractFullServiceName(fullMethodName);
this.requestMarshaller = Preconditions.checkNotNull(requestMarshaller, "requestMarshaller");
this.responseMarshaller = Preconditions.checkNotNull(responseMarshaller, "responseMarshaller");
this.schemaDescriptor = schemaDescriptor;
this.idempotent = idempotent;
this.safe = safe;
this.sampledToLocalTracing = sampledToLocalTracing;
}
/**
* The call type of the method.
*
* @since 1.0.0
*/
public MethodType getType() {
return type;
}
/**
* The fully qualified name of the method.
*
* @since 1.0.0
*/
public String getFullMethodName() {
return fullMethodName;
}
/**
* A convenience method for {@code extractFullServiceName(getFullMethodName())}.
*
* @since 1.21.0
*/
@Nullable
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/5635")
public String getServiceName() {
return serviceName;
}
/**
* A convenience method for {@code extractBareMethodName(getFullMethodName())}.
*
* @since 1.33.0
*/
@Nullable
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/5635")
public String getBareMethodName() {
return extractBareMethodName(fullMethodName);
}
/**
* Parse a response payload from the given {@link InputStream}.
*
* @param input stream containing response message to parse.
* @return parsed response message object.
* @since 1.0.0
*/
public RespT parseResponse(InputStream input) {
return responseMarshaller.parse(input);
}
/**
* Convert a request message to an {@link InputStream}.
* The returned InputStream should be closed by the caller.
*
* @param requestMessage to serialize using the request {@link Marshaller}.
* @return serialized request message.
* @since 1.0.0
*/
public InputStream streamRequest(ReqT requestMessage) {
return requestMarshaller.stream(requestMessage);
}
/**
* Parse an incoming request message.
*
* @param input the serialized message as a byte stream.
* @return a parsed instance of the message.
* @since 1.0.0
*/
public ReqT parseRequest(InputStream input) {
return requestMarshaller.parse(input);
}
/**
* Serialize an outgoing response message.
* The returned InputStream should be closed by the caller.
*
* @param response the response message to serialize.
* @return the serialized message as a byte stream.
* @since 1.0.0
*/
public InputStream streamResponse(RespT response) {
return responseMarshaller.stream(response);
}
/**
* Returns the marshaller for the request type. Allows introspection of the request marshaller.
*
* @since 1.1.0
*/
public Marshaller<ReqT> getRequestMarshaller() {
return requestMarshaller;
}
/**
* Returns the marshaller for the response type. Allows introspection of the response marshaller.
*
* @since 1.1.0
*/
public Marshaller<RespT> getResponseMarshaller() {
return responseMarshaller;
}
/**
* Returns the schema descriptor for this method. A schema descriptor is an object that is not
* used by gRPC core but includes information related to the service method. The type of the
* object is specific to the consumer, so both the code setting the schema descriptor and the code
* calling {@link #getSchemaDescriptor()} must coordinate. For example, protobuf generated code
* sets this value, in order to be consumed by the server reflection service. See also:
* {@code io.grpc.protobuf.ProtoMethodDescriptorSupplier}.
*
* @since 1.7.0
*/
public @Nullable Object getSchemaDescriptor() {
return schemaDescriptor;
}
/**
* Returns whether this method is idempotent.
*
* @since 1.0.0
*/
public boolean isIdempotent() {
return idempotent;
}
/**
* Returns whether this method is safe.
*
* <p>A safe request does nothing except retrieval so it has no side effects on the server side.
*
* @since 1.1.0
*/
public boolean isSafe() {
return safe;
}
/**
* Returns whether RPCs for this method may be sampled into the local tracing store.
*/
public boolean isSampledToLocalTracing() {
return sampledToLocalTracing;
}
/**
* Generate the fully qualified method name. This matches the name
*
* @param fullServiceName the fully qualified service name that is prefixed with the package name
* @param methodName the short method name
* @since 1.0.0
*/
public static String generateFullMethodName(String fullServiceName, String methodName) {
return checkNotNull(fullServiceName, "fullServiceName")
+ "/"
+ checkNotNull(methodName, "methodName");
}
/**
* Extract the fully qualified service name out of a fully qualified method name. May return
* {@code null} if the input is malformed, but you cannot rely on it for the validity of the
* input.
*
* @since 1.0.0
*/
@Nullable
public static String extractFullServiceName(String fullMethodName) {
int index = checkNotNull(fullMethodName, "fullMethodName").lastIndexOf('/');
if (index == -1) {
return null;
}
return fullMethodName.substring(0, index);
}
/**
* Extract the method name out of a fully qualified method name. May return {@code null}
* if the input is malformed, but you cannot rely on it for the validity of the input.
*
* @since 1.33.0
*/
@Nullable
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/5635")
public static String extractBareMethodName(String fullMethodName) {
int index = checkNotNull(fullMethodName, "fullMethodName").lastIndexOf('/');
if (index == -1) {
return null;
}
return fullMethodName.substring(index + 1);
}
/**
* Creates a new builder for a {@link MethodDescriptor}.
*
* @since 1.1.0
*/
@CheckReturnValue
public static <ReqT, RespT> Builder<ReqT, RespT> newBuilder() {
return newBuilder(null, null);
}
/**
* Creates a new builder for a {@link MethodDescriptor}.
*
* @since 1.1.0
*/
@CheckReturnValue
public static <ReqT, RespT> Builder<ReqT, RespT> newBuilder(
Marshaller<ReqT> requestMarshaller, Marshaller<RespT> responseMarshaller) {
return new Builder<ReqT, RespT>()
.setRequestMarshaller(requestMarshaller)
.setResponseMarshaller(responseMarshaller);
}
/**
* Turns this descriptor into a builder.
*
* @since 1.1.0
*/
@CheckReturnValue
public Builder<ReqT, RespT> toBuilder() {
return toBuilder(requestMarshaller, responseMarshaller);
}
/**
* Turns this descriptor into a builder, replacing the request and response marshallers.
*
* @since 1.1.0
*/
@CheckReturnValue
public <NewReqT, NewRespT> Builder<NewReqT, NewRespT> toBuilder(
Marshaller<NewReqT> requestMarshaller, Marshaller<NewRespT> responseMarshaller) {
return MethodDescriptor.<NewReqT, NewRespT>newBuilder()
.setRequestMarshaller(requestMarshaller)
.setResponseMarshaller(responseMarshaller)
.setType(type)
.setFullMethodName(fullMethodName)
.setIdempotent(idempotent)
.setSafe(safe)
.setSampledToLocalTracing(sampledToLocalTracing)
.setSchemaDescriptor(schemaDescriptor);
}
/**
* A builder for a {@link MethodDescriptor}.
*
* @since 1.1.0
*/
public static final | PrototypeMarshaller |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/audit/impl/ActiveAuditManagerS3A.java | {
"start": 23098,
"end": 29721
} | class ____ extends AbstractAuditSpanImpl {
/**
* Inner span.
*/
private final AuditSpanS3A span;
/**
* Is this span considered valid?
*/
private final boolean isValid;
/**
* Create, wrapped.
* The spanID, name, timestamp etc copied from the span being wrapped.
* Why not the isValid state? We want to set our unbonded span without
* relying on the auditor doing the right thing.
* @param span inner span.
* @param isValid is the span valid
*/
private WrappingAuditSpan(
final AuditSpanS3A span, final boolean isValid) {
super(span.getSpanId(), span.getTimestamp(), span.getOperationName());
this.span = requireNonNull(span);
this.isValid = isValid;
}
/**
* Is the span active?
* @return true if this span is the active one for the current thread.
*/
private boolean isActive() {
return this == getActiveAuditSpan();
}
/**
* Makes this the thread's active span and activate.
* If the span was already active: no-op.
*/
@Override
public AuditSpanS3A activate() {
if (!isActive()) {
switchToActiveSpan(this);
span.activate();
}
return this;
}
/**
* Switch to the unbounded span and then deactivate this span.
* No-op for invalid spans,
* so as to prevent the unbounded span from being closed
* and everything getting very confused.
*/
@Override
public void deactivate() {
// span is inactive; ignore
if (!isActive()) {
return;
}
// skipped for invalid spans,
// so as to prevent the unbounded span from being closed
// and everything getting very confused.
if (isValid) {
// deactivate the span
span.deactivate();
}
// remove the span from the reference map,
// sporadically triggering a prune operation.
removeActiveSpanFromMap();
}
/**
* Forward to the wrapped span.
* {@inheritDoc}
*/
@Override
public void requestCreated(final SdkRequest.Builder builder) {
span.requestCreated(builder);
}
/**
* This span is valid if the span isn't closed and the inner
* span is valid.
* @return true if the span is considered valid.
*/
@Override
public boolean isValidSpan() {
return isValid && span.isValidSpan();
}
/**
* Get the inner span.
* @return the span.
*/
@VisibleForTesting
AuditSpanS3A getSpan() {
return span;
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void set(final String key, final String value) {
span.set(key, value);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void beforeExecution(Context.BeforeExecution context,
ExecutionAttributes executionAttributes) {
span.beforeExecution(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void afterExecution(Context.AfterExecution context,
ExecutionAttributes executionAttributes) {
span.afterExecution(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void onExecutionFailure(Context.FailedExecution context,
ExecutionAttributes executionAttributes) {
span.onExecutionFailure(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void beforeMarshalling(Context.BeforeMarshalling context,
ExecutionAttributes executionAttributes) {
span.beforeMarshalling(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public SdkRequest modifyRequest(Context.ModifyRequest context,
ExecutionAttributes executionAttributes) {
return span.modifyRequest(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void afterMarshalling(Context.AfterMarshalling context,
ExecutionAttributes executionAttributes) {
span.afterMarshalling(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context,
ExecutionAttributes executionAttributes) {
return span.modifyHttpRequest(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void beforeTransmission(Context.BeforeTransmission context,
ExecutionAttributes executionAttributes) {
span.beforeTransmission(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void afterTransmission(Context.AfterTransmission context,
ExecutionAttributes executionAttributes) {
span.afterTransmission(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public SdkHttpResponse modifyHttpResponse(Context.ModifyHttpResponse context,
ExecutionAttributes executionAttributes) {
return span.modifyHttpResponse(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void beforeUnmarshalling(Context.BeforeUnmarshalling context,
ExecutionAttributes executionAttributes) {
span.beforeUnmarshalling(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void afterUnmarshalling(Context.AfterUnmarshalling context,
ExecutionAttributes executionAttributes) {
span.afterUnmarshalling(context, executionAttributes);
}
/**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public SdkResponse modifyResponse(Context.ModifyResponse context,
ExecutionAttributes executionAttributes) {
return span.modifyResponse(context, executionAttributes);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"WrappingAuditSpan{");
sb.append("span=").append(span);
sb.append(", valid=").append(isValidSpan());
sb.append('}');
return sb.toString();
}
}
}
| WrappingAuditSpan |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/exec/internal/JdbcOperationQueryUpdate.java | {
"start": 479,
"end": 863
} | class ____
extends AbstractJdbcOperationQuery
implements JdbcOperationQueryMutation {
public JdbcOperationQueryUpdate(
String sql,
List<JdbcParameterBinder> parameterBinders,
Set<String> affectedTableNames,
Map<JdbcParameter, JdbcParameterBinding> appliedParameters) {
super( sql, parameterBinders, affectedTableNames, appliedParameters );
}
}
| JdbcOperationQueryUpdate |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClass.java | {
"start": 10154,
"end": 10623
} | class ____ extends Problem {
BeanMethodOverloadingProblem(String methodName) {
super(String.format("@Configuration class '%s' contains overloaded @Bean methods with name '%s'. Use " +
"unique method names for separate bean definitions (with individual conditions etc) " +
"or switch '@Configuration.enforceUniqueMethods' to 'false'.",
getSimpleName(), methodName), new Location(getResource(), getMetadata()));
}
}
}
| BeanMethodOverloadingProblem |
java | square__retrofit | retrofit-converters/moshi/src/main/java/retrofit2/converter/moshi/MoshiStreamingRequestBody.java | {
"start": 870,
"end": 1308
} | class ____<T> extends RequestBody {
private final JsonAdapter<T> adapter;
private final T value;
public MoshiStreamingRequestBody(JsonAdapter<T> adapter, T value) {
this.adapter = adapter;
this.value = value;
}
@Override
public MediaType contentType() {
return MEDIA_TYPE;
}
@Override
public void writeTo(BufferedSink sink) throws IOException {
adapter.toJson(sink, value);
}
}
| MoshiStreamingRequestBody |
java | spring-projects__spring-boot | integration-test/spring-boot-test-integration-tests/src/test/java/org/springframework/boot/web/server/test/SpringBootTestReactiveWebEnvironmentDefinedPortTests.java | {
"start": 1624,
"end": 1669
} | class ____ extends AbstractConfig {
}
}
| Config |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/support/EntityManagerBeanDefinitionRegistrarPostProcessorUnitTests.java | {
"start": 3535,
"end": 3614
} | interface ____ extends EntityManagerFactory {}
static | SpecialEntityManagerFactory |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_169_not_between.java | {
"start": 315,
"end": 1326
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "select * from t where (a && b) not between 1 and 2;";
//
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.MYSQL);
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("SELECT *\n" +
"FROM t\n" +
"WHERE (a\n" +
"AND b) NOT BETWEEN 1 AND 2;", stmt.toString());
}
public void test_1() throws Exception {
String sql = "select (a && b) not between 1 and 2;";
//
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.MYSQL);
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("SELECT (a\n" +
"\tAND b) NOT BETWEEN 1 AND 2;", stmt.toString());
}
}
| MySqlSelectTest_169_not_between |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java | {
"start": 761,
"end": 6432
} | interface ____ extends Vector permits ConstantBooleanVector, BooleanArrayVector, BooleanBigArrayVector,
ConstantNullVector {
boolean getBoolean(int position);
@Override
BooleanBlock asBlock();
@Override
BooleanVector filter(int... positions);
@Override
BooleanBlock keepMask(BooleanVector mask);
/**
* Make a deep copy of this {@link Vector} using the provided {@link BlockFactory},
* likely copying all data.
*/
@Override
default BooleanVector deepCopy(BlockFactory blockFactory) {
try (BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(getPositionCount())) {
builder.copyFrom(asBlock(), 0, getPositionCount());
builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING);
return builder.build().asVector();
}
}
@Override
ReleasableIterator<? extends BooleanBlock> lookup(IntBlock positions, ByteSizeValue targetBlockSize);
/**
* Are all values {@code true}? This will scan all values to check and always answer accurately.
*/
boolean allTrue();
/**
* Are all values {@code false}? This will scan all values to check and always answer accurately.
*/
boolean allFalse();
/**
* Compares the given object with this vector for equality. Returns {@code true} if and only if the
* given object is a BooleanVector, and both vectors are {@link #equals(BooleanVector, BooleanVector) equal}.
*/
@Override
boolean equals(Object obj);
/** Returns the hash code of this vector, as defined by {@link #hash(BooleanVector)}. */
@Override
int hashCode();
/**
* Returns {@code true} if the given vectors are equal to each other, otherwise {@code false}.
* Two vectors are considered equal if they have the same position count, and contain the same
* values in the same order. This definition ensures that the equals method works properly
* across different implementations of the BooleanVector interface.
*/
static boolean equals(BooleanVector vector1, BooleanVector vector2) {
final int positions = vector1.getPositionCount();
if (positions != vector2.getPositionCount()) {
return false;
}
for (int pos = 0; pos < positions; pos++) {
if (vector1.getBoolean(pos) != vector2.getBoolean(pos)) {
return false;
}
}
return true;
}
/**
* Generates the hash code for the given vector. The hash code is computed from the vector's values.
* This ensures that {@code vector1.equals(vector2)} implies that {@code vector1.hashCode()==vector2.hashCode()}
* for any two vectors, {@code vector1} and {@code vector2}, as required by the general contract of
* {@link Object#hashCode}.
*/
static int hash(BooleanVector vector) {
final int len = vector.getPositionCount();
int result = 1;
for (int pos = 0; pos < len; pos++) {
result = 31 * result + Boolean.hashCode(vector.getBoolean(pos));
}
return result;
}
/** Deserializes a Vector from the given stream input. */
static BooleanVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException {
final int positions = in.readVInt();
final byte serializationType = in.readByte();
return switch (serializationType) {
case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory);
case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantBooleanVector(in.readBoolean(), positions);
case SERIALIZE_VECTOR_ARRAY -> BooleanArrayVector.readArrayVector(positions, in, blockFactory);
case SERIALIZE_VECTOR_BIG_ARRAY -> BooleanBigArrayVector.readArrayVector(positions, in, blockFactory);
default -> {
assert false : "invalid vector serialization type [" + serializationType + "]";
throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]");
}
};
}
/** Serializes this Vector to the given stream output. */
default void writeTo(StreamOutput out) throws IOException {
final int positions = getPositionCount();
final var version = out.getTransportVersion();
out.writeVInt(positions);
if (isConstant() && positions > 0) {
out.writeByte(SERIALIZE_VECTOR_CONSTANT);
out.writeBoolean(getBoolean(0));
} else if (this instanceof BooleanArrayVector v) {
out.writeByte(SERIALIZE_VECTOR_ARRAY);
v.writeArrayVector(positions, out);
} else if (this instanceof BooleanBigArrayVector v) {
out.writeByte(SERIALIZE_VECTOR_BIG_ARRAY);
v.writeArrayVector(positions, out);
} else {
out.writeByte(SERIALIZE_VECTOR_VALUES);
writeValues(this, positions, out);
}
}
private static BooleanVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException {
try (var builder = blockFactory.newBooleanVectorFixedBuilder(positions)) {
for (int i = 0; i < positions; i++) {
builder.appendBoolean(i, in.readBoolean());
}
return builder.build();
}
}
private static void writeValues(BooleanVector v, int positions, StreamOutput out) throws IOException {
for (int i = 0; i < positions; i++) {
out.writeBoolean(v.getBoolean(i));
}
}
/**
* A builder that grows as needed.
*/
sealed | BooleanVector |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/AbstractLaunchableService.java | {
"start": 1348,
"end": 2396
} | class ____ extends AbstractService
implements LaunchableService {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractLaunchableService.class);
/**
* Construct an instance with the given name.
*
* @param name input name.
*/
protected AbstractLaunchableService(String name) {
super(name);
}
/**
* {@inheritDoc}
* <p>
* The base implementation logs all arguments at the debug level,
* then returns the passed in config unchanged.
*/
@Override
public Configuration bindArgs(Configuration config, List<String> args) throws
Exception {
if (LOG.isDebugEnabled()) {
LOG.debug("Service {} passed in {} arguments:", getName(), args.size());
for (String arg : args) {
LOG.debug(arg);
}
}
return config;
}
/**
* {@inheritDoc}
* <p>
* The action is to signal success by returning the exit code 0.
*/
@Override
public int execute() throws Exception {
return LauncherExitCodes.EXIT_SUCCESS;
}
}
| AbstractLaunchableService |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/PatternResolverDoesNotEvaluateThreadContextTest.java | {
"start": 1250,
"end": 4997
} | class ____ {
private static final String CONFIG = "log4j2-pattern-layout-with-context.xml";
private static final String PARAMETER = "user";
private ListAppender listAppender;
@ClassRule
public static LoggerContextRule context = new LoggerContextRule(CONFIG);
@Before
public void before() {
listAppender = context.getRequiredAppender("list", ListAppender.class);
listAppender.clear();
}
@Test
public void testNoUserSet() {
final Logger logger = context.getLogger(getClass());
logger.info("This is a test");
final List<String> messages = listAppender.getMessages();
assertTrue(messages != null && !messages.isEmpty(), "No messages returned");
final String message = messages.get(0);
assertEquals(
"INFO org.apache.logging.log4j.core."
+ "PatternResolverDoesNotEvaluateThreadContextTest ${ctx:user} This is a test",
message);
}
@Test
public void testMessageIsNotLookedUp() {
final Logger logger = context.getLogger(getClass());
logger.info("This is a ${upper:test}");
final List<String> messages = listAppender.getMessages();
assertTrue(messages != null && !messages.isEmpty(), "No messages returned");
final String message = messages.get(0);
assertEquals(
"INFO org.apache.logging.log4j.core."
+ "PatternResolverDoesNotEvaluateThreadContextTest ${ctx:user} This is a ${upper:test}",
message);
}
@Test
public void testUser() {
final Logger logger = context.getLogger(getClass());
ThreadContext.put(PARAMETER, "123");
try {
logger.info("This is a test");
} finally {
ThreadContext.remove(PARAMETER);
}
final List<String> messages = listAppender.getMessages();
assertTrue(messages != null && !messages.isEmpty(), "No messages returned");
final String message = messages.get(0);
assertEquals(
"INFO org.apache.logging.log4j.core."
+ "PatternResolverDoesNotEvaluateThreadContextTest 123 This is a test",
message);
}
@Test
public void testUserIsLookup() {
final Logger logger = context.getLogger(getClass());
ThreadContext.put(PARAMETER, "${java:version}");
try {
logger.info("This is a test");
} finally {
ThreadContext.remove(PARAMETER);
}
final List<String> messages = listAppender.getMessages();
assertTrue(messages != null && !messages.isEmpty(), "No messages returned");
final String message = messages.get(0);
assertEquals(
"INFO org.apache.logging.log4j.core."
+ "PatternResolverDoesNotEvaluateThreadContextTest ${java:version} This is a test",
message);
}
@Test
public void testUserHasLookup() {
final Logger logger = context.getLogger(getClass());
ThreadContext.put(PARAMETER, "user${java:version}name");
try {
logger.info("This is a test");
} finally {
ThreadContext.remove(PARAMETER);
}
final List<String> messages = listAppender.getMessages();
assertTrue(messages != null && !messages.isEmpty(), "No messages returned");
final String message = messages.get(0);
assertEquals(
"INFO org.apache.logging.log4j.core."
+ "PatternResolverDoesNotEvaluateThreadContextTest user${java:version}name This is a test",
message);
}
}
| PatternResolverDoesNotEvaluateThreadContextTest |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/BiFunctionRaisingIOE.java | {
"start": 1098,
"end": 1646
} | interface ____<T, U, R> {
/**
* Apply the function.
* @param t argument 1
* @param u argument 2
* @return result
* @throws IOException Any IO failure
*/
R apply(T t, U u) throws IOException;
/**
* Apply unchecked.
* @param t argument
* @param u argument 2
* @return the evaluated function
* @throws UncheckedIOException IOE raised.
*/
default R unchecked(T t, U u) {
try {
return apply(t, u);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
| BiFunctionRaisingIOE |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/unit/DataUnit.java | {
"start": 954,
"end": 1633
} | class ____ corresponding values.
*
* <p>
* <table border="1">
* <tr><th>Constant</th><th>Data Size</th><th>Power of 2</th><th>Size in Bytes</th></tr>
* <tr><td>{@link #BYTES}</td><td>1B</td><td>2^0</td><td>1</td></tr>
* <tr><td>{@link #KILOBYTES}</td><td>1KB</td><td>2^10</td><td>1,024</td></tr>
* <tr><td>{@link #MEGABYTES}</td><td>1MB</td><td>2^20</td><td>1,048,576</td></tr>
* <tr><td>{@link #GIGABYTES}</td><td>1GB</td><td>2^30</td><td>1,073,741,824</td></tr>
* <tr><td>{@link #TERABYTES}</td><td>1TB</td><td>2^40</td><td>1,099,511,627,776</td></tr>
* </table>
*
* @author Stephane Nicoll
* @author Sam Brannen
* @since 5.1
* @see DataSize
*/
public | and |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptRequestPBImpl.java | {
"start": 1439,
"end": 3516
} | class ____ extends ProtoBase<KillTaskAttemptRequestProto> implements KillTaskAttemptRequest {
KillTaskAttemptRequestProto proto = KillTaskAttemptRequestProto.getDefaultInstance();
KillTaskAttemptRequestProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptId taskAttemptId = null;
public KillTaskAttemptRequestPBImpl() {
builder = KillTaskAttemptRequestProto.newBuilder();
}
public KillTaskAttemptRequestPBImpl(KillTaskAttemptRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public KillTaskAttemptRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptId != null) {
builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = KillTaskAttemptRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskAttemptId getTaskAttemptId() {
KillTaskAttemptRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptId != null) {
return this.taskAttemptId;
}
if (!p.hasTaskAttemptId()) {
return null;
}
this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
return this.taskAttemptId;
}
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
maybeInitBuilder();
if (taskAttemptId == null)
builder.clearTaskAttemptId();
this.taskAttemptId = taskAttemptId;
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
}
| KillTaskAttemptRequestPBImpl |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/array/HSQLArrayFillFunction.java | {
"start": 504,
"end": 1192
} | class ____ extends AbstractArrayFillFunction {
public HSQLArrayFillFunction(boolean list) {
super( list );
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
sqlAppender.append( "coalesce(case when " );
sqlAstArguments.get( 1 ).accept( walker );
sqlAppender.append( "<>0 then (select array_agg(" );
walker.render( sqlAstArguments.get( 0 ), SqlAstNodeRenderingMode.NO_UNTYPED );
sqlAppender.append( ") from unnest(sequence_array(1," );
sqlAstArguments.get( 1 ).accept( walker );
sqlAppender.append( ",1))) end,array[])" );
}
}
| HSQLArrayFillFunction |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java | {
"start": 2675,
"end": 7321
} | class ____<Request extends MasterNodeRequest<Request>, Response extends ActionResponse> extends
HandledTransportAction<Request, Response>
implements
ActionWithReservedState<Request> {
private static final Logger logger = LogManager.getLogger(TransportMasterNodeAction.class);
protected final ThreadPool threadPool;
protected final TransportService transportService;
protected final ClusterService clusterService;
private final Writeable.Reader<Response> responseReader;
protected final Executor executor;
protected TransportMasterNodeAction(
String actionName,
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
Writeable.Reader<Request> request,
Writeable.Reader<Response> response,
Executor executor
) {
this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, response, executor);
}
protected TransportMasterNodeAction(
String actionName,
boolean canTripCircuitBreaker,
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
Writeable.Reader<Request> request,
Writeable.Reader<Response> response,
Executor executor
) {
super(actionName, canTripCircuitBreaker, transportService, actionFilters, request, EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.transportService = transportService;
this.clusterService = clusterService;
this.threadPool = threadPool;
this.executor = executor;
this.responseReader = response;
}
protected abstract void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener)
throws Exception;
private void executeMasterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener)
throws Exception {
if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) {
throw new TaskCancelledException("Task was cancelled");
}
masterOperation(task, request, state, listener);
}
protected boolean localExecute(Request request) {
return false;
}
protected abstract ClusterBlockException checkBlock(Request request, ClusterState state);
private ClusterBlockException checkBlockIfStateRecovered(Request request, ClusterState state) {
try {
return checkBlock(request, state);
} catch (IndexNotFoundException e) {
if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
// no index metadata is exposed yet, but checkBlock depends on an index, so keep trying until the cluster forms
assert GatewayService.STATE_NOT_RECOVERED_BLOCK.contains(ClusterBlockLevel.METADATA_READ);
assert state.blocks().global(ClusterBlockLevel.METADATA_READ).stream().allMatch(ClusterBlock::retryable);
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
} else {
throw e;
}
}
}
@FixForMultiProject // this is overridden for project-specific reserved metadata checks. A common subclass needs to exist for this.
protected void validateForReservedState(Request request, ClusterState state) {
Optional<String> handlerName = reservedStateHandlerName();
assert handlerName.isPresent();
validateForReservedState(
state.metadata().reservedStateMetadata().values(),
handlerName.get(),
modifiedKeys(request),
request::toString
);
}
// package private for testing
boolean supportsReservedState() {
return reservedStateHandlerName().isPresent();
}
@Override
protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
ClusterState state = clusterService.state();
if (supportsReservedState()) {
validateForReservedState(request, state);
}
logger.trace("starting processing request [{}] with cluster state version [{}]", request, state.version());
if (task != null) {
request.setParentTask(clusterService.localNode().getId(), task.getId());
}
request.mustIncRef();
new AsyncSingleAction(task, request, ActionListener.runBefore(listener, request::decRef)).doStart(state);
}
| TransportMasterNodeAction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hql/HqlParserMemoryUsageTest.java | {
"start": 3961,
"end": 4208
} | class ____ {
@Id
private Long id;
@ManyToOne(fetch = FetchType.LAZY)
private AppUser user;
@OneToMany(mappedBy = "order")
private Set<OrderItem> orderItems;
}
@Entity(name = "OrderItem")
@Table(name = "order_items")
public static | Order |
java | netty__netty | codec-http2/src/test/java/io/netty/handler/codec/http2/Http2FrameCodecTest.java | {
"start": 3828,
"end": 16426
} | class ____ {
// For verifying outbound frames
private Http2FrameWriter frameWriter;
private Http2FrameCodec frameCodec;
private EmbeddedChannel channel;
// For injecting inbound frames
private Http2FrameInboundWriter frameInboundWriter;
private LastInboundHandler inboundHandler;
private final Http2Headers request = new DefaultHttp2Headers()
.method(HttpMethod.GET.asciiName()).scheme(HttpScheme.HTTPS.name())
.authority(new AsciiString("example.org")).path(new AsciiString("/foo"));
private final Http2Headers response = new DefaultHttp2Headers()
.status(HttpResponseStatus.OK.codeAsText());
@BeforeEach
public void setUp() throws Exception {
setUp(Http2FrameCodecBuilder.forServer(), new Http2Settings());
}
@AfterEach
public void tearDown() throws Exception {
if (inboundHandler != null) {
inboundHandler.finishAndReleaseAll();
inboundHandler = null;
}
if (channel != null) {
channel.finishAndReleaseAll();
channel.close();
channel = null;
}
}
private void setUp(Http2FrameCodecBuilder frameCodecBuilder, Http2Settings initialRemoteSettings) throws Exception {
setUp(frameCodecBuilder, initialRemoteSettings, true);
}
private void setUp(Http2FrameCodecBuilder frameCodecBuilder, Http2Settings initialRemoteSettings, boolean preface)
throws Exception {
/*
* Some tests call this method twice. Once with JUnit's @Before and once directly to pass special settings.
* This call ensures that in case of two consecutive calls to setUp(), the previous channel is shutdown and
* ByteBufs are released correctly.
*/
tearDown();
frameWriter = Http2TestUtil.mockedFrameWriter();
frameCodec = frameCodecBuilder.frameWriter(frameWriter).frameLogger(new Http2FrameLogger(LogLevel.TRACE))
.initialSettings(initialRemoteSettings).build();
inboundHandler = new LastInboundHandler();
channel = new EmbeddedChannel();
frameInboundWriter = new Http2FrameInboundWriter(channel);
channel.connect(new InetSocketAddress(0));
channel.pipeline().addLast(frameCodec);
channel.pipeline().addLast(inboundHandler);
channel.pipeline().fireChannelActive();
// Handshake
verify(frameWriter).writeSettings(eqFrameCodecCtx(), anyHttp2Settings(), anyChannelPromise());
verifyNoMoreInteractions(frameWriter);
if (preface) {
channel.writeInbound(Http2CodecUtil.connectionPrefaceBuf());
frameInboundWriter.writeInboundSettings(initialRemoteSettings);
verify(frameWriter).writeSettingsAck(eqFrameCodecCtx(), anyChannelPromise());
frameInboundWriter.writeInboundSettingsAck();
Http2SettingsFrame settingsFrame = inboundHandler.readInbound();
assertNotNull(settingsFrame);
Http2SettingsAckFrame settingsAckFrame = inboundHandler.readInbound();
assertNotNull(settingsAckFrame);
}
}
@Test
public void stateChanges() throws Exception {
frameInboundWriter.writeInboundHeaders(1, request, 31, true);
Http2Stream stream = frameCodec.connection().stream(1);
assertNotNull(stream);
assertEquals(State.HALF_CLOSED_REMOTE, stream.state());
Http2FrameStreamEvent event = inboundHandler.readInboundMessageOrUserEvent();
assertEquals(State.HALF_CLOSED_REMOTE, event.stream().state());
Http2StreamFrame inboundFrame = inboundHandler.readInbound();
Http2FrameStream stream2 = inboundFrame.stream();
assertNotNull(stream2);
assertEquals(1, stream2.id());
assertEquals(inboundFrame, new DefaultHttp2HeadersFrame(request, true, 31).stream(stream2));
assertNull(inboundHandler.readInbound());
channel.writeOutbound(new DefaultHttp2HeadersFrame(response, true, 27).stream(stream2));
verify(frameWriter).writeHeaders(
eqFrameCodecCtx(), eq(1), eq(response),
eq(27), eq(true), anyChannelPromise());
verify(frameWriter, never()).writeRstStream(
eqFrameCodecCtx(), anyInt(), anyLong(), anyChannelPromise());
assertEquals(State.CLOSED, stream.state());
event = inboundHandler.readInboundMessageOrUserEvent();
assertEquals(State.CLOSED, event.stream().state());
assertTrue(channel.isActive());
}
@Test
public void headerRequestHeaderResponse() throws Exception {
frameInboundWriter.writeInboundHeaders(1, request, 31, true);
Http2Stream stream = frameCodec.connection().stream(1);
assertNotNull(stream);
assertEquals(State.HALF_CLOSED_REMOTE, stream.state());
Http2StreamFrame inboundFrame = inboundHandler.readInbound();
Http2FrameStream stream2 = inboundFrame.stream();
assertNotNull(stream2);
assertEquals(1, stream2.id());
assertEquals(inboundFrame, new DefaultHttp2HeadersFrame(request, true, 31).stream(stream2));
assertNull(inboundHandler.readInbound());
channel.writeOutbound(new DefaultHttp2HeadersFrame(response, true, 27).stream(stream2));
verify(frameWriter).writeHeaders(
eqFrameCodecCtx(), eq(1), eq(response),
eq(27), eq(true), anyChannelPromise());
verify(frameWriter, never()).writeRstStream(
eqFrameCodecCtx(), anyInt(), anyLong(), anyChannelPromise());
assertEquals(State.CLOSED, stream.state());
assertTrue(channel.isActive());
}
@Test
public void flowControlShouldBeResilientToMissingStreams() throws Http2Exception {
Http2Connection conn = new DefaultHttp2Connection(true);
Http2ConnectionEncoder enc = new DefaultHttp2ConnectionEncoder(conn, new DefaultHttp2FrameWriter());
Http2ConnectionDecoder dec = new DefaultHttp2ConnectionDecoder(conn, enc, new DefaultHttp2FrameReader());
Http2FrameCodec codec = new Http2FrameCodec(enc, dec, new Http2Settings(), false, true);
EmbeddedChannel em = new EmbeddedChannel(codec);
// We call #consumeBytes on a stream id which has not been seen yet to emulate the case
// where a stream is deregistered which in reality can happen in response to a RST.
assertFalse(codec.consumeBytes(1, 1));
assertTrue(em.finishAndReleaseAll());
}
@Test
public void canCreateCustomUnknownFrame() {
Http2Connection conn = new DefaultHttp2Connection(true);
Http2ConnectionEncoder enc = new DefaultHttp2ConnectionEncoder(conn, new DefaultHttp2FrameWriter());
Http2ConnectionDecoder dec = new DefaultHttp2ConnectionDecoder(conn, enc, new DefaultHttp2FrameReader());
new Http2FrameCodec(enc, dec, new Http2Settings(), false, true) {
@Override
protected Http2StreamFrame newHttp2UnknownFrame(byte frameType,
int streamId,
Http2Flags flags,
ByteBuf payload) {
return super.newHttp2UnknownFrame(frameType, streamId, flags, payload);
}
};
}
@Test
public void entityRequestEntityResponse() throws Exception {
frameInboundWriter.writeInboundHeaders(1, request, 0, false);
Http2Stream stream = frameCodec.connection().stream(1);
assertNotNull(stream);
assertEquals(State.OPEN, stream.state());
Http2HeadersFrame inboundHeaders = inboundHandler.readInbound();
Http2FrameStream stream2 = inboundHeaders.stream();
assertNotNull(stream2);
assertEquals(1, stream2.id());
assertEquals(new DefaultHttp2HeadersFrame(request, false).stream(stream2), inboundHeaders);
assertNull(inboundHandler.readInbound());
ByteBuf hello = bb("hello");
frameInboundWriter.writeInboundData(1, hello, 31, true);
Http2DataFrame inboundData = inboundHandler.readInbound();
Http2DataFrame expected = new DefaultHttp2DataFrame(bb("hello"), true, 31).stream(stream2);
assertEqualsAndRelease(expected, inboundData);
assertNull(inboundHandler.readInbound());
channel.writeOutbound(new DefaultHttp2HeadersFrame(response, false).stream(stream2));
verify(frameWriter).writeHeaders(eqFrameCodecCtx(), eq(1), eq(response),
eq(0), eq(false), anyChannelPromise());
channel.writeOutbound(new DefaultHttp2DataFrame(bb("world"), true, 27).stream(stream2));
ArgumentCaptor<ByteBuf> outboundData = ArgumentCaptor.forClass(ByteBuf.class);
verify(frameWriter).writeData(eqFrameCodecCtx(), eq(1), outboundData.capture(), eq(27),
eq(true), anyChannelPromise());
ByteBuf bb = bb("world");
assertEquals(bb, outboundData.getValue());
assertEquals(1, outboundData.getValue().refCnt());
bb.release();
outboundData.getValue().release();
verify(frameWriter, never()).writeRstStream(eqFrameCodecCtx(), anyInt(), anyLong(), anyChannelPromise());
assertTrue(channel.isActive());
}
@Test
public void sendRstStream() throws Exception {
frameInboundWriter.writeInboundHeaders(3, request, 31, true);
Http2Stream stream = frameCodec.connection().stream(3);
assertNotNull(stream);
assertEquals(State.HALF_CLOSED_REMOTE, stream.state());
Http2HeadersFrame inboundHeaders = inboundHandler.readInbound();
assertNotNull(inboundHeaders);
assertTrue(inboundHeaders.isEndStream());
Http2FrameStream stream2 = inboundHeaders.stream();
assertNotNull(stream2);
assertEquals(3, stream2.id());
channel.writeOutbound(new DefaultHttp2ResetFrame(314 /* non-standard error */).stream(stream2));
verify(frameWriter).writeRstStream(eqFrameCodecCtx(), eq(3), eq(314L), anyChannelPromise());
assertEquals(State.CLOSED, stream.state());
assertTrue(channel.isActive());
}
@Test
public void receiveRstStream() throws Exception {
frameInboundWriter.writeInboundHeaders(3, request, 31, false);
Http2Stream stream = frameCodec.connection().stream(3);
assertNotNull(stream);
assertEquals(State.OPEN, stream.state());
Http2HeadersFrame expectedHeaders = new DefaultHttp2HeadersFrame(request, false, 31);
Http2HeadersFrame actualHeaders = inboundHandler.readInbound();
assertEquals(expectedHeaders.stream(actualHeaders.stream()), actualHeaders);
frameInboundWriter.writeInboundRstStream(3, NO_ERROR.code());
Http2ResetFrame expectedRst = new DefaultHttp2ResetFrame(NO_ERROR).stream(actualHeaders.stream());
Http2ResetFrame actualRst = inboundHandler.readInbound();
assertEquals(expectedRst, actualRst);
assertNull(inboundHandler.readInbound());
}
@Test
public void sendGoAway() throws Exception {
frameInboundWriter.writeInboundHeaders(3, request, 31, false);
Http2Stream stream = frameCodec.connection().stream(3);
assertNotNull(stream);
assertEquals(State.OPEN, stream.state());
ByteBuf debugData = bb("debug");
ByteBuf expected = debugData.copy();
Http2GoAwayFrame goAwayFrame = new DefaultHttp2GoAwayFrame(NO_ERROR.code(),
debugData.retainedDuplicate());
goAwayFrame.setExtraStreamIds(2);
channel.writeOutbound(goAwayFrame);
verify(frameWriter).writeGoAway(eqFrameCodecCtx(), eq(7),
eq(NO_ERROR.code()), eq(expected), anyChannelPromise());
assertEquals(State.OPEN, stream.state());
assertTrue(channel.isActive());
expected.release();
debugData.release();
}
@Test
public void receiveGoaway() throws Exception {
ByteBuf debugData = bb("foo");
frameInboundWriter.writeInboundGoAway(2, NO_ERROR.code(), debugData);
Http2GoAwayFrame expectedFrame = new DefaultHttp2GoAwayFrame(2, NO_ERROR.code(), bb("foo"));
Http2GoAwayFrame actualFrame = inboundHandler.readInbound();
assertEqualsAndRelease(expectedFrame, actualFrame);
assertNull(inboundHandler.readInbound());
}
@Test
public void unknownFrameTypeShouldThrowAndBeReleased() throws Exception {
| Http2FrameCodecTest |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/DefaultProducerCacheTest.java | {
"start": 10986,
"end": 11336
} | class ____ extends DefaultComponent {
public MyComponent(CamelContext context) {
super(context);
}
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) {
throw new UnsupportedOperationException();
}
}
private final | MyComponent |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/creation/bytebuddy/ByteBuddyCrossClassLoaderSerializationSupport.java | {
"start": 17215,
"end": 17644
} | class ____ marks a Mockito mock.
*/
private boolean notMarkedAsAMockitoMock(Object marker) {
return !MOCKITO_PROXY_MARKER.equals(marker);
}
}
/**
* Special Mockito aware <code>ObjectOutputStream</code>.
* <p/>
* <p>
* This output stream has the role of marking in the stream the Mockito class. This
* marking process is necessary to identify the proxy | annotation |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/migrationsupport/conditions/IgnoreAnnotationIntegrationTests.java | {
"start": 1650,
"end": 2174
} | class ____ {
private static List<String> tests = new ArrayList<>();
@BeforeAll
void clearTracking() {
tests.clear();
}
@AfterAll
void verifyTracking() {
assertThat(tests).containsExactly("notIgnored");
}
@BeforeEach
void track(TestInfo testInfo) {
tests.add(testInfo.getTestMethod().get().getName());
}
@Test
@Ignore
void ignored() {
fail("This method should have been disabled via @Ignore");
}
@Test
// @Ignore
void notIgnored() {
/* no-op */
}
}
}
| BaseNestedTestCase |
java | spring-projects__spring-boot | module/spring-boot-session-data-redis/src/main/java/org/springframework/boot/session/data/redis/autoconfigure/SessionDataRedisProperties.java | {
"start": 1076,
"end": 3211
} | class ____ {
/**
* Namespace for keys used to store sessions.
*/
private String namespace = "spring:session";
/**
* Sessions flush mode. Determines when session changes are written to the session
* store. Not supported with a reactive session repository.
*/
private FlushMode flushMode = FlushMode.ON_SAVE;
/**
* Sessions save mode. Determines how session changes are tracked and saved to the
* session store.
*/
private SaveMode saveMode = SaveMode.ON_SET_ATTRIBUTE;
/**
* The configure action to apply when no user-defined ConfigureRedisAction or
* ConfigureReactiveRedisAction bean is present.
*/
private ConfigureAction configureAction = ConfigureAction.NOTIFY_KEYSPACE_EVENTS;
/**
* Cron expression for expired session cleanup job. Only supported when
* repository-type is set to indexed. Not supported with a reactive session
* repository.
*/
private @Nullable String cleanupCron;
/**
* Type of Redis session repository to configure.
*/
private RepositoryType repositoryType = RepositoryType.DEFAULT;
public String getNamespace() {
return this.namespace;
}
public void setNamespace(String namespace) {
this.namespace = namespace;
}
public FlushMode getFlushMode() {
return this.flushMode;
}
public void setFlushMode(FlushMode flushMode) {
this.flushMode = flushMode;
}
public SaveMode getSaveMode() {
return this.saveMode;
}
public void setSaveMode(SaveMode saveMode) {
this.saveMode = saveMode;
}
public @Nullable String getCleanupCron() {
return this.cleanupCron;
}
public void setCleanupCron(@Nullable String cleanupCron) {
this.cleanupCron = cleanupCron;
}
public ConfigureAction getConfigureAction() {
return this.configureAction;
}
public void setConfigureAction(ConfigureAction configureAction) {
this.configureAction = configureAction;
}
public RepositoryType getRepositoryType() {
return this.repositoryType;
}
public void setRepositoryType(RepositoryType repositoryType) {
this.repositoryType = repositoryType;
}
/**
* Strategies for configuring and validating Redis.
*/
public | SessionDataRedisProperties |
java | apache__camel | test-infra/camel-test-infra-ollama/src/main/java/org/apache/camel/test/infra/ollama/commons/OllamaProperties.java | {
"start": 863,
"end": 1801
} | class ____ {
public static final String CONTAINER = "ollama.container";
public static final String MODEL = "ollama.model";
public static final String API_KEY = "ollama.api.key";
public static final String ENDPOINT = "ollama.endpoint";
/**
* URL for connecting to a local Ollama instance running on the host. Default: http://localhost:11434
* <p>
* This property is used when auto-detecting local Ollama availability. If a local Ollama instance is found at this
* URL, it will be used instead of starting a Docker container.
* </p>
*/
public static final String OLLAMA_HOST_URL = "ollama.host.url";
public static final String CPU_COUNT = "ollama.container.cpu.count";
public static final String MEMORY_LIMIT = "ollama.container.memory.limit";
public static final String ENABLE_GPU = "ollama.container.enable.gpu";
private OllamaProperties() {
}
}
| OllamaProperties |
java | quarkusio__quarkus | extensions/smallrye-jwt/deployment/src/test/java/io/quarkus/jwt/test/dev/SmallryeJwtProcessorDevModeTest.java | {
"start": 6344,
"end": 6666
} | class ____ {
@GET
@Produces(MediaType.TEXT_PLAIN)
@PermitAll
public String hello() {
return Jwt.upn("jdoe@quarkus.io")
.groups("User")
.claim(Claims.birthdate.name(), "2001-07-13")
.sign();
}
}
}
| TokenResource |
java | playframework__playframework | core/play/src/main/java/play/libs/reflect/MemberUtils.java | {
"start": 10807,
"end": 11010
} | class ____ a subset of the API of java.lang.reflect.Executable in Java 1.8, providing a
* common representation for function signatures for Constructors and Methods.
*/
private static final | providing |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/annotation/ProcedureHint.java | {
"start": 3576,
"end": 3986
} | class ____ implements Procedure {
* @ProcedureHint(
* input = [@DataTypeHint("INT"), @DataTypeHint("STRING")]
* )
* @ProcedureHint(
* input = [@DataTypeHint("BOOLEAN")]
* )
* Object[] call(Object... o) { ... }
* }
*
* // accepts (INT) or (BOOLEAN) and always returns an array of ROW<f0 BOOLEAN, f1 INT>
* @ProcedureHint(
* output = @DataTypeHint("ROW<f0 BOOLEAN, f1 INT>")
* )
* | X |
java | elastic__elasticsearch | x-pack/plugin/fleet/src/test/java/org/elasticsearch/xpack/fleet/action/GetSecretResponseTests.java | {
"start": 425,
"end": 986
} | class ____ extends AbstractWireSerializingTestCase<GetSecretResponse> {
@Override
protected Writeable.Reader<GetSecretResponse> instanceReader() {
return GetSecretResponse::new;
}
@Override
protected GetSecretResponse createTestInstance() {
return new GetSecretResponse(randomAlphaOfLength(10), randomAlphaOfLength(10));
}
@Override
protected GetSecretResponse mutateInstance(GetSecretResponse instance) {
return new GetSecretResponse(instance.id(), randomAlphaOfLength(10));
}
}
| GetSecretResponseTests |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxMaterialize.java | {
"start": 1038,
"end": 1487
} | class ____<T> extends InternalFluxOperator<T, Signal<T>> {
FluxMaterialize(Flux<T> source) {
super(source);
}
@Override
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super Signal<T>> actual) {
return new MaterializeSubscriber<>(actual);
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
final static | FluxMaterialize |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeSearchContext.java | {
"start": 1343,
"end": 1471
} | class ____ be closed when the reference count in the {@link ShardContext} returned by {@link #shardContext()}
* reaches 0.
*/
| will |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlAsyncGetResultsAction.java | {
"start": 1741,
"end": 5452
} | class ____ extends AbstractTransportQlAsyncGetResultsAction<EsqlQueryResponse, EsqlQueryTask> {
private final BlockFactory blockFactory;
private final ThreadPool threadPool;
@Inject
public TransportEsqlAsyncGetResultsAction(
TransportService transportService,
ActionFilters actionFilters,
ClusterService clusterService,
NamedWriteableRegistry registry,
Client client,
BigArrays bigArrays,
BlockFactoryProvider blockFactoryProvider,
ThreadPool threadPool
) {
super(
EsqlAsyncGetResultAction.NAME,
transportService,
actionFilters,
clusterService,
registry,
client,
threadPool,
bigArrays,
EsqlQueryTask.class
);
this.blockFactory = blockFactoryProvider.blockFactory();
this.threadPool = threadPool;
}
@Override
protected void doExecute(Task task, GetAsyncResultRequest request, ActionListener<EsqlQueryResponse> listener) {
super.doExecute(task, request, unwrapListener(request.getId(), listener));
}
@Override
public Writeable.Reader<EsqlQueryResponse> responseReader() {
return EsqlQueryResponse.reader(blockFactory);
}
static final String PARSE_EX_NAME = ElasticsearchException.getExceptionName(new ParsingException(Source.EMPTY, ""));
static final String VERIFY_EX_NAME = ElasticsearchException.getExceptionName(new VerificationException(""));
/**
* Adds async headers, and unwraps the exception in the case of failure.
* <p>
* This keeps the exception types the same as the sync API, namely ParsingException and VerificationException.
* </p>
*/
ActionListener<EsqlQueryResponse> unwrapListener(String asyncExecutionId, ActionListener<EsqlQueryResponse> listener) {
return new ActionListener<>() {
@Override
public void onResponse(EsqlQueryResponse response) {
boolean isRunning = response.isRunning();
threadPool.getThreadContext()
.addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, isRunning ? "?1" : "?0");
threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, asyncExecutionId);
listener.onResponse(response);
}
@Override
public void onFailure(Exception e) {
if (e instanceof ElasticsearchWrapperException && e instanceof ElasticsearchException ee) {
e = unwrapEsException(ee);
}
if (e instanceof NotSerializableExceptionWrapper wrapper) {
String name = wrapper.getExceptionName();
if (PARSE_EX_NAME.equals(name)) {
e = new ParsingException(Source.EMPTY, e.getMessage());
e.setStackTrace(wrapper.getStackTrace());
e.addSuppressed(wrapper);
} else if (VERIFY_EX_NAME.contains(name)) {
e = new VerificationException(e.getMessage());
e.setStackTrace(wrapper.getStackTrace());
e.addSuppressed(wrapper);
}
}
listener.onFailure(e);
}
};
}
static RuntimeException unwrapEsException(ElasticsearchException esEx) {
Throwable root = esEx.unwrapCause();
if (root instanceof RuntimeException runtimeException) {
return runtimeException;
}
return esEx;
}
}
| TransportEsqlAsyncGetResultsAction |
java | apache__rocketmq | auth/src/main/java/org/apache/rocketmq/auth/authentication/strategy/AbstractAuthenticationStrategy.java | {
"start": 1432,
"end": 3339
} | class ____ implements AuthenticationStrategy {
protected final AuthConfig authConfig;
protected final Set<String> authenticationWhiteSet = new HashSet<>();
protected final AuthenticationProvider<AuthenticationContext> authenticationProvider;
public AbstractAuthenticationStrategy(AuthConfig authConfig, Supplier<?> metadataService) {
this.authConfig = authConfig;
this.authenticationProvider = AuthenticationFactory.getProvider(authConfig);
if (this.authenticationProvider != null) {
this.authenticationProvider.initialize(authConfig, metadataService);
}
if (StringUtils.isNotBlank(authConfig.getAuthenticationWhitelist())) {
String[] whitelist = StringUtils.split(authConfig.getAuthenticationWhitelist(), ",");
for (String rpcCode : whitelist) {
this.authenticationWhiteSet.add(StringUtils.trim(rpcCode));
}
}
}
protected void doEvaluate(AuthenticationContext context) {
if (context == null) {
return;
}
if (!authConfig.isAuthenticationEnabled()) {
return;
}
if (this.authenticationProvider == null) {
return;
}
if (this.authenticationWhiteSet.contains(context.getRpcCode())) {
return;
}
try {
this.authenticationProvider.authenticate(context).join();
} catch (AuthenticationException ex) {
throw ex;
} catch (Throwable ex) {
Throwable exception = ExceptionUtils.getRealException(ex);
if (exception instanceof AuthenticationException) {
throw (AuthenticationException) exception;
}
throw new AuthenticationException("Authentication failed. Please verify the credentials and try again.", exception);
}
}
}
| AbstractAuthenticationStrategy |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/MessageParams.java | {
"start": 862,
"end": 3967
} | class ____<V> implements MessageArgs<V> {
private long retentionDuration;
private int receiveLimit = 0;
private final Map<String, Object> headers = new HashMap<>();
private final V payload;
private int priority;
private DeduplicationMode deduplicationMode;
private Duration deduplicationInterval;
private Object deduplicationId;
private Duration delayInterval = Duration.ZERO;
public MessageParams(V value) {
this.payload = value;
}
@Override
public MessageArgs<V> priority(int priority) {
if (priority < 0 || priority > 9) {
throw new IllegalArgumentException("Priority should be from 0 to 9, but was " + priority);
}
this.priority = priority;
return this;
}
@Override
public MessageArgs<V> delay(Duration interval) {
this.delayInterval = interval;
return this;
}
@Override
public MessageArgs<V> timeToLive(Duration duration) {
this.retentionDuration = duration.toMillis();
return this;
}
@Override
public MessageArgs<V> deliveryLimit(int value) {
if (value < 1) {
throw new IllegalArgumentException("value can't be lower than 1");
}
this.receiveLimit = value;
return this;
}
@Override
public MessageArgs<V> header(String key, Object value) {
headers.put(key, value);
return this;
}
@Override
public MessageArgs<V> headers(Map<String, Object> entries) {
headers.putAll(entries);
return this;
}
@Override
public MessageArgs<V> deduplicationByHash(Duration interval) {
this.deduplicationMode = DeduplicationMode.HASH;
this.deduplicationInterval = interval;
return this;
}
@Override
public MessageArgs<V> deduplicationById(Object id, Duration interval) {
Objects.requireNonNull(id);
this.deduplicationMode = DeduplicationMode.ID;
this.deduplicationId = id;
this.deduplicationInterval = interval;
return this;
}
public MessageArgs<V> deduplicationByHash() {
this.deduplicationMode = DeduplicationMode.HASH;
return this;
}
public MessageArgs<V> deduplicationById(Object id) {
this.deduplicationMode = DeduplicationMode.ID;
this.deduplicationId = id;
return this;
}
public long getRetentionDuration() {
return retentionDuration;
}
public int getReceiveLimit() {
return receiveLimit;
}
public Map<String, Object> getHeaders() {
return headers;
}
public V getPayload() {
return payload;
}
public DeduplicationMode getDeduplicationMode() {
return deduplicationMode;
}
public Duration getDeduplicationInterval() {
return deduplicationInterval;
}
public Object getDeduplicationId() {
return deduplicationId;
}
public Duration getDelayInterval() {
return delayInterval;
}
public int getPriority() {
return priority;
}
}
| MessageParams |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/serialization/DeepStubsSerializableTest.java | {
"start": 2990,
"end": 3176
} | class ____ implements Serializable {
boolean isFalse() {
return false;
}
int number() {
return 100;
}
}
static | SampleClass2 |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ReferenceEqualityTest.java | {
"start": 1488,
"end": 2048
} | class ____ {
void something(TestProtoMessage f1, TestProtoMessage f2) {
// BUG: Diagnostic contains: boolean b = Objects.equals(f1, f2);
boolean b = f1 == f2;
// BUG: Diagnostic contains: b = f1.getMessage().equals(f2.getMessage())
b = f1.getMessage() == f2.getMessage();
}
}
""")
.doTest();
}
@Test
public void negative_const() {
compilationHelper
.addSourceLines(
"Foo.java",
"""
| Foo |
java | redisson__redisson | redisson/src/main/java/org/redisson/RedissonDoubleAdder.java | {
"start": 931,
"end": 2077
} | class ____ extends RedissonBaseAdder<Double> implements RDoubleAdder {
private final DoubleAdder counter = new DoubleAdder();
private final RedissonClient redisson;
public RedissonDoubleAdder(CommandAsyncExecutor connectionManager, String name, RedissonClient redisson) {
super(connectionManager, name, redisson);
this.redisson = redisson;
}
@Override
protected void doReset() {
counter.reset();
}
@Override
protected RFuture<Double> addAndGetAsync(String id) {
return redisson.getAtomicDouble(getCounterName(id)).getAndAddAsync(counter.sum());
}
@Override
protected RFuture<Double> getAndDeleteAsync(String id) {
return redisson.getAtomicDouble(getCounterName(id)).getAndDeleteAsync();
}
@Override
public void add(double x) {
counter.add(x);
}
@Override
public void increment() {
add(1L);
}
@Override
public void decrement() {
add(-1L);
}
@Override
public double sum() {
return get(sumAsync(60, TimeUnit.SECONDS));
}
}
| RedissonDoubleAdder |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/collection/wildcard/Target.java | {
"start": 289,
"end": 705
} | class ____ {
private List<Plan> elements;
private Map<Plan, Plan> entries;
public List<Plan> getElements() {
return elements;
}
public void setElements(List<Plan> elements) {
this.elements = elements;
}
public Map<Plan, Plan> getEntries() {
return entries;
}
public void setEntries(Map<Plan, Plan> entries) {
this.entries = entries;
}
}
| Target |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/field/vectors/BitRankVectorsDocValuesField.java | {
"start": 656,
"end": 1085
} | class ____ extends ByteRankVectorsDocValuesField {
public BitRankVectorsDocValuesField(BinaryDocValues input, BinaryDocValues magnitudes, String name, ElementType elementType, int dims) {
super(input, magnitudes, name, elementType, dims / 8);
}
@Override
protected RankVectors getVector() {
return new BitRankVectors(vectorValue, magnitudesValue, numVecs, dims);
}
}
| BitRankVectorsDocValuesField |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/EnumTypeInfo.java | {
"start": 1647,
"end": 1960
} | class ____<T extends Enum<T>> extends TypeInformation<T> implements AtomicType<T> {
private static final long serialVersionUID = 8936740290137178660L;
private final Class<T> typeClass;
@PublicEvolving
public EnumTypeInfo(Class<T> typeClass) {
checkNotNull(typeClass, "Enum type | EnumTypeInfo |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/configuration/ConfigurationDependency.java | {
"start": 1165,
"end": 1256
} | class ____ used to encapsulate configuration dependencies in a structured format.
*/
public | is |
java | quarkusio__quarkus | test-framework/common/src/test/java/io/quarkus/test/common/TestResourceManagerTest.java | {
"start": 7762,
"end": 8589
} | class ____
implements QuarkusTestResourceConfigurableLifecycleManager<WithAnnotationBasedTestResource2> {
private String key;
@Override
public void init(WithAnnotationBasedTestResource2 annotation) {
this.key = annotation.key();
}
@Override
public Map<String, String> start() {
Map<String, String> props = new HashMap<>();
props.put(key, "value");
return props;
}
@Override
public void stop() {
}
}
@WithTestResource(value = AnnotationBasedQuarkusTestResource.class, scope = TestResourceScope.GLOBAL)
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@Repeatable(WithAnnotationBasedTestResource.List.class)
public @ | AnnotationBasedQuarkusTestResource2 |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/Compression.java | {
"start": 3644,
"end": 3718
} | enum ____ {
TRUE,
INDEXING_DATA,
FALSE
}
}
| Enabled |
java | google__dagger | javatests/dagger/functional/subcomponent/multibindings/MultibindingSubcomponents.java | {
"start": 5955,
"end": 6130
} | interface ____
extends ParentWithProvision, HasChildWithoutProvision {}
@Component(modules = ParentMultibindingModule.class)
| ParentWithProvisionHasChildWithoutProvision |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java | {
"start": 8683,
"end": 9913
} | class ____ extends TransportRetentionLeaseAction<RemoveRequest> {
@Inject
public TransportRemoveAction(
final ThreadPool threadPool,
final ClusterService clusterService,
final TransportService transportService,
final ActionFilters actionFilters,
final ProjectResolver projectResolver,
final IndexNameExpressionResolver indexNameExpressionResolver,
final IndicesService indicesService
) {
super(
REMOVE.name(),
threadPool,
clusterService,
transportService,
actionFilters,
projectResolver,
indexNameExpressionResolver,
indicesService,
RemoveRequest::new
);
}
@Override
void doRetentionLeaseAction(
final IndexShard indexShard,
final RemoveRequest request,
final ActionListener<ActionResponse.Empty> listener
) {
indexShard.removeRetentionLease(request.getId(), listener.map(r -> ActionResponse.Empty.INSTANCE));
}
}
private abstract static | TransportRemoveAction |
java | google__guice | extensions/assistedinject/test/com/google/inject/assistedinject/FactoryProvider2Test.java | {
"start": 45616,
"end": 45678
} | enum ____ {
SHALLOW,
DEEP;
}
| ComparisonMethod |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/superclass/auditoverride/EmbeddableTest.java | {
"start": 12515,
"end": 13440
} | class ____ {
@Id
@GeneratedValue
private Integer id;
@Embedded
@AuditOverride(forClass = SimpleAbstractMappedSuperclass.class)
private SimpleEmbeddable embeddable;
public SimpleEmbeddable getEmbeddable() {
return embeddable;
}
public void setEmbeddable(SimpleEmbeddable embeddable) {
this.embeddable = embeddable;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
}
private PersistentClass getPersistentClass(
DomainModelScope dm,
SessionFactoryScope sf,
Class<?> clazz,
Object id,
Number revision) {
return sf.fromSession( session -> {
final var auditReader = AuditReaderFactory.get( session );
final Object entity = auditReader.find( clazz, id, revision );
return dm.getDomainModel()
.getEntityBinding( auditReader.getEntityName( id, revision, entity ) + "_AUD" );
} );
}
}
| SimpleEmbeddableWithOverrideEntity |
java | apache__camel | components/camel-kubernetes/src/test/java/org/apache/camel/component/kubernetes/consumer/integration/KubernetesNodesConsumerIT.java | {
"start": 3345,
"end": 3761
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) {
Message in = exchange.getIn();
Node node = exchange.getIn().getBody(Node.class);
log.info("Got event with node name: {} and action {}", node.getMetadata().getName(),
in.getHeader(KubernetesConstants.KUBERNETES_EVENT_ACTION));
}
}
}
| KubernetesProcessor |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/AbstractXContentSerializingTestCase.java | {
"start": 745,
"end": 1584
} | class ____<T extends ToXContent & Writeable> extends AbstractSerializationTestCase<T> {
/**
* Parses to a new instance using the provided {@link XContentParser}
*/
protected abstract T doParseInstance(XContentParser parser) throws IOException;
@Override
protected T createXContextTestInstance(XContentType xContentType) {
return createTestInstance();
}
@Override
protected AbstractXContentTestCase.XContentTester<T> createXContentTester() {
return AbstractXContentTestCase.xContentTester(
this::createParser,
this::createXContextTestInstance,
getToXContentParams(),
this::doParseInstance
);
}
@Override
protected ToXContent asXContent(T instance) {
return instance;
}
}
| AbstractXContentSerializingTestCase |
java | apache__flink | flink-connectors/flink-connector-datagen-test/src/test/java/org/apache/flink/connector/datagen/source/DataGeneratorSourceTest.java | {
"start": 2142,
"end": 8173
} | class ____ {
@Test
@DisplayName("Correctly restores SplitEnumerator from a snapshot.")
void testRestoreEnumerator() throws Exception {
final GeneratorFunction<Long, Long> generatorFunctionStateless = index -> index;
final DataGeneratorSource<Long> dataGeneratorSource =
new DataGeneratorSource<>(generatorFunctionStateless, 100, Types.LONG);
final int parallelism = 2;
final MockSplitEnumeratorContext<NumberSequenceSource.NumberSequenceSplit> context =
new MockSplitEnumeratorContext<>(parallelism);
SplitEnumerator<
NumberSequenceSource.NumberSequenceSplit,
Collection<NumberSequenceSource.NumberSequenceSplit>>
enumerator = dataGeneratorSource.createEnumerator(context);
// start() is not strictly necessary in the current implementation, but should logically be
// executed in this order (protect against any breaking changes in the start() method).
enumerator.start();
Collection<NumberSequenceSource.NumberSequenceSplit> enumeratorState =
enumerator.snapshotState(0);
assertThat(enumeratorState).hasSize(parallelism);
enumerator = dataGeneratorSource.restoreEnumerator(context, enumeratorState);
// Verify that splits were restored and can be assigned
assertThat(context.getSplitsAssignmentSequence()).isEmpty();
for (NumberSequenceSource.NumberSequenceSplit ignored : enumeratorState) {
enumerator.handleSplitRequest(0, "hostname");
}
assertThat(context.getSplitsAssignmentSequence()).hasSize(enumeratorState.size());
}
@Test
@DisplayName("Uses the underlying NumberSequenceSource correctly for checkpointing.")
void testReaderCheckpoints() throws Exception {
final int numCycles = 3;
final long from = 0;
final long mid = 156;
final long to = 383;
final long elementsPerCycle = (to - from + 1) / numCycles;
final TestingReaderOutput<Long> out = new TestingReaderOutput<>();
SourceReader<Long, NumberSequenceSource.NumberSequenceSplit> reader = createReader();
reader.addSplits(
Arrays.asList(
new NumberSequenceSource.NumberSequenceSplit("split-1", from, mid),
new NumberSequenceSource.NumberSequenceSplit("split-2", mid + 1, to)));
for (int cycle = 0; cycle < numCycles; cycle++) {
// this call is not required but mimics what happens at runtime
assertThat(reader.pollNext(out))
.as(
"Each poll should return a NOTHING_AVAILABLE status to explicitly trigger the availability check through in SourceReader.isAvailable")
.isSameAs(InputStatus.NOTHING_AVAILABLE);
for (int elementInCycle = 0; elementInCycle < elementsPerCycle; elementInCycle++) {
assertThat(reader.isAvailable())
.as(
"There should be always data available because the test utilizes no rate-limiting strategy and splits are provided.")
.isCompleted();
// this never returns END_OF_INPUT because IteratorSourceReaderBase#pollNext does
// not immediately return END_OF_INPUT when the input is exhausted
assertThat(reader.pollNext(out))
.as(
"Each poll should return a NOTHING_AVAILABLE status to explicitly trigger the availability check through in SourceReader.isAvailable")
.isSameAs(InputStatus.NOTHING_AVAILABLE);
}
// checkpoint
List<NumberSequenceSource.NumberSequenceSplit> splits = reader.snapshotState(1L);
// first cycle partially consumes the first split
// second cycle consumes the remaining first split and partially consumes the second
// third cycle consumes remaining second split
assertThat(splits).hasSize(numCycles - cycle - 1);
// re-create and restore
reader = createReader();
if (splits.isEmpty()) {
reader.notifyNoMoreSplits();
} else {
reader.addSplits(splits);
}
}
// we need to go again through isAvailable because IteratorSourceReaderBase#pollNext does
// not immediately return END_OF_INPUT when the input is exhausted
assertThat(reader.isAvailable())
.as(
"There should be always data available because the test utilizes no rate-limiting strategy and splits are provided.")
.isCompleted();
assertThat(reader.pollNext(out)).isSameAs(InputStatus.END_OF_INPUT);
final List<Long> result = out.getEmittedRecords();
final Iterable<Long> expected = LongStream.range(from, to + 1)::iterator;
assertThat(result).containsExactlyElementsOf(expected);
}
private static SourceReader<Long, NumberSequenceSource.NumberSequenceSplit> createReader()
throws Exception {
// the arguments passed in the source constructor matter only to the enumerator
GeneratorFunction<Long, Long> generatorFunctionStateless = index -> index;
DataGeneratorSource<Long> dataGeneratorSource =
new DataGeneratorSource<>(generatorFunctionStateless, Long.MAX_VALUE, Types.LONG);
return dataGeneratorSource.createReader(new DummyReaderContext());
}
// ------------------------------------------------------------------------
// test utils / mocks
//
// the "flink-connector-test-utils module has proper mocks and utils,
// but cannot be used here, because it would create a cyclic dependency.
// ------------------------------------------------------------------------
private static final | DataGeneratorSourceTest |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/vertx/ThreadPerTaskExecutorServiceTest.java | {
"start": 319,
"end": 2738
} | class ____ extends AsyncTestBase {
@Test
public void testExecute() throws Exception {
ThreadPerTaskExecutorService exec = new ThreadPerTaskExecutorService(Executors.defaultThreadFactory());
int numTasks = 100;
Set<Thread> threads = Collections.synchronizedSet(new HashSet<>());
for (int i = 0;i < numTasks;i++) {
exec.execute(() -> threads.add(Thread.currentThread()));
}
exec.shutdown();
exec.awaitTermination(5, TimeUnit.SECONDS);
assertEquals(numTasks, threads.size());
}
@Test
public void testShutdown() throws Exception {
ThreadPerTaskExecutorService exec = new ThreadPerTaskExecutorService(Executors.defaultThreadFactory());
int numTasks = 10;
CountDownLatch latch = new CountDownLatch(1);
CyclicBarrier barrier = new CyclicBarrier(numTasks + 1);
for (int i = 0;i < numTasks;i++) {
exec.execute(() -> {
try {
barrier.await();
latch.await();
} catch (Exception e) {
fail(e);
}
});
}
barrier.await();
exec.shutdown();
latch.countDown();
long now = System.currentTimeMillis();
exec.awaitTermination(5, TimeUnit.SECONDS);
assertTrue(System.currentTimeMillis() - now < 1000);
}
@Test
public void testShutdownEmpty() throws Exception {
ThreadPerTaskExecutorService exec = new ThreadPerTaskExecutorService(Executors.defaultThreadFactory());
exec.shutdown();
long now = System.currentTimeMillis();
exec.awaitTermination(5, TimeUnit.SECONDS);
assertTrue(System.currentTimeMillis() - now < 1000);
}
@Test
public void testInterrupt() throws Exception {
ThreadPerTaskExecutorService exec = new ThreadPerTaskExecutorService(Executors.defaultThreadFactory());
int numTasks = 100;
CyclicBarrier barrier = new CyclicBarrier(numTasks + 1);
CountDownLatch latch = new CountDownLatch(1);
AtomicInteger interrupts = new AtomicInteger();
for (int i = 0;i < numTasks;i++) {
exec.execute(() -> {
try {
barrier.await();
latch.await();
} catch (InterruptedException e) {
interrupts.incrementAndGet();
} catch (BrokenBarrierException e) {
fail(e);
}
});
}
barrier.await();
exec.shutdownNow();
exec.awaitTermination(5, TimeUnit.SECONDS);
assertEquals(numTasks, interrupts.get());
}
}
| ThreadPerTaskExecutorServiceTest |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/window/groupwindow/internal/MergingWindowSetTest.java | {
"start": 23585,
"end": 25161
} | class ____<UK, UV> implements MapState<UK, UV> {
private final Map<UK, UV> internalMap;
HeapMapState() {
internalMap = new HashMap<>();
}
@Override
public UV get(UK key) throws Exception {
return internalMap.get(key);
}
@Override
public void put(UK key, UV value) throws Exception {
internalMap.put(key, value);
}
@Override
public void putAll(Map<UK, UV> map) throws Exception {
internalMap.putAll(map);
}
@Override
public void remove(UK key) throws Exception {
internalMap.remove(key);
}
@Override
public boolean contains(UK key) throws Exception {
return internalMap.containsKey(key);
}
@Override
public Iterable<Map.Entry<UK, UV>> entries() throws Exception {
return internalMap.entrySet();
}
@Override
public Iterable<UK> keys() throws Exception {
return internalMap.keySet();
}
@Override
public Iterable<UV> values() throws Exception {
return internalMap.values();
}
@Override
public Iterator<Map.Entry<UK, UV>> iterator() throws Exception {
return internalMap.entrySet().iterator();
}
@Override
public boolean isEmpty() {
return internalMap.isEmpty();
}
@Override
public void clear() {
internalMap.clear();
}
}
}
| HeapMapState |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/datasource/bitmap/BitFieldArgs.java | {
"start": 239,
"end": 539
} | enum ____ {
WRAP,
SAT,
FAIL;
}
/**
* Represents a bit field type with details about signed/unsigned and the number of bits.
* Instances can be created from a boolean/bits or from strings like i8 (signed) or u10 (unsigned).
*/
public static | OverflowType |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/commands/process/ListPlatformHttp.java | {
"start": 1727,
"end": 7393
} | class ____ extends ProcessWatchCommand {
@CommandLine.Parameters(description = "Name or pid of running Camel integration", arity = "0..1")
String name = "*";
@CommandLine.Option(names = { "--sort" }, completionCandidates = PidNameAgeCompletionCandidates.class,
description = "Sort by pid, name or age", defaultValue = "pid")
String sort;
@CommandLine.Option(names = { "--all" },
description = "Include management endpoints")
boolean all;
public ListPlatformHttp(CamelJBangMain main) {
super(main);
}
@Override
public Integer doProcessWatchCall() throws Exception {
List<Row> rows = new ArrayList<>();
List<Long> pids = findPids(name);
ProcessHandle.allProcesses()
.filter(ph -> pids.contains(ph.pid()))
.forEach(ph -> {
JsonObject root = loadStatus(ph.pid());
// there must be a status file for the running Camel integration
if (root != null) {
Row row = new Row();
JsonObject context = (JsonObject) root.get("context");
if (context == null) {
return;
}
row.name = context.getString("name");
if ("CamelJBang".equals(row.name)) {
row.name = ProcessHelper.extractName(root, ph);
}
row.pid = Long.toString(ph.pid());
row.uptime = extractSince(ph);
row.age = TimeUtils.printSince(row.uptime);
JsonObject jo = (JsonObject) root.get("platform-http");
if (jo != null) {
String server = jo.getString("server");
JsonArray arr = (JsonArray) jo.get("endpoints");
if (arr != null) {
for (int i = 0; i < arr.size(); i++) {
row = row.copy();
jo = (JsonObject) arr.get(i);
row.server = server;
row.url = jo.getString("url");
row.path = jo.getString("path");
row.verbs = jo.getString("verbs");
row.consumes = jo.getString("consumes");
row.produces = jo.getString("produces");
rows.add(row);
}
}
if (all) {
arr = (JsonArray) jo.get("managementEndpoints");
if (arr != null) {
for (int i = 0; i < arr.size(); i++) {
row = row.copy();
jo = (JsonObject) arr.get(i);
row.server = server;
row.url = jo.getString("url");
row.path = jo.getString("path");
row.verbs = jo.getString("verbs");
row.consumes = jo.getString("consumes");
row.produces = jo.getString("produces");
rows.add(row);
}
}
}
}
}
});
// sort rows
rows.sort(this::sortRow);
if (!rows.isEmpty()) {
printer().println(AsciiTable.getTable(AsciiTable.NO_BORDERS, rows, Arrays.asList(
new Column().header("PID").headerAlign(HorizontalAlign.CENTER).with(r -> r.pid),
new Column().header("NAME").dataAlign(HorizontalAlign.LEFT).maxWidth(30, OverflowBehaviour.ELLIPSIS_RIGHT)
.with(r -> r.name),
new Column().header("URL").dataAlign(HorizontalAlign.LEFT).with(r -> r.url),
new Column().header("METHOD").dataAlign(HorizontalAlign.LEFT).with(r -> r.verbs),
new Column().header("CONTENT-TYPE").dataAlign(HorizontalAlign.LEFT).with(this::getContent))));
}
return 0;
}
private String getContent(Row r) {
StringJoiner sj = new StringJoiner(" ");
if (r.consumes != null || r.produces != null) {
if (r.consumes != null) {
sj.add("accept: " + r.consumes);
}
if (r.produces != null) {
sj.add("produces: " + r.produces);
}
}
if (sj.length() > 0) {
return sj.toString();
}
return "";
}
protected int sortRow(Row o1, Row o2) {
String s = sort;
int negate = 1;
if (s.startsWith("-")) {
s = s.substring(1);
negate = -1;
}
switch (s) {
case "pid":
return Long.compare(Long.parseLong(o1.pid), Long.parseLong(o2.pid)) * negate;
case "name":
return o1.name.compareToIgnoreCase(o2.name) * negate;
case "age":
return Long.compare(o1.uptime, o2.uptime) * negate;
default:
return 0;
}
}
private static | ListPlatformHttp |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java | {
"start": 1286,
"end": 1385
} | class ____ {
/**
* LinuxResourceCalculatorPlugin with a fake timer
*/
static | TestSysInfoLinux |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.