index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/streaming/connectors/elasticsearch/table/StaticIndexGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.connectors.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.RowData;
/** A static {@link IndexGenerator} which generate fixed index name. */
@Internal
final class StaticIndexGenerator extends IndexGeneratorBase {
public StaticIndexGenerator(String index) {
super(index);
}
public String generate(RowData row) {
return index;
}
}
| 5,800 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/NetworkClientConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import javax.annotation.Nullable;
import java.io.Serializable;
/**
 * Serializable container for the network-level settings used when building the Elasticsearch REST
 * client: basic-auth credentials, an optional connection path prefix and the
 * request/connect/socket timeouts. Every value may be {@code null}, in which case the REST
 * client's default is used.
 */
class NetworkClientConfig implements Serializable {

    // Pin the serial version explicitly; this config is serialized as part of the sink.
    private static final long serialVersionUID = 1L;

    @Nullable private final String username;
    @Nullable private final String password;
    @Nullable private final String connectionPathPrefix;
    @Nullable private final Integer connectionRequestTimeout;
    @Nullable private final Integer connectionTimeout;
    @Nullable private final Integer socketTimeout;

    /**
     * @param username basic-auth user name, or {@code null} if no authentication is configured
     * @param password basic-auth password, or {@code null} if no authentication is configured
     * @param connectionPathPrefix path prefix prepended to every request, or {@code null}
     * @param connectionRequestTimeout timeout in milliseconds for leasing a connection from the
     *     connection manager, or {@code null} for the client default
     * @param connectionTimeout timeout in milliseconds for establishing a connection, or {@code
     *     null} for the client default
     * @param socketTimeout socket (read) timeout in milliseconds, or {@code null} for the client
     *     default
     */
    NetworkClientConfig(
            @Nullable String username,
            @Nullable String password,
            @Nullable String connectionPathPrefix,
            @Nullable Integer connectionRequestTimeout,
            @Nullable Integer connectionTimeout,
            @Nullable Integer socketTimeout) {
        this.username = username;
        this.password = password;
        this.connectionPathPrefix = connectionPathPrefix;
        this.connectionRequestTimeout = connectionRequestTimeout;
        this.connectionTimeout = connectionTimeout;
        this.socketTimeout = socketTimeout;
    }

    @Nullable
    public String getUsername() {
        return username;
    }

    @Nullable
    public String getPassword() {
        return password;
    }

    @Nullable
    public Integer getConnectionRequestTimeout() {
        return connectionRequestTimeout;
    }

    @Nullable
    public Integer getConnectionTimeout() {
        return connectionTimeout;
    }

    @Nullable
    public Integer getSocketTimeout() {
        return socketTimeout;
    }

    @Nullable
    public String getConnectionPathPrefix() {
        return connectionPathPrefix;
    }
}
| 5,801 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/FlushBackoffType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.PublicEvolving;
/**
 * Used to control whether the sink should retry failed requests at all, and with which kind of
 * back-off strategy.
 */
@PublicEvolving
public enum FlushBackoffType {
    /** After every failure, waits a fixed, configured time until the retries are exhausted. */
    CONSTANT,
    /**
     * After every failure, waits initially the configured time and increases the waiting time
     * exponentially until the retries are exhausted.
     */
    EXPONENTIAL,
    /** The failure is not retried. */
    NONE,
}
| 5,802 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/RequestIndexer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.PublicEvolving;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
/**
 * Users add multiple delete, index or update requests to a {@link RequestIndexer} to prepare them
 * for sending to an Elasticsearch cluster.
 *
 * <p>Implementations decide when the added requests are actually transmitted; adding a request
 * does not imply it has been sent yet.
 */
@PublicEvolving
public interface RequestIndexer {
    /**
     * Add multiple {@link DeleteRequest} to the indexer to prepare for sending requests to
     * Elasticsearch.
     *
     * @param deleteRequests The multiple {@link DeleteRequest} to add.
     */
    void add(DeleteRequest... deleteRequests);

    /**
     * Add multiple {@link IndexRequest} to the indexer to prepare for sending requests to
     * Elasticsearch.
     *
     * @param indexRequests The multiple {@link IndexRequest} to add.
     */
    void add(IndexRequest... indexRequests);

    /**
     * Add multiple {@link UpdateRequest} to the indexer to prepare for sending requests to
     * Elasticsearch.
     *
     * @param updateRequests The multiple {@link UpdateRequest} to add.
     */
    void add(UpdateRequest... updateRequests);
}
| 5,803 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.http.HttpHost;
import java.io.IOException;
import java.util.List;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Flink Sink to insert or update data in an Elasticsearch index. Two delivery guarantees are
 * supported:
 *
 * <ul>
 *   <li>{@link DeliveryGuarantee#NONE}: actions are flushed to Elasticsearch purely according to
 *       the bulk processor configuration, so actions still buffered by the bulk processor may be
 *       lost on failure.
 *   <li>{@link DeliveryGuarantee#AT_LEAST_ONCE}: on a checkpoint the sink waits until every
 *       buffered action has been flushed to and acknowledged by Elasticsearch. No action is lost,
 *       but after a Flink restart actions may be sent again, which can leave Elasticsearch
 *       temporarily inconsistent until it converges.
 * </ul>
 *
 * @param <IN> type of the records converted to Elasticsearch actions
 * @see ElasticsearchSinkBuilderBase on how to construct a ElasticsearchSink
 */
@PublicEvolving
public class ElasticsearchSink<IN> implements Sink<IN> {

    private final List<HttpHost> hosts;
    private final ElasticsearchEmitter<? super IN> emitter;
    private final BulkProcessorConfig bulkProcessorConfig;
    private final BulkProcessorBuilderFactory bulkProcessorBuilderFactory;
    private final NetworkClientConfig networkClientConfig;
    private final DeliveryGuarantee deliveryGuarantee;

    ElasticsearchSink(
            List<HttpHost> hosts,
            ElasticsearchEmitter<? super IN> emitter,
            DeliveryGuarantee deliveryGuarantee,
            BulkProcessorBuilderFactory bulkProcessorBuilderFactory,
            BulkProcessorConfig bulkProcessorConfig,
            NetworkClientConfig networkClientConfig) {
        this.hosts = checkNotNull(hosts);
        checkArgument(!hosts.isEmpty(), "Hosts cannot be empty.");
        this.emitter = checkNotNull(emitter);
        this.deliveryGuarantee = checkNotNull(deliveryGuarantee);
        this.bulkProcessorBuilderFactory = checkNotNull(bulkProcessorBuilderFactory);
        this.bulkProcessorConfig = checkNotNull(bulkProcessorConfig);
        this.networkClientConfig = checkNotNull(networkClientConfig);
    }

    @Override
    public SinkWriter<IN> createWriter(InitContext context) throws IOException {
        // AT_LEAST_ONCE is implemented by forcing a full flush on every checkpoint.
        final boolean flushOnCheckpoint = deliveryGuarantee == DeliveryGuarantee.AT_LEAST_ONCE;
        return new ElasticsearchWriter<>(
                hosts,
                emitter,
                flushOnCheckpoint,
                bulkProcessorConfig,
                bulkProcessorBuilderFactory,
                networkClientConfig,
                context.metricGroup(),
                context.getMailboxExecutor());
    }

    @VisibleForTesting
    DeliveryGuarantee getDeliveryGuarantee() {
        return deliveryGuarantee;
    }
}
| 5,804 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchEmitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.functions.Function;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.elasticsearch.action.ActionRequest;
/**
 * Creates none or multiple {@link ActionRequest ActionRequests} from the incoming elements.
 *
 * <p>This is used by sinks to prepare elements for sending them to Elasticsearch.
 *
 * <p>Example:
 *
 * <pre>{@code
 * private static class TestElasticsearchEmitter implements ElasticsearchEmitter<Tuple2<Integer, String>> {
 *
 *     public IndexRequest createIndexRequest(Tuple2<Integer, String> element) {
 *         Map<String, Object> document = new HashMap<>();
 *         document.put("data", element.f1);
 *
 *         return Requests.indexRequest()
 *             .index("my-index")
 *             .type("my-type")
 *             .id(element.f0.toString())
 *             .source(document);
 *     }
 *
 *     public void emit(Tuple2<Integer, String> element, RequestIndexer indexer) {
 *         indexer.add(createIndexRequest(element));
 *     }
 * }
 *
 * }</pre>
 *
 * @param <T> The type of the element handled by this {@link ElasticsearchEmitter}
 */
@PublicEvolving
public interface ElasticsearchEmitter<T> extends Function {
    /**
     * Initialization method for the function. It is called once before the actual working process
     * methods. The default implementation does nothing.
     */
    default void open() throws Exception {}

    /**
     * Tear-down method for the function. It is called when the sink closes. The default
     * implementation does nothing.
     */
    default void close() throws Exception {}

    /**
     * Process the incoming element to produce multiple {@link ActionRequest ActionRequests}. The
     * produced requests should be added to the provided {@link RequestIndexer}.
     *
     * @param element incoming element to process
     * @param context to access additional information about the record
     * @param indexer request indexer that {@code ActionRequest} should be added to
     */
    void emit(T element, SinkWriter.Context context, RequestIndexer indexer);
}
| 5,805 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.operators.MailboxExecutor;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.groups.SinkWriterMetricGroup;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.function.ThrowingRunnable;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.rest.RestStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import static org.apache.flink.util.ExceptionUtils.firstOrSuppressed;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * {@link SinkWriter} that converts incoming records into Elasticsearch actions via an
 * {@link ElasticsearchEmitter} and ships them through a {@link BulkProcessor}.
 *
 * <p>Bulk callbacks are re-enqueued into Flink's mailbox (see {@code enqueueActionInMailbox}),
 * so bookkeeping of {@code pendingActions} and failure propagation happen on the task thread.
 */
class ElasticsearchWriter<IN> implements SinkWriter<IN> {

    private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchWriter.class);

    private final ElasticsearchEmitter<? super IN> emitter;
    private final MailboxExecutor mailboxExecutor;
    // If true, flush(...) blocks on every checkpoint until all actions are acknowledged.
    private final boolean flushOnCheckpoint;
    private final BulkProcessor bulkProcessor;
    private final RestHighLevelClient client;
    private final RequestIndexer requestIndexer;
    private final Counter numBytesOutCounter;

    // Actions handed to the bulk processor but not yet acknowledged. Incremented when requests
    // are added, decremented in extractFailures(), which runs via the mailbox.
    private long pendingActions = 0;
    // Guards against new writes while flush() is draining pending actions.
    private boolean checkpointInProgress = false;
    // Written from the bulk listener, read by the current-send-time gauge.
    private volatile long lastSendTime = 0;
    private volatile long ackTime = Long.MAX_VALUE;
    private volatile boolean closed = false;

    /**
     * Constructor creating an elasticsearch writer.
     *
     * @param hosts the reachable elasticsearch cluster nodes
     * @param emitter converting incoming records to elasticsearch actions
     * @param flushOnCheckpoint if true all until now received records are flushed after every
     *     checkpoint
     * @param bulkProcessorConfig describing the flushing and failure handling of the used {@link
     *     BulkProcessor}
     * @param bulkProcessorBuilderFactory configuring the {@link BulkProcessor}'s builder
     * @param networkClientConfig describing properties of the network connection used to connect to
     *     the elasticsearch cluster
     * @param metricGroup for the sink writer
     * @param mailboxExecutor Flink's mailbox executor
     */
    ElasticsearchWriter(
            List<HttpHost> hosts,
            ElasticsearchEmitter<? super IN> emitter,
            boolean flushOnCheckpoint,
            BulkProcessorConfig bulkProcessorConfig,
            BulkProcessorBuilderFactory bulkProcessorBuilderFactory,
            NetworkClientConfig networkClientConfig,
            SinkWriterMetricGroup metricGroup,
            MailboxExecutor mailboxExecutor) {
        this.emitter = checkNotNull(emitter);
        this.flushOnCheckpoint = flushOnCheckpoint;
        this.mailboxExecutor = checkNotNull(mailboxExecutor);
        this.client =
                new RestHighLevelClient(
                        configureRestClientBuilder(
                                RestClient.builder(hosts.toArray(new HttpHost[0])),
                                networkClientConfig));
        this.bulkProcessor = createBulkProcessor(bulkProcessorBuilderFactory, bulkProcessorConfig);
        this.requestIndexer = new DefaultRequestIndexer(metricGroup.getNumRecordsSendCounter());
        checkNotNull(metricGroup);
        metricGroup.setCurrentSendTimeGauge(() -> ackTime - lastSendTime);
        this.numBytesOutCounter = metricGroup.getIOMetricGroup().getNumBytesOutCounter();
        try {
            emitter.open();
        } catch (Exception e) {
            throw new FlinkRuntimeException("Failed to open the ElasticsearchEmitter", e);
        }
    }

    @Override
    public void write(IN element, Context context) throws IOException, InterruptedException {
        // do not allow new bulk writes until all actions are flushed; yielding runs the
        // enqueued mailbox actions (e.g. bulk response callbacks)
        while (checkpointInProgress) {
            mailboxExecutor.yield();
        }
        emitter.emit(element, context, requestIndexer);
    }

    @Override
    public void flush(boolean endOfInput) throws IOException, InterruptedException {
        checkpointInProgress = true;
        // Drain until everything is acknowledged; only required for at-least-once
        // (flushOnCheckpoint) or at the end of input.
        while (pendingActions != 0 && (flushOnCheckpoint || endOfInput)) {
            bulkProcessor.flush();
            LOG.info("Waiting for the response of {} pending actions.", pendingActions);
            // Runs the bulk callbacks, which update pendingActions via extractFailures().
            mailboxExecutor.yield();
        }
        checkpointInProgress = false;
    }

    /** Test helper: blocks until every pending action has been acknowledged. */
    @VisibleForTesting
    void blockingFlushAllActions() throws InterruptedException {
        while (pendingActions != 0) {
            bulkProcessor.flush();
            LOG.info("Waiting for the response of {} pending actions.", pendingActions);
            mailboxExecutor.yield();
        }
    }

    @Override
    public void close() throws Exception {
        // Mark closed first so late bulk callbacks are dropped (see enqueueActionInMailbox).
        closed = true;
        emitter.close();
        bulkProcessor.close();
        client.close();
    }

    /** Applies path prefix, credentials and timeouts from the config onto the REST builder. */
    private static RestClientBuilder configureRestClientBuilder(
            RestClientBuilder builder, NetworkClientConfig networkClientConfig) {
        if (networkClientConfig.getConnectionPathPrefix() != null) {
            builder.setPathPrefix(networkClientConfig.getConnectionPathPrefix());
        }
        // Basic auth is only configured when both username and password are present.
        if (networkClientConfig.getPassword() != null
                && networkClientConfig.getUsername() != null) {
            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(
                    AuthScope.ANY,
                    new UsernamePasswordCredentials(
                            networkClientConfig.getUsername(), networkClientConfig.getPassword()));
            builder.setHttpClientConfigCallback(
                    httpClientBuilder ->
                            httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider));
        }
        if (networkClientConfig.getConnectionRequestTimeout() != null
                || networkClientConfig.getConnectionTimeout() != null
                || networkClientConfig.getSocketTimeout() != null) {
            builder.setRequestConfigCallback(
                    requestConfigBuilder -> {
                        if (networkClientConfig.getConnectionRequestTimeout() != null) {
                            requestConfigBuilder.setConnectionRequestTimeout(
                                    networkClientConfig.getConnectionRequestTimeout());
                        }
                        if (networkClientConfig.getConnectionTimeout() != null) {
                            requestConfigBuilder.setConnectTimeout(
                                    networkClientConfig.getConnectionTimeout());
                        }
                        if (networkClientConfig.getSocketTimeout() != null) {
                            requestConfigBuilder.setSocketTimeout(
                                    networkClientConfig.getSocketTimeout());
                        }
                        return requestConfigBuilder;
                    });
        }
        return builder;
    }

    private BulkProcessor createBulkProcessor(
            BulkProcessorBuilderFactory bulkProcessorBuilderFactory,
            BulkProcessorConfig bulkProcessorConfig) {
        BulkProcessor.Builder builder =
                bulkProcessorBuilderFactory.apply(client, bulkProcessorConfig, new BulkListener());
        // This makes flush() blocking
        builder.setConcurrentRequests(0);
        return builder.build();
    }

    /** Bulk lifecycle hooks; after-bulk work is re-enqueued into the mailbox. */
    private class BulkListener implements BulkProcessor.Listener {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            LOG.info("Sending bulk of {} actions to Elasticsearch.", request.numberOfActions());
            lastSendTime = System.currentTimeMillis();
            numBytesOutCounter.inc(request.estimatedSizeInBytes());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            ackTime = System.currentTimeMillis();
            enqueueActionInMailbox(
                    () -> extractFailures(request, response), "elasticsearchSuccessCallback");
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            enqueueActionInMailbox(
                    () -> {
                        throw new FlinkRuntimeException("Complete bulk has failed.", failure);
                    },
                    "elasticsearchErrorCallback");
        }
    }

    private void enqueueActionInMailbox(
            ThrowingRunnable<? extends Exception> action, String actionName) {
        // If the writer is cancelled before the last bulk response (i.e. no flush on checkpoint
        // configured or shutdown without a final
        // checkpoint) the mailbox might already be shutdown, so we should not enqueue any
        // actions.
        if (isClosed()) {
            return;
        }
        mailboxExecutor.execute(action, actionName);
    }

    /**
     * Updates {@code pendingActions} for an acknowledged bulk and raises a
     * {@link FlinkRuntimeException} chaining all per-item failures, if any.
     */
    private void extractFailures(BulkRequest request, BulkResponse response) {
        if (!response.hasFailures()) {
            pendingActions -= request.numberOfActions();
            return;
        }
        Throwable chainedFailures = null;
        for (int i = 0; i < response.getItems().length; i++) {
            final BulkItemResponse itemResponse = response.getItems()[i];
            if (!itemResponse.isFailed()) {
                continue;
            }
            final Throwable failure = itemResponse.getFailure().getCause();
            if (failure == null) {
                continue;
            }
            final RestStatus restStatus = itemResponse.getFailure().getStatus();
            final DocWriteRequest<?> actionRequest = request.requests().get(i);
            chainedFailures =
                    firstOrSuppressed(
                            wrapException(restStatus, failure, actionRequest), chainedFailures);
        }
        if (chainedFailures == null) {
            return;
        }
        throw new FlinkRuntimeException(chainedFailures);
    }

    private static Throwable wrapException(
            RestStatus restStatus, Throwable rootFailure, DocWriteRequest<?> actionRequest) {
        if (restStatus == null) {
            return new FlinkRuntimeException(
                    String.format("Single action %s of bulk request failed.", actionRequest),
                    rootFailure);
        } else {
            return new FlinkRuntimeException(
                    String.format(
                            "Single action %s of bulk request failed with status %s.",
                            actionRequest, restStatus.getStatus()),
                    rootFailure);
        }
    }

    private boolean isClosed() {
        if (closed) {
            LOG.warn("Writer was closed before all records were acknowledged by Elasticsearch.");
        }
        return closed;
    }

    /** Forwards requests to the bulk processor while updating counters and pendingActions. */
    private class DefaultRequestIndexer implements RequestIndexer {

        private final Counter numRecordsSendCounter;

        public DefaultRequestIndexer(Counter numRecordsSendCounter) {
            this.numRecordsSendCounter = checkNotNull(numRecordsSendCounter);
        }

        @Override
        public void add(DeleteRequest... deleteRequests) {
            for (final DeleteRequest deleteRequest : deleteRequests) {
                numRecordsSendCounter.inc();
                pendingActions++;
                bulkProcessor.add(deleteRequest);
            }
        }

        @Override
        public void add(IndexRequest... indexRequests) {
            for (final IndexRequest indexRequest : indexRequests) {
                numRecordsSendCounter.inc();
                pendingActions++;
                bulkProcessor.add(indexRequest);
            }
        }

        @Override
        public void add(UpdateRequest... updateRequests) {
            for (final UpdateRequest updateRequest : updateRequests) {
                numRecordsSendCounter.inc();
                pendingActions++;
                bulkProcessor.add(updateRequest);
            }
        }
    }
}
| 5,806 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/MapElasticsearchEmitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import javax.annotation.Nullable;
import java.util.Map;
import java.util.function.Function;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * A simple {@link ElasticsearchEmitter} for {@code Map}-shaped documents, currently used by the
 * PyFlink ES connector.
 */
public class MapElasticsearchEmitter implements ElasticsearchEmitter<Map<String, Object>> {

    private static final long serialVersionUID = 1L;

    // In static mode this is the target index name; in dynamic mode it is the name of the
    // document field that contains the target index.
    private final String index;
    @Nullable private final String documentType;
    // When set, documents are upserted using this field's value as the document id.
    @Nullable private final String idFieldName;
    private final boolean isDynamicIndex;

    // Resolved in open(); not serializable, hence transient.
    private transient Function<Map<String, Object>, String> indexResolver;

    /**
     * @param index index name, or the field name holding the index when {@code isDynamicIndex}
     * @param documentType optional document type
     * @param idFieldName optional field whose value is used as document id (enables upserts)
     * @param isDynamicIndex whether {@code index} refers to a document field
     */
    public MapElasticsearchEmitter(
            String index,
            @Nullable String documentType,
            @Nullable String idFieldName,
            boolean isDynamicIndex) {
        this.index = checkNotNull(index);
        this.documentType = documentType;
        this.idFieldName = idFieldName;
        this.isDynamicIndex = isDynamicIndex;
    }

    @Override
    public void open() throws Exception {
        indexResolver =
                isDynamicIndex ? doc -> doc.get(index).toString() : doc -> index;
    }

    @Override
    public void emit(Map<String, Object> doc, SinkWriter.Context context, RequestIndexer indexer) {
        final String targetIndex = indexResolver.apply(doc);
        if (idFieldName == null) {
            // Without an id field the document is always appended as a new index request.
            indexer.add(new IndexRequest(targetIndex, documentType).source(doc));
            return;
        }
        final String docId = doc.get(idFieldName).toString();
        indexer.add(
                new UpdateRequest(targetIndex, documentType, docId).doc(doc).upsert(doc));
    }
}
| 5,807 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/BulkProcessorConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import java.io.Serializable;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Serializable configuration for the bulk processor: when buffered actions are flushed (by action
 * count, by size in MB, or on a time interval) and how failed flushes are retried (back-off type,
 * retry count and delay).
 */
class BulkProcessorConfig implements Serializable {

    // Pin the serial version explicitly; this config is serialized as part of the sink.
    private static final long serialVersionUID = 1L;

    private final int bulkFlushMaxActions;
    private final int bulkFlushMaxMb;
    private final long bulkFlushInterval;
    private final FlushBackoffType flushBackoffType;
    private final int bulkFlushBackoffRetries;
    private final long bulkFlushBackOffDelay;

    /**
     * @param bulkFlushMaxActions maximum number of buffered actions before a flush is triggered
     * @param bulkFlushMaxMb maximum buffered size in megabytes before a flush is triggered
     * @param bulkFlushInterval interval between automatic flushes
     *     (unit defined by the consumer of this config — TODO confirm against the builder)
     * @param flushBackoffType back-off strategy applied to failed flushes, never {@code null}
     * @param bulkFlushBackoffRetries number of retries before a failed flush is given up
     * @param bulkFlushBackOffDelay delay between back-off retries
     */
    BulkProcessorConfig(
            int bulkFlushMaxActions,
            int bulkFlushMaxMb,
            long bulkFlushInterval,
            FlushBackoffType flushBackoffType,
            int bulkFlushBackoffRetries,
            long bulkFlushBackOffDelay) {
        this.bulkFlushMaxActions = bulkFlushMaxActions;
        this.bulkFlushMaxMb = bulkFlushMaxMb;
        this.bulkFlushInterval = bulkFlushInterval;
        this.flushBackoffType = checkNotNull(flushBackoffType);
        this.bulkFlushBackoffRetries = bulkFlushBackoffRetries;
        this.bulkFlushBackOffDelay = bulkFlushBackOffDelay;
    }

    public int getBulkFlushMaxActions() {
        return bulkFlushMaxActions;
    }

    public int getBulkFlushMaxMb() {
        return bulkFlushMaxMb;
    }

    public long getBulkFlushInterval() {
        return bulkFlushInterval;
    }

    public FlushBackoffType getFlushBackoffType() {
        return flushBackoffType;
    }

    public int getBulkFlushBackoffRetries() {
        return bulkFlushBackoffRetries;
    }

    public long getBulkFlushBackOffDelay() {
        return bulkFlushBackOffDelay;
    }
}
| 5,808 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/BulkProcessorBuilderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.Internal;
import org.apache.flink.util.function.TriFunction;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.client.RestHighLevelClient;
import java.io.Serializable;
/**
 * Serializable factory that creates a {@link BulkProcessor.Builder} from a {@link
 * RestHighLevelClient}, a {@link BulkProcessorConfig} and a {@link BulkProcessor.Listener}.
 *
 * <p>Implemented per Elasticsearch version so the sink can bridge API differences between the
 * version-specific client libraries.
 */
@Internal
interface BulkProcessorBuilderFactory
        extends Serializable,
                TriFunction<
                        RestHighLevelClient,
                        BulkProcessorConfig,
                        BulkProcessor.Listener,
                        BulkProcessor.Builder> {}
| 5,809 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/BulkRequestConsumerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.Internal;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import java.util.function.BiConsumer;
/**
 * {@link BulkRequestConsumerFactory} is used to bridge incompatible Elasticsearch Java API calls
 * across different Elasticsearch versions.
 *
 * <p>Accepts a {@link BulkRequest} together with the {@link ActionListener} that receives the
 * asynchronous {@link BulkResponse} (or failure) for that request.
 */
@Internal
interface BulkRequestConsumerFactory
        extends BiConsumer<BulkRequest, ActionListener<BulkResponse>> {}
| 5,810 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBuilderBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.java.ClosureCleaner;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.util.InstantiationUtil;
import org.apache.http.HttpHost;
import java.util.Arrays;
import java.util.List;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
import static org.apache.flink.util.Preconditions.checkState;
/**
 * Base builder to construct a {@link ElasticsearchSink}.
 *
 * @param <IN> type of the records converted to Elasticsearch actions
 */
@PublicEvolving
public abstract class ElasticsearchSinkBuilderBase<
        IN, B extends ElasticsearchSinkBuilderBase<IN, B>> {

    // -1 means "disabled" for the numeric flush/backoff settings below.
    private int bulkFlushMaxActions = 1000;
    private int bulkFlushMaxMb = -1;
    private long bulkFlushInterval = -1;
    private FlushBackoffType bulkFlushBackoffType = FlushBackoffType.NONE;
    private int bulkFlushBackoffRetries = -1;
    private long bulkFlushBackOffDelay = -1;
    private DeliveryGuarantee deliveryGuarantee = DeliveryGuarantee.AT_LEAST_ONCE;
    private List<HttpHost> hosts;
    protected ElasticsearchEmitter<? super IN> emitter;
    private String username;
    private String password;
    private String connectionPathPrefix;
    private Integer connectionTimeout;
    private Integer connectionRequestTimeout;
    private Integer socketTimeout;

    protected ElasticsearchSinkBuilderBase() {}

    @SuppressWarnings("unchecked")
    protected <S extends ElasticsearchSinkBuilderBase<?, ?>> S self() {
        return (S) this;
    }

    /**
     * Sets the emitter which is invoked on every record to convert it to Elasticsearch actions.
     *
     * @param emitter to process records into Elasticsearch actions.
     * @return this builder
     */
    public <T extends IN> ElasticsearchSinkBuilderBase<T, ?> setEmitter(
            ElasticsearchEmitter<? super T> emitter) {
        checkNotNull(emitter);
        checkState(
                InstantiationUtil.isSerializable(emitter),
                "The elasticsearch emitter must be serializable.");
        final ElasticsearchSinkBuilderBase<T, ?> self = self();
        self.emitter = emitter;
        return self;
    }

    /**
     * Sets the hosts where the Elasticsearch cluster nodes are reachable.
     *
     * @param hosts http addresses describing the node locations
     * @return this builder
     */
    public B setHosts(HttpHost... hosts) {
        checkNotNull(hosts);
        checkState(hosts.length > 0, "Hosts cannot be empty.");
        this.hosts = Arrays.asList(hosts);
        return self();
    }

    /**
     * Sets the wanted {@link DeliveryGuarantee}. The default delivery guarantee is {@link
     * DeliveryGuarantee#AT_LEAST_ONCE}.
     *
     * @param deliveryGuarantee which describes the record emission behaviour
     * @return this builder
     */
    public B setDeliveryGuarantee(DeliveryGuarantee deliveryGuarantee) {
        checkState(
                deliveryGuarantee != DeliveryGuarantee.EXACTLY_ONCE,
                "Elasticsearch sink does not support the EXACTLY_ONCE guarantee.");
        this.deliveryGuarantee = checkNotNull(deliveryGuarantee);
        return self();
    }

    /**
     * Sets the maximum number of actions to buffer for each bulk request. You can pass -1 to
     * disable it. The default flush size 1000.
     *
     * @param numMaxActions the maximum number of actions to buffer per bulk request.
     * @return this builder
     */
    public B setBulkFlushMaxActions(int numMaxActions) {
        checkState(
                numMaxActions == -1 || numMaxActions > 0,
                "Max number of buffered actions must be larger than 0.");
        this.bulkFlushMaxActions = numMaxActions;
        return self();
    }

    /**
     * Sets the maximum size of buffered actions, in mb, per bulk request. You can pass -1 to
     * disable it.
     *
     * @param maxSizeMb the maximum size of buffered actions, in mb.
     * @return this builder
     */
    public B setBulkFlushMaxSizeMb(int maxSizeMb) {
        checkState(
                maxSizeMb == -1 || maxSizeMb > 0,
                "Max size of buffered actions must be larger than 0.");
        this.bulkFlushMaxMb = maxSizeMb;
        return self();
    }

    /**
     * Sets the bulk flush interval, in milliseconds. You can pass -1 to disable it.
     *
     * @param intervalMillis the bulk flush interval, in milliseconds.
     * @return this builder
     */
    public B setBulkFlushInterval(long intervalMillis) {
        checkState(
                intervalMillis == -1 || intervalMillis >= 0,
                "Interval (in milliseconds) between each flush must be larger than "
                        + "or equal to 0.");
        this.bulkFlushInterval = intervalMillis;
        return self();
    }

    /**
     * Sets the type of back off to use when flushing bulk requests. The default bulk flush back off
     * type is {@link FlushBackoffType#NONE}.
     *
     * <p>Sets the amount of delay between each backoff attempt when flushing bulk requests, in
     * milliseconds.
     *
     * <p>Sets the maximum number of retries for a backoff attempt when flushing bulk requests.
     *
     * @param flushBackoffType the backoff type to use.
     * @param maxRetries the maximum number of retries for a backoff attempt; must be positive.
     * @param delayMillis the delay between backoff attempts, in milliseconds; must be >= 0.
     * @return this builder
     */
    public B setBulkFlushBackoffStrategy(
            FlushBackoffType flushBackoffType, int maxRetries, long delayMillis) {
        this.bulkFlushBackoffType = checkNotNull(flushBackoffType);
        // NONE is already the default; configuring it explicitly with retry/delay values would
        // silently ignore those values, so reject it.
        checkState(
                flushBackoffType != FlushBackoffType.NONE,
                "FlushBackoffType#NONE does not require a configuration it is the default, retries and delay are ignored.");
        checkState(maxRetries > 0, "Max number of backoff attempts must be larger than 0.");
        this.bulkFlushBackoffRetries = maxRetries;
        checkState(
                delayMillis >= 0,
                "Delay (in milliseconds) between each backoff attempt must be larger "
                        + "than or equal to 0.");
        this.bulkFlushBackOffDelay = delayMillis;
        return self();
    }

    /**
     * Sets the username used to authenticate the connection with the Elasticsearch cluster.
     *
     * @param username of the Elasticsearch cluster user
     * @return this builder
     */
    public B setConnectionUsername(String username) {
        checkNotNull(username);
        this.username = username;
        return self();
    }

    /**
     * Sets the password used to authenticate the connection with the Elasticsearch cluster.
     *
     * @param password of the Elasticsearch cluster user
     * @return this builder
     */
    public B setConnectionPassword(String password) {
        checkNotNull(password);
        this.password = password;
        return self();
    }

    /**
     * Sets a prefix which used for every REST communication to the Elasticsearch cluster.
     *
     * @param prefix for the communication
     * @return this builder
     */
    public B setConnectionPathPrefix(String prefix) {
        checkNotNull(prefix);
        this.connectionPathPrefix = prefix;
        return self();
    }

    /**
     * Sets the timeout for requesting the connection of the Elasticsearch cluster from the
     * connection manager.
     *
     * @param timeout for the connection request
     * @return this builder
     */
    public B setConnectionRequestTimeout(int timeout) {
        checkState(timeout >= 0, "Connection request timeout must be larger than or equal to 0.");
        this.connectionRequestTimeout = timeout;
        return self();
    }

    /**
     * Sets the timeout for establishing a connection of the Elasticsearch cluster.
     *
     * @param timeout for the connection
     * @return this builder
     */
    public B setConnectionTimeout(int timeout) {
        checkState(timeout >= 0, "Connection timeout must be larger than or equal to 0.");
        this.connectionTimeout = timeout;
        return self();
    }

    /**
     * Sets the timeout for waiting for data or, put differently, a maximum period inactivity
     * between two consecutive data packets.
     *
     * @param timeout for the socket
     * @return this builder
     */
    public B setSocketTimeout(int timeout) {
        checkState(timeout >= 0, "Socket timeout must be larger than or equal to 0.");
        this.socketTimeout = timeout;
        return self();
    }

    /** Returns the version-specific factory that creates the {@code BulkProcessor} builder. */
    protected abstract BulkProcessorBuilderFactory getBulkProcessorBuilderFactory();

    /**
     * Constructs the {@link ElasticsearchSink} with the properties configured this builder.
     *
     * @return {@link ElasticsearchSink}
     */
    public ElasticsearchSink<IN> build() {
        checkNotNull(emitter);
        checkNotNull(hosts);

        NetworkClientConfig networkClientConfig = buildNetworkClientConfig();
        BulkProcessorConfig bulkProcessorConfig = buildBulkProcessorConfig();

        BulkProcessorBuilderFactory bulkProcessorBuilderFactory = getBulkProcessorBuilderFactory();
        // Ensure the (possibly anonymous) factory does not capture non-serializable state.
        ClosureCleaner.clean(
                bulkProcessorBuilderFactory, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);

        return new ElasticsearchSink<>(
                hosts,
                emitter,
                deliveryGuarantee,
                bulkProcessorBuilderFactory,
                bulkProcessorConfig,
                networkClientConfig);
    }

    private NetworkClientConfig buildNetworkClientConfig() {
        checkArgument(!hosts.isEmpty(), "Hosts cannot be empty.");
        return new NetworkClientConfig(
                username,
                password,
                connectionPathPrefix,
                connectionRequestTimeout,
                connectionTimeout,
                socketTimeout);
    }

    private BulkProcessorConfig buildBulkProcessorConfig() {
        return new BulkProcessorConfig(
                bulkFlushMaxActions,
                bulkFlushMaxMb,
                bulkFlushInterval,
                bulkFlushBackoffType,
                bulkFlushBackoffRetries,
                bulkFlushBackOffDelay);
    }

    @Override
    public String toString() {
        return "ElasticsearchSinkBuilder{"
                + "bulkFlushMaxActions="
                + bulkFlushMaxActions
                + ", bulkFlushMaxMb="
                + bulkFlushMaxMb
                + ", bulkFlushInterval="
                + bulkFlushInterval
                + ", bulkFlushBackoffType="
                + bulkFlushBackoffType
                + ", bulkFlushBackoffRetries="
                + bulkFlushBackoffRetries
                + ", bulkFlushBackOffDelay="
                + bulkFlushBackOffDelay
                + ", deliveryGuarantee="
                + deliveryGuarantee
                + ", hosts="
                + hosts
                + ", emitter="
                + emitter
                + ", username='"
                + username
                + '\''
                // Never expose the cluster password in toString()/logs; mask it instead.
                + ", password='"
                + (password == null ? null : "******")
                + '\''
                + ", connectionPathPrefix='"
                + connectionPathPrefix
                + '\''
                + '}';
    }
}
| 5,811 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/KeyExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.DistinctType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.util.function.SerializableFunction;
import java.io.Serializable;
import java.time.Duration;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.Period;
import java.util.List;
/** An extractor for a Elasticsearch key from a {@link RowData}. */
@Internal
class KeyExtractor implements SerializableFunction<RowData, String> {
    private final FieldFormatter[] fieldFormatters;
    private final String keyDelimiter;

    /** Formats a single primary-key field of a row as its string representation. */
    private interface FieldFormatter extends Serializable {
        String format(RowData rowData);
    }

    private KeyExtractor(FieldFormatter[] fieldFormatters, String keyDelimiter) {
        this.fieldFormatters = fieldFormatters;
        this.keyDelimiter = keyDelimiter;
    }

    @Override
    public String apply(RowData rowData) {
        final StringBuilder key = new StringBuilder();
        // Use an empty separator before the first field, the key delimiter afterwards.
        String separator = "";
        for (FieldFormatter formatter : fieldFormatters) {
            key.append(separator).append(formatter.format(rowData));
            separator = keyDelimiter;
        }
        return key.toString();
    }

    /**
     * Creates a key extractor over the given primary-key fields, or a function returning {@code
     * null} when no primary key is defined.
     */
    public static SerializableFunction<RowData, String> createKeyExtractor(
            List<LogicalTypeWithIndex> primaryKeyTypesWithIndex, String keyDelimiter) {
        if (primaryKeyTypesWithIndex.isEmpty()) {
            // No primary key: let Elasticsearch auto-generate document ids.
            return (row) -> null;
        }
        FieldFormatter[] formatters = new FieldFormatter[primaryKeyTypesWithIndex.size()];
        for (int i = 0; i < formatters.length; i++) {
            LogicalTypeWithIndex typeWithIndex = primaryKeyTypesWithIndex.get(i);
            formatters[i] = toFormatter(typeWithIndex.index, typeWithIndex.logicalType);
        }
        return new KeyExtractor(formatters, keyDelimiter);
    }

    private static FieldFormatter toFormatter(int fieldIndex, LogicalType fieldType) {
        switch (fieldType.getTypeRoot()) {
            case DATE:
                // DATE is stored as days since epoch.
                return (row) -> LocalDate.ofEpochDay(row.getInt(fieldIndex)).toString();
            case TIME_WITHOUT_TIME_ZONE:
                // TIME is stored as milliseconds of the day; convert to nanos for LocalTime.
                return (row) ->
                        LocalTime.ofNanoOfDay((long) row.getInt(fieldIndex) * 1_000_000L)
                                .toString();
            case INTERVAL_YEAR_MONTH:
                // NOTE(review): year-month intervals are month-based, yet this converts via
                // Period.ofDays — kept as-is so existing document ids stay stable; confirm.
                return (row) -> Period.ofDays(row.getInt(fieldIndex)).toString();
            case INTERVAL_DAY_TIME:
                // Day-time intervals are stored as milliseconds.
                return (row) -> Duration.ofMillis(row.getLong(fieldIndex)).toString();
            case DISTINCT_TYPE:
                // Unwrap the distinct type and format by its source type.
                return toFormatter(fieldIndex, ((DistinctType) fieldType).getSourceType());
            default:
                // Fall back to the generic field getter's string form. NOTE(review): assumes
                // primary-key fields are non-null (getFieldOrNull would NPE otherwise).
                RowData.FieldGetter getter = RowData.createFieldGetter(fieldType, fieldIndex);
                return (row) -> getter.getFieldOrNull(row).toString();
        }
    }
}
| 5,812 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchConnectorOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.elasticsearch.sink.FlushBackoffType;
import java.time.Duration;
import java.util.List;
/**
 * Base options for the Elasticsearch connector. Needs to be public so that the {@link
 * org.apache.flink.table.api.TableDescriptor} can access it.
 */
@PublicEvolving
public class ElasticsearchConnectorOptions {

    // Package-private: instantiated only by version-specific subclasses/factories.
    ElasticsearchConnectorOptions() {}

    // --------------------------------------------------------------------------------------------
    // Connection target and credentials
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<List<String>> HOSTS_OPTION =
            ConfigOptions.key("hosts")
                    .stringType()
                    .asList()
                    .noDefaultValue()
                    .withDescription("Elasticsearch hosts to connect to.");

    public static final ConfigOption<String> INDEX_OPTION =
            ConfigOptions.key("index")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Elasticsearch index for every record.");

    public static final ConfigOption<String> PASSWORD_OPTION =
            ConfigOptions.key("password")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Password used to connect to Elasticsearch instance.");

    public static final ConfigOption<String> USERNAME_OPTION =
            ConfigOptions.key("username")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Username used to connect to Elasticsearch instance.");

    // --------------------------------------------------------------------------------------------
    // Document id generation
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<String> KEY_DELIMITER_OPTION =
            ConfigOptions.key("document-id.key-delimiter")
                    .stringType()
                    .defaultValue("_")
                    .withDescription(
                            "Delimiter for composite keys e.g., \"$\" would result in IDs \"KEY1$KEY2$KEY3\".");

    // --------------------------------------------------------------------------------------------
    // Bulk flush behavior and backoff
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<Integer> BULK_FLUSH_MAX_ACTIONS_OPTION =
            ConfigOptions.key("sink.bulk-flush.max-actions")
                    .intType()
                    .defaultValue(1000)
                    .withDescription("Maximum number of actions to buffer for each bulk request.");

    public static final ConfigOption<MemorySize> BULK_FLUSH_MAX_SIZE_OPTION =
            ConfigOptions.key("sink.bulk-flush.max-size")
                    .memoryType()
                    .defaultValue(MemorySize.parse("2mb"))
                    .withDescription("Maximum size of buffered actions per bulk request");

    public static final ConfigOption<Duration> BULK_FLUSH_INTERVAL_OPTION =
            ConfigOptions.key("sink.bulk-flush.interval")
                    .durationType()
                    .defaultValue(Duration.ofSeconds(1))
                    .withDescription("Bulk flush interval");

    public static final ConfigOption<FlushBackoffType> BULK_FLUSH_BACKOFF_TYPE_OPTION =
            ConfigOptions.key("sink.bulk-flush.backoff.strategy")
                    .enumType(FlushBackoffType.class)
                    .noDefaultValue()
                    .withDescription("Backoff strategy");

    public static final ConfigOption<Integer> BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION =
            ConfigOptions.key("sink.bulk-flush.backoff.max-retries")
                    .intType()
                    .noDefaultValue()
                    .withDescription("Maximum number of retries.");

    public static final ConfigOption<Duration> BULK_FLUSH_BACKOFF_DELAY_OPTION =
            ConfigOptions.key("sink.bulk-flush.backoff.delay")
                    .durationType()
                    .noDefaultValue()
                    .withDescription("Delay between each backoff attempt.");

    // --------------------------------------------------------------------------------------------
    // REST client tuning
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<String> CONNECTION_PATH_PREFIX_OPTION =
            ConfigOptions.key("connection.path-prefix")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Prefix string to be added to every REST communication.");

    public static final ConfigOption<Duration> CONNECTION_REQUEST_TIMEOUT =
            ConfigOptions.key("connection.request-timeout")
                    .durationType()
                    .noDefaultValue()
                    .withDescription(
                            "The timeout for requesting a connection from the connection manager.");

    public static final ConfigOption<Duration> CONNECTION_TIMEOUT =
            ConfigOptions.key("connection.timeout")
                    .durationType()
                    .noDefaultValue()
                    .withDescription("The timeout for establishing a connection.");

    public static final ConfigOption<Duration> SOCKET_TIMEOUT =
            ConfigOptions.key("socket.timeout")
                    .durationType()
                    .noDefaultValue()
                    .withDescription(
                            "The socket timeout (SO_TIMEOUT) for waiting for data or, put differently,"
                                    + "a maximum period inactivity between two consecutive data packets.");

    // --------------------------------------------------------------------------------------------
    // Serialization and delivery semantics
    // --------------------------------------------------------------------------------------------

    public static final ConfigOption<String> FORMAT_OPTION =
            ConfigOptions.key("format")
                    .stringType()
                    .defaultValue("json")
                    .withDescription(
                            "The format must produce a valid JSON document. "
                                    + "Please refer to the documentation on formats for more details.");

    public static final ConfigOption<DeliveryGuarantee> DELIVERY_GUARANTEE_OPTION =
            ConfigOptions.key("sink.delivery-guarantee")
                    .enumType(DeliveryGuarantee.class)
                    .defaultValue(DeliveryGuarantee.AT_LEAST_ONCE)
                    .withDescription("Optional delivery guarantee when committing.");
}
| 5,813 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkFactoryBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.api.config.TableConfigOptions;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.factories.SerializationFormatFactory;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.util.StringUtils;
import javax.annotation.Nullable;
import java.time.ZoneId;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_SIZE_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_REQUEST_TIMEOUT;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_TIMEOUT;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.DELIVERY_GUARANTEE_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.FORMAT_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.HOSTS_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.INDEX_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.KEY_DELIMITER_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.PASSWORD_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.SOCKET_TIMEOUT;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.USERNAME_OPTION;
import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM;
import static org.apache.flink.util.Preconditions.checkNotNull;
import static org.elasticsearch.common.Strings.capitalize;
/** A {@link DynamicTableSinkFactory} for discovering ElasticsearchDynamicSink. */
@Internal
abstract class ElasticsearchDynamicSinkFactoryBase implements DynamicTableSinkFactory {
private final String factoryIdentifier;
private final ElasticsearchSinkBuilderSupplier<RowData> sinkBuilderSupplier;
public ElasticsearchDynamicSinkFactoryBase(
String factoryIdentifier,
ElasticsearchSinkBuilderSupplier<RowData> sinkBuilderSupplier) {
this.factoryIdentifier = checkNotNull(factoryIdentifier);
this.sinkBuilderSupplier = checkNotNull(sinkBuilderSupplier);
}
@Nullable
String getDocumentType(ElasticsearchConfiguration configuration) {
return null; // document type is only set in Elasticsearch versions < 7
}
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
List<LogicalTypeWithIndex> primaryKeyLogicalTypesWithIndex =
getPrimaryKeyLogicalTypesWithIndex(context);
final FactoryUtil.TableFactoryHelper helper =
FactoryUtil.createTableFactoryHelper(this, context);
EncodingFormat<SerializationSchema<RowData>> format =
helper.discoverEncodingFormat(SerializationFormatFactory.class, FORMAT_OPTION);
ElasticsearchConfiguration config = getConfiguration(helper);
helper.validate();
validateConfiguration(config);
return new ElasticsearchDynamicSink(
format,
config,
primaryKeyLogicalTypesWithIndex,
context.getPhysicalRowDataType(),
capitalize(factoryIdentifier),
sinkBuilderSupplier,
getDocumentType(config),
getLocalTimeZoneId(context.getConfiguration()));
}
ElasticsearchConfiguration getConfiguration(FactoryUtil.TableFactoryHelper helper) {
return new ElasticsearchConfiguration(helper.getOptions());
}
ZoneId getLocalTimeZoneId(ReadableConfig readableConfig) {
final String zone = readableConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
final ZoneId zoneId =
TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
? ZoneId.systemDefault()
: ZoneId.of(zone);
return zoneId;
}
void validateConfiguration(ElasticsearchConfiguration config) {
config.getHosts(); // validate hosts
validate(
config.getIndex().length() >= 1,
() -> String.format("'%s' must not be empty", INDEX_OPTION.key()));
int maxActions = config.getBulkFlushMaxActions();
validate(
maxActions == -1 || maxActions >= 1,
() ->
String.format(
"'%s' must be at least 1. Got: %s",
BULK_FLUSH_MAX_ACTIONS_OPTION.key(), maxActions));
long maxSize = config.getBulkFlushMaxByteSize().getBytes();
long mb1 = 1024 * 1024;
validate(
maxSize == -1 || (maxSize >= mb1 && maxSize % mb1 == 0),
() ->
String.format(
"'%s' must be in MB granularity. Got: %s",
BULK_FLUSH_MAX_SIZE_OPTION.key(),
config.getBulkFlushMaxByteSize().toHumanReadableString()));
validate(
config.getBulkFlushBackoffRetries().map(retries -> retries >= 1).orElse(true),
() ->
String.format(
"'%s' must be at least 1. Got: %s",
BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION.key(),
config.getBulkFlushBackoffRetries().get()));
if (config.getUsername().isPresent()
&& !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())) {
validate(
config.getPassword().isPresent()
&& !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get()),
() ->
String.format(
"'%s' and '%s' must be set at the same time. Got: username '%s' and password '%s'",
USERNAME_OPTION.key(),
PASSWORD_OPTION.key(),
config.getUsername().get(),
config.getPassword().orElse("")));
}
}
static void validate(boolean condition, Supplier<String> message) {
if (!condition) {
throw new ValidationException(message.get());
}
}
List<LogicalTypeWithIndex> getPrimaryKeyLogicalTypesWithIndex(Context context) {
DataType physicalRowDataType = context.getPhysicalRowDataType();
int[] primaryKeyIndexes = context.getPrimaryKeyIndexes();
if (primaryKeyIndexes.length != 0) {
DataType pkDataType = Projection.of(primaryKeyIndexes).project(physicalRowDataType);
ElasticsearchValidationUtils.validatePrimaryKey(pkDataType);
}
ResolvedSchema resolvedSchema = context.getCatalogTable().getResolvedSchema();
return Arrays.stream(primaryKeyIndexes)
.mapToObj(
index -> {
Optional<Column> column = resolvedSchema.getColumn(index);
if (!column.isPresent()) {
throw new IllegalStateException(
String.format(
"No primary key column found with index '%s'.",
index));
}
LogicalType logicalType = column.get().getDataType().getLogicalType();
return new LogicalTypeWithIndex(index, logicalType);
})
.collect(Collectors.toList());
}
@Override
public Set<ConfigOption<?>> requiredOptions() {
return Stream.of(HOSTS_OPTION, INDEX_OPTION).collect(Collectors.toSet());
}
@Override
public Set<ConfigOption<?>> optionalOptions() {
return Stream.of(
KEY_DELIMITER_OPTION,
BULK_FLUSH_MAX_SIZE_OPTION,
BULK_FLUSH_MAX_ACTIONS_OPTION,
BULK_FLUSH_INTERVAL_OPTION,
BULK_FLUSH_BACKOFF_TYPE_OPTION,
BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION,
BULK_FLUSH_BACKOFF_DELAY_OPTION,
CONNECTION_PATH_PREFIX_OPTION,
CONNECTION_REQUEST_TIMEOUT,
CONNECTION_TIMEOUT,
SOCKET_TIMEOUT,
FORMAT_OPTION,
DELIVERY_GUARANTEE_OPTION,
PASSWORD_OPTION,
USERNAME_OPTION,
SINK_PARALLELISM)
.collect(Collectors.toSet());
}
@Override
public Set<ConfigOption<?>> forwardOptions() {
return Stream.of(
HOSTS_OPTION,
INDEX_OPTION,
PASSWORD_OPTION,
USERNAME_OPTION,
KEY_DELIMITER_OPTION,
BULK_FLUSH_MAX_ACTIONS_OPTION,
BULK_FLUSH_MAX_SIZE_OPTION,
BULK_FLUSH_INTERVAL_OPTION,
BULK_FLUSH_BACKOFF_TYPE_OPTION,
BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION,
BULK_FLUSH_BACKOFF_DELAY_OPTION,
CONNECTION_PATH_PREFIX_OPTION,
CONNECTION_REQUEST_TIMEOUT,
CONNECTION_TIMEOUT,
SOCKET_TIMEOUT)
.collect(Collectors.toSet());
}
    /** Returns the connector identifier this factory is registered under (set by the subclass). */
    @Override
    public String factoryIdentifier() {
        return factoryIdentifier;
    }
}
| 5,814 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.elasticsearch.sink.FlushBackoffType;
import org.apache.flink.table.api.ValidationException;
import org.apache.http.HttpHost;
import java.time.Duration;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_SIZE_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_REQUEST_TIMEOUT;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_TIMEOUT;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.DELIVERY_GUARANTEE_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.HOSTS_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.INDEX_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.KEY_DELIMITER_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.PASSWORD_OPTION;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.SOCKET_TIMEOUT;
import static org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.USERNAME_OPTION;
import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Elasticsearch base configuration.
 *
 * <p>Read-only, typed view over a {@link ReadableConfig} holding the connector options. Options
 * without a configured value are surfaced as {@link Optional}.
 */
@Internal
class ElasticsearchConfiguration {
    protected final ReadableConfig config;

    ElasticsearchConfiguration(ReadableConfig config) {
        this.config = checkNotNull(config);
    }

    /** Maximum number of buffered actions per bulk request. */
    public int getBulkFlushMaxActions() {
        return config.get(BULK_FLUSH_MAX_ACTIONS_OPTION);
    }

    /** Maximum total payload size buffered before a bulk flush. */
    public MemorySize getBulkFlushMaxByteSize() {
        return config.get(BULK_FLUSH_MAX_SIZE_OPTION);
    }

    /** Bulk flush interval in milliseconds. */
    public long getBulkFlushInterval() {
        return config.get(BULK_FLUSH_INTERVAL_OPTION).toMillis();
    }

    public DeliveryGuarantee getDeliveryGuarantee() {
        return config.get(DELIVERY_GUARANTEE_OPTION);
    }

    public Optional<String> getUsername() {
        return config.getOptional(USERNAME_OPTION);
    }

    public Optional<String> getPassword() {
        return config.getOptional(PASSWORD_OPTION);
    }

    public Optional<FlushBackoffType> getBulkFlushBackoffType() {
        return config.getOptional(BULK_FLUSH_BACKOFF_TYPE_OPTION);
    }

    public Optional<Integer> getBulkFlushBackoffRetries() {
        return config.getOptional(BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION);
    }

    /** Backoff delay between bulk flush retries, in milliseconds. */
    public Optional<Long> getBulkFlushBackoffDelay() {
        return config.getOptional(BULK_FLUSH_BACKOFF_DELAY_OPTION).map(Duration::toMillis);
    }

    public String getIndex() {
        return config.get(INDEX_OPTION);
    }

    /** Delimiter used when concatenating key fields into a document id. */
    public String getKeyDelimiter() {
        return config.get(KEY_DELIMITER_OPTION);
    }

    public Optional<String> getPathPrefix() {
        return config.getOptional(CONNECTION_PATH_PREFIX_OPTION);
    }

    public Optional<Duration> getConnectionRequestTimeout() {
        return config.getOptional(CONNECTION_REQUEST_TIMEOUT);
    }

    public Optional<Duration> getConnectionTimeout() {
        return config.getOptional(CONNECTION_TIMEOUT);
    }

    public Optional<Duration> getSocketTimeout() {
        return config.getOptional(SOCKET_TIMEOUT);
    }

    /**
     * Parses and validates all configured host strings.
     *
     * @throws ValidationException if any host string cannot be parsed or misses scheme/port
     */
    public List<HttpHost> getHosts() {
        return config.get(HOSTS_OPTION).stream()
                .map(ElasticsearchConfiguration::validateAndParseHostsString)
                .collect(Collectors.toList());
    }

    public Optional<Integer> getParallelism() {
        return config.getOptional(SINK_PARALLELISM);
    }

    /**
     * Parses a single host string such as {@code http://host:9200} into an {@link HttpHost}.
     *
     * @throws ValidationException if the host cannot be parsed or the scheme/port is missing
     */
    private static HttpHost validateAndParseHostsString(String host) {
        final HttpHost httpHost;
        try {
            httpHost = HttpHost.create(host);
        } catch (Exception e) {
            throw new ValidationException(
                    String.format(
                            "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'.",
                            host, HOSTS_OPTION.key()),
                    e);
        }
        // Validate outside the try block: previously these specific failures were immediately
        // caught by the generic catch above and re-wrapped, hiding the precise "Missing port."
        // / "Missing scheme." message behind the generic one.
        if (httpHost.getPort() < 0) {
            throw new ValidationException(
                    String.format(
                            "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing port.",
                            host, HOSTS_OPTION.key()));
        }
        if (httpHost.getSchemeName() == null) {
            throw new ValidationException(
                    String.format(
                            "Could not parse host '%s' in option '%s'. It should follow the format 'http://host_name:port'. Missing scheme.",
                            host, HOSTS_OPTION.key()));
        }
        return httpHost;
    }
}
| 5,815 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchSinkBuilderSupplier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.connector.elasticsearch.sink.ElasticsearchSinkBuilderBase;
import java.util.function.Supplier;
/**
 * Supplier of the version-specific {@link ElasticsearchSinkBuilderBase} used to assemble a sink
 * for records of type {@code T}.
 */
interface ElasticsearchSinkBuilderSupplier<T>
        extends Supplier<ElasticsearchSinkBuilderBase<T, ? extends ElasticsearchSinkBuilderBase>> {}
| 5,816 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/LogicalTypeWithIndex.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.table.types.logical.LogicalType;
/** Pairs the logical type of a column with its position in the physical row. */
class LogicalTypeWithIndex {
    // Position of the column within the physical row.
    public final int index;
    // Logical type of the column at that position.
    public final LogicalType logicalType;
    LogicalTypeWithIndex(int index, LogicalType logicalType) {
        this.index = index;
        this.logicalType = logicalType;
    }
}
| 5,817 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/IndexGeneratorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import java.util.Objects;
/**
 * Base class for {@link IndexGenerator} implementations. Stores the raw index pattern and
 * provides equality based solely on that pattern.
 */
@Internal
public abstract class IndexGeneratorBase implements IndexGenerator {
    private static final long serialVersionUID = 1L;
    protected final String index;

    public IndexGeneratorBase(String index) {
        this.index = index;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (o instanceof IndexGeneratorBase) {
            IndexGeneratorBase other = (IndexGeneratorBase) o;
            return index.equals(other.index);
        }
        return false;
    }

    @Override
    public int hashCode() {
        return Objects.hash(index);
    }
}
| 5,818 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/IndexGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.Row;
import java.io.Serializable;
/** This interface is responsible for generating an index name from a given {@link RowData} record. */
@Internal
interface IndexGenerator extends Serializable {
    /**
     * Initialize the index generator, this will be called only once before {@link
     * #generate(RowData)} is called.
     */
    default void open() {}
    /** Generate index name according to the given row. */
    String generate(RowData row);
}
| 5,819 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchValidationUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.DistinctType;
import org.apache.flink.table.types.logical.LogicalTypeFamily;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/** Utility methods for validating Elasticsearch properties. */
@Internal
class ElasticsearchValidationUtils {

    /** Type roots that can safely be concatenated into an Elasticsearch document id. */
    private static final Set<LogicalTypeRoot> ALLOWED_PRIMARY_KEY_TYPES = new LinkedHashSet<>();

    static {
        // character types
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.CHAR);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.VARCHAR);
        // boolean and numeric types
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.BOOLEAN);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.DECIMAL);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TINYINT);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.SMALLINT);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.INTEGER);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.BIGINT);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.FLOAT);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.DOUBLE);
        // time-related types
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.DATE);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.INTERVAL_YEAR_MONTH);
        ALLOWED_PRIMARY_KEY_TYPES.add(LogicalTypeRoot.INTERVAL_DAY_TIME);
    }

    /**
     * Checks that the table does not have a primary key defined on illegal types. In Elasticsearch
     * the primary key is used to calculate the Elasticsearch document id, which is a string of up
     * to 512 bytes. It cannot have whitespaces. As of now it is calculated by concatenating the
     * fields. Certain types do not have a good string representation to be used in this scenario.
     * The illegal types are mostly {@link LogicalTypeFamily#COLLECTION} types and {@link
     * LogicalTypeRoot#RAW} type.
     */
    public static void validatePrimaryKey(DataType primaryKeyDataType) {
        // Resolve each key field to its (source) type root and keep only the disallowed ones.
        List<LogicalTypeRoot> illegalTypes =
                DataType.getFieldDataTypes(primaryKeyDataType).stream()
                        .map(DataType::getLogicalType)
                        .map(
                                fieldType ->
                                        fieldType.is(LogicalTypeRoot.DISTINCT_TYPE)
                                                ? ((DistinctType) fieldType)
                                                        .getSourceType()
                                                        .getTypeRoot()
                                                : fieldType.getTypeRoot())
                        .filter(typeRoot -> !ALLOWED_PRIMARY_KEY_TYPES.contains(typeRoot))
                        .collect(Collectors.toList());
        if (!illegalTypes.isEmpty()) {
            throw new ValidationException(
                    String.format(
                            "The table has a primary key on columns of illegal types: %s.",
                            illegalTypes));
        }
    }

    private ElasticsearchValidationUtils() {}
}
| 5,820 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/AbstractTimeIndexGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import java.time.format.DateTimeFormatter;
/** Abstract class for time related {@link IndexGenerator}. */
@Internal
abstract class AbstractTimeIndexGenerator extends IndexGeneratorBase {
    // Pattern string is kept so the formatter can be rebuilt after deserialization.
    private final String dateTimeFormat;
    // Transient: rebuilt from dateTimeFormat in open() rather than being serialized.
    protected transient DateTimeFormatter dateTimeFormatter;
    public AbstractTimeIndexGenerator(String index, String dateTimeFormat) {
        super(index);
        this.dateTimeFormat = dateTimeFormat;
    }
    /** Builds the formatter; called once before any {@code generate(...)} call. */
    @Override
    public void open() {
        this.dateTimeFormatter = DateTimeFormatter.ofPattern(dateTimeFormat);
    }
}
| 5,821 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/IndexGeneratorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import javax.annotation.Nonnull;
import java.io.Serializable;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Factory of {@link IndexGenerator}.
*
* <p>Flink supports both static index and dynamic index.
*
* <p>If you want to have a static index, this option value should be a plain string, e.g.
* 'myusers', all the records will be consistently written into "myusers" index.
*
* <p>If you want to have a dynamic index, you can use '{field_name}' to reference a field value in
* the record to dynamically generate a target index. You can also use
* '{field_name|date_format_string}' to convert a field value of TIMESTAMP/DATE/TIME type into the
* format specified by date_format_string. The date_format_string is compatible with {@link
* java.text.SimpleDateFormat}. For example, if the option value is 'myusers_{log_ts|yyyy-MM-dd}',
* then a record with log_ts field value 2020-03-27 12:25:55 will be written into
* "myusers_2020-03-27" index.
*/
@Internal
final class IndexGeneratorFactory {
    private IndexGeneratorFactory() {}
    /**
     * Creates an {@link IndexGenerator} for the given index pattern. Returns a {@link
     * StaticIndexGenerator} when the pattern contains no '{...}' placeholder; otherwise a
     * runtime generator that derives the index name per record (or from system time).
     */
    public static IndexGenerator createIndexGenerator(
            String index,
            List<String> fieldNames,
            List<DataType> dataTypes,
            ZoneId localTimeZoneId) {
        final IndexHelper indexHelper = new IndexHelper();
        if (indexHelper.checkIsDynamicIndex(index)) {
            return createRuntimeIndexGenerator(
                    index,
                    fieldNames.toArray(new String[0]),
                    dataTypes.toArray(new DataType[0]),
                    indexHelper,
                    localTimeZoneId);
        } else {
            return new StaticIndexGenerator(index);
        }
    }
    /** Convenience overload that resolves time-based patterns in the JVM's default time zone. */
    public static IndexGenerator createIndexGenerator(
            String index, List<String> fieldNames, List<DataType> dataTypes) {
        return createIndexGenerator(index, fieldNames, dataTypes, ZoneId.systemDefault());
    }
    /** Formats a non-null field value into the index name fragment using the given formatter. */
    interface DynamicFormatter extends Serializable {
        String format(@Nonnull Object fieldValue, DateTimeFormatter formatter);
    }
    private static IndexGenerator createRuntimeIndexGenerator(
            String index,
            String[] fieldNames,
            DataType[] fieldTypes,
            IndexHelper indexHelper,
            ZoneId localTimeZoneId) {
        // Split the pattern into the static prefix/suffix around the single '{...}' placeholder.
        final String dynamicIndexPatternStr = indexHelper.extractDynamicIndexPatternStr(index);
        final String indexPrefix = index.substring(0, index.indexOf(dynamicIndexPatternStr));
        final String indexSuffix =
                index.substring(indexPrefix.length() + dynamicIndexPatternStr.length());
        // Case 1: index derived from current system time, e.g. 'users_{now()|yyyy-MM-dd}'.
        if (indexHelper.checkIsDynamicIndexWithSystemTimeFormat(index)) {
            final String dateTimeFormat =
                    indexHelper.extractDateFormat(
                            index, LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            return new AbstractTimeIndexGenerator(index, dateTimeFormat) {
                @Override
                public String generate(RowData row) {
                    return indexPrefix
                            .concat(LocalDateTime.now(localTimeZoneId).format(dateTimeFormatter))
                            .concat(indexSuffix);
                }
            };
        }
        final boolean isDynamicIndexWithFormat = indexHelper.checkIsDynamicIndexWithFormat(index);
        final int indexFieldPos =
                indexHelper.extractIndexFieldPos(index, fieldNames, isDynamicIndexWithFormat);
        final LogicalType indexFieldType = fieldTypes[indexFieldPos].getLogicalType();
        final LogicalTypeRoot indexFieldLogicalTypeRoot = indexFieldType.getTypeRoot();
        // validate index field type
        indexHelper.validateIndexFieldType(indexFieldLogicalTypeRoot);
        // time extract dynamic index pattern
        final RowData.FieldGetter fieldGetter =
                RowData.createFieldGetter(indexFieldType, indexFieldPos);
        // Case 2: index derived from a time-typed record field, e.g. 'users_{log_ts|yyyy-MM-dd}'.
        if (isDynamicIndexWithFormat) {
            final String dateTimeFormat =
                    indexHelper.extractDateFormat(index, indexFieldLogicalTypeRoot);
            DynamicFormatter formatFunction =
                    createFormatFunction(
                            indexFieldType, indexFieldLogicalTypeRoot, localTimeZoneId);
            return new AbstractTimeIndexGenerator(index, dateTimeFormat) {
                @Override
                public String generate(RowData row) {
                    Object fieldOrNull = fieldGetter.getFieldOrNull(row);
                    final String formattedField;
                    // TODO we can possibly optimize it to use the nullability of the field
                    if (fieldOrNull != null) {
                        formattedField = formatFunction.format(fieldOrNull, dateTimeFormatter);
                    } else {
                        formattedField = "null";
                    }
                    return indexPrefix.concat(formattedField).concat(indexSuffix);
                }
            };
        }
        // general dynamic index pattern
        // Case 3: index derived from a non-time field's string representation.
        return new IndexGeneratorBase(index) {
            @Override
            public String generate(RowData row) {
                Object indexField = fieldGetter.getFieldOrNull(row);
                return indexPrefix
                        .concat(indexField == null ? "null" : indexField.toString())
                        .concat(indexSuffix);
            }
        };
    }
    // Maps each supported time type root to a serializable value-to-string formatter.
    private static DynamicFormatter createFormatFunction(
            LogicalType indexFieldType,
            LogicalTypeRoot indexFieldLogicalTypeRoot,
            ZoneId localTimeZoneId) {
        switch (indexFieldLogicalTypeRoot) {
            case DATE:
                // Internal DATE representation is days since epoch (see ofEpochDay).
                return (value, dateTimeFormatter) -> {
                    Integer indexField = (Integer) value;
                    return LocalDate.ofEpochDay(indexField).format(dateTimeFormatter);
                };
            case TIME_WITHOUT_TIME_ZONE:
                // Internal TIME representation is millis-of-day; converted to nanos here.
                return (value, dateTimeFormatter) -> {
                    Integer indexField = (Integer) value;
                    return LocalTime.ofNanoOfDay(indexField * 1_000_000L).format(dateTimeFormatter);
                };
            case TIMESTAMP_WITHOUT_TIME_ZONE:
                return (value, dateTimeFormatter) -> {
                    TimestampData indexField = (TimestampData) value;
                    return indexField.toLocalDateTime().format(dateTimeFormatter);
                };
            case TIMESTAMP_WITH_TIME_ZONE:
                throw new UnsupportedOperationException(
                        "TIMESTAMP_WITH_TIME_ZONE is not supported yet");
            case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
                // Rendered in the session-local time zone passed to the factory.
                return (value, dateTimeFormatter) -> {
                    TimestampData indexField = (TimestampData) value;
                    return indexField.toInstant().atZone(localTimeZoneId).format(dateTimeFormatter);
                };
            default:
                throw new TableException(
                        String.format(
                                "Unsupported type '%s' found in Elasticsearch dynamic index field, "
                                        + "time-related pattern only support types are: DATE,TIME,TIMESTAMP.",
                                indexFieldType));
        }
    }
    /**
     * Helper class for {@link IndexGeneratorFactory}; validates the index field type and parses
     * the index format out of the pattern string.
     */
    static class IndexHelper {
        // Matches a single '{...}' placeholder (the closing brace is optional in the regex).
        private static final Pattern dynamicIndexPattern = Pattern.compile("\\{[^\\{\\}]+\\}?");
        // Matches patterns of the shape '...{field|format}...'.
        private static final Pattern dynamicIndexTimeExtractPattern =
                Pattern.compile(".*\\{.+\\|.*\\}.*");
        // Matches patterns that reference system time, e.g. '{now()|...}' or '{CURRENT_TIMESTAMP|...}'.
        private static final Pattern dynamicIndexSystemTimeExtractPattern =
                Pattern.compile(
                        ".*\\{\\s*(now\\(\\s*\\)|NOW\\(\\s*\\)|current_timestamp|CURRENT_TIMESTAMP)\\s*\\|.*\\}.*");
        private static final List<LogicalTypeRoot> supportedTypes = new ArrayList<>();
        private static final Map<LogicalTypeRoot, String> defaultFormats = new HashMap<>();
        static {
            // time related types
            supportedTypes.add(LogicalTypeRoot.DATE);
            supportedTypes.add(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE);
            supportedTypes.add(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
            // general types
            supportedTypes.add(LogicalTypeRoot.VARCHAR);
            supportedTypes.add(LogicalTypeRoot.CHAR);
            supportedTypes.add(LogicalTypeRoot.TINYINT);
            supportedTypes.add(LogicalTypeRoot.INTEGER);
            supportedTypes.add(LogicalTypeRoot.BIGINT);
        }
        static {
            // Defaults applied when the pattern gives an empty format, e.g. '{ts|}'.
            defaultFormats.put(LogicalTypeRoot.DATE, "yyyy_MM_dd");
            defaultFormats.put(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE, "HH_mm_ss");
            defaultFormats.put(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE, "yyyy_MM_dd_HH_mm_ss");
            defaultFormats.put(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE, "yyyy_MM_dd_HH_mm_ss");
            defaultFormats.put(
                    LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE, "yyyy_MM_dd_HH_mm_ssX");
        }
        /** Validate the index field Type. */
        void validateIndexFieldType(LogicalTypeRoot logicalType) {
            if (!supportedTypes.contains(logicalType)) {
                throw new IllegalArgumentException(
                        String.format(
                                "Unsupported type %s of index field, " + "Supported types are: %s",
                                logicalType, supportedTypes));
            }
        }
        /** Get the default date format. */
        String getDefaultFormat(LogicalTypeRoot logicalType) {
            return defaultFormats.get(logicalType);
        }
        /** Check general dynamic index is enabled or not by index pattern. */
        boolean checkIsDynamicIndex(String index) {
            final Matcher matcher = dynamicIndexPattern.matcher(index);
            int count = 0;
            while (matcher.find()) {
                count++;
            }
            // At most one placeholder is allowed; more than one is rejected outright.
            if (count > 1) {
                throw new TableException(
                        String.format(
                                "Chaining dynamic index pattern %s is not supported,"
                                        + " only support single dynamic index pattern.",
                                index));
            }
            return count == 1;
        }
        /** Check time extract dynamic index is enabled or not by index pattern. */
        boolean checkIsDynamicIndexWithFormat(String index) {
            return dynamicIndexTimeExtractPattern.matcher(index).matches();
        }
        /** Check generate dynamic index is from system time or not. */
        boolean checkIsDynamicIndexWithSystemTimeFormat(String index) {
            return dynamicIndexSystemTimeExtractPattern.matcher(index).matches();
        }
        /** Extract dynamic index pattern string from index pattern string. */
        String extractDynamicIndexPatternStr(String index) {
            int start = index.indexOf("{");
            int end = index.lastIndexOf("}");
            return index.substring(start, end + 1);
        }
        /** Extract index field position in a fieldNames, return the field position. */
        int extractIndexFieldPos(
                String index, String[] fieldNames, boolean isDynamicIndexWithFormat) {
            List<String> fieldList = Arrays.asList(fieldNames);
            String indexFieldName;
            // With a format, the field name ends at '|'; otherwise it ends at '}'.
            if (isDynamicIndexWithFormat) {
                indexFieldName = index.substring(index.indexOf("{") + 1, index.indexOf("|"));
            } else {
                indexFieldName = index.substring(index.indexOf("{") + 1, index.indexOf("}"));
            }
            if (!fieldList.contains(indexFieldName)) {
                throw new TableException(
                        String.format(
                                "Unknown field '%s' in index pattern '%s', please check the field name.",
                                indexFieldName, index));
            }
            return fieldList.indexOf(indexFieldName);
        }
        /** Extract dateTime format by the date format that extracted from index pattern string. */
        private String extractDateFormat(String index, LogicalTypeRoot logicalType) {
            String format = index.substring(index.indexOf("|") + 1, index.indexOf("}"));
            if ("".equals(format)) {
                format = getDefaultFormat(logicalType);
            }
            return format;
        }
    }
}
| 5,822 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSink.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.connector.elasticsearch.sink.ElasticsearchSink;
import org.apache.flink.connector.elasticsearch.sink.ElasticsearchSinkBuilderBase;
import org.apache.flink.connector.elasticsearch.sink.FlushBackoffType;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.StringUtils;
import org.apache.http.HttpHost;
import org.elasticsearch.common.xcontent.XContentType;
import javax.annotation.Nullable;
import java.time.ZoneId;
import java.util.List;
import java.util.Objects;
import java.util.function.Function;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * A {@link DynamicTableSink} that describes how to create a {@link ElasticsearchSink} from a
 * logical description.
 */
@Internal
class ElasticsearchDynamicSink implements DynamicTableSink {

    final EncodingFormat<SerializationSchema<RowData>> format;
    final DataType physicalRowDataType;
    final List<LogicalTypeWithIndex> primaryKeyLogicalTypesWithIndex;
    final ElasticsearchConfiguration config;
    // Zone used to resolve time-based dynamic index patterns. It influences the generated
    // index names, so it participates in equals()/hashCode() below.
    final ZoneId localTimeZoneId;
    final String summaryString;
    final ElasticsearchSinkBuilderSupplier<RowData> builderSupplier;
    // Only meaningful for Elasticsearch versions that still support document types.
    @Nullable final String documentType;
    final boolean isDynamicIndexWithSystemTime;

    ElasticsearchDynamicSink(
            EncodingFormat<SerializationSchema<RowData>> format,
            ElasticsearchConfiguration config,
            List<LogicalTypeWithIndex> primaryKeyLogicalTypesWithIndex,
            DataType physicalRowDataType,
            String summaryString,
            ElasticsearchSinkBuilderSupplier<RowData> builderSupplier,
            @Nullable String documentType,
            ZoneId localTimeZoneId) {
        this.format = checkNotNull(format);
        this.physicalRowDataType = checkNotNull(physicalRowDataType);
        this.primaryKeyLogicalTypesWithIndex = checkNotNull(primaryKeyLogicalTypesWithIndex);
        this.config = checkNotNull(config);
        this.summaryString = checkNotNull(summaryString);
        this.builderSupplier = checkNotNull(builderSupplier);
        this.documentType = documentType;
        this.localTimeZoneId = localTimeZoneId;
        // NOTE(review): this invokes an overridable instance method from the constructor;
        // harmless today since no subclass overrides it, but consider making it final.
        this.isDynamicIndexWithSystemTime = isDynamicIndexWithSystemTime();
    }

    /** Returns whether the configured index pattern is resolved against the system time. */
    public boolean isDynamicIndexWithSystemTime() {
        IndexGeneratorFactory.IndexHelper indexHelper = new IndexGeneratorFactory.IndexHelper();
        return indexHelper.checkIsDynamicIndexWithSystemTimeFormat(config.getIndex());
    }

    /** Builds the function that derives the document id from a row's primary key fields. */
    Function<RowData, String> createKeyExtractor() {
        return KeyExtractor.createKeyExtractor(
                primaryKeyLogicalTypesWithIndex, config.getKeyDelimiter());
    }

    /** Builds the generator that resolves the (possibly dynamic) target index per row. */
    IndexGenerator createIndexGenerator() {
        return IndexGeneratorFactory.createIndexGenerator(
                config.getIndex(),
                DataType.getFieldNames(physicalRowDataType),
                DataType.getFieldDataTypes(physicalRowDataType),
                localTimeZoneId);
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        ChangelogMode.Builder builder = ChangelogMode.newBuilder();
        for (RowKind kind : requestedMode.getContainedKinds()) {
            // UPDATE_BEFORE is dropped: writes are keyed upserts/deletes, so the "before"
            // image carries no extra information for Elasticsearch.
            if (kind != RowKind.UPDATE_BEFORE) {
                builder.addContainedKind(kind);
            }
        }
        if (isDynamicIndexWithSystemTime && !requestedMode.containsOnly(RowKind.INSERT)) {
            throw new ValidationException(
                    "Dynamic indexing based on system time only works on append only stream.");
        }
        return builder.build();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        SerializationSchema<RowData> format =
                this.format.createRuntimeEncoder(context, physicalRowDataType);

        final RowElasticsearchEmitter rowElasticsearchEmitter =
                new RowElasticsearchEmitter(
                        createIndexGenerator(),
                        format,
                        XContentType.JSON,
                        documentType,
                        createKeyExtractor());

        ElasticsearchSinkBuilderBase<RowData, ? extends ElasticsearchSinkBuilderBase> builder =
                builderSupplier.get();
        builder.setEmitter(rowElasticsearchEmitter);
        builder.setHosts(config.getHosts().toArray(new HttpHost[0]));
        builder.setDeliveryGuarantee(config.getDeliveryGuarantee());
        builder.setBulkFlushMaxActions(config.getBulkFlushMaxActions());
        builder.setBulkFlushMaxSizeMb(config.getBulkFlushMaxByteSize().getMebiBytes());
        builder.setBulkFlushInterval(config.getBulkFlushInterval());

        // Backoff settings are only consulted when a backoff type is configured; retries and
        // delay are expected to be present alongside it.
        if (config.getBulkFlushBackoffType().isPresent()) {
            FlushBackoffType backoffType = config.getBulkFlushBackoffType().get();
            int backoffMaxRetries = config.getBulkFlushBackoffRetries().get();
            long backoffDelayMs = config.getBulkFlushBackoffDelay().get();
            builder.setBulkFlushBackoffStrategy(backoffType, backoffMaxRetries, backoffDelayMs);
        }

        if (config.getUsername().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getUsername().get())) {
            builder.setConnectionUsername(config.getUsername().get());
        }

        if (config.getPassword().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getPassword().get())) {
            builder.setConnectionPassword(config.getPassword().get());
        }

        if (config.getPathPrefix().isPresent()
                && !StringUtils.isNullOrWhitespaceOnly(config.getPathPrefix().get())) {
            builder.setConnectionPathPrefix(config.getPathPrefix().get());
        }

        // Timeouts are configured as Durations but the builder expects seconds.
        if (config.getConnectionRequestTimeout().isPresent()) {
            builder.setConnectionRequestTimeout(
                    (int) config.getConnectionRequestTimeout().get().getSeconds());
        }

        if (config.getConnectionTimeout().isPresent()) {
            builder.setConnectionTimeout((int) config.getConnectionTimeout().get().getSeconds());
        }

        if (config.getSocketTimeout().isPresent()) {
            builder.setSocketTimeout((int) config.getSocketTimeout().get().getSeconds());
        }

        return SinkV2Provider.of(builder.build(), config.getParallelism().orElse(null));
    }

    @Override
    public DynamicTableSink copy() {
        return new ElasticsearchDynamicSink(
                format,
                config,
                primaryKeyLogicalTypesWithIndex,
                physicalRowDataType,
                summaryString,
                builderSupplier,
                documentType,
                localTimeZoneId);
    }

    @Override
    public String asSummaryString() {
        return summaryString;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        ElasticsearchDynamicSink that = (ElasticsearchDynamicSink) o;
        return Objects.equals(format, that.format)
                && Objects.equals(physicalRowDataType, that.physicalRowDataType)
                && Objects.equals(
                        primaryKeyLogicalTypesWithIndex, that.primaryKeyLogicalTypesWithIndex)
                && Objects.equals(config, that.config)
                && Objects.equals(summaryString, that.summaryString)
                && Objects.equals(builderSupplier, that.builderSupplier)
                && Objects.equals(documentType, that.documentType)
                // localTimeZoneId affects the generated index names (see
                // createIndexGenerator), so it must be part of equality; it was
                // previously omitted, making behaviorally different sinks compare equal.
                && Objects.equals(localTimeZoneId, that.localTimeZoneId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(
                format,
                physicalRowDataType,
                primaryKeyLogicalTypesWithIndex,
                config,
                summaryString,
                builderSupplier,
                documentType,
                localTimeZoneId);
    }
}
| 5,823 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/RowElasticsearchEmitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.apache.flink.connector.elasticsearch.sink.ElasticsearchEmitter;
import org.apache.flink.connector.elasticsearch.sink.RequestIndexer;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.data.RowData;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.SimpleUserCodeClassLoader;
import org.apache.flink.util.UserCodeClassLoader;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.xcontent.XContentType;
import javax.annotation.Nullable;
import java.util.function.Function;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** Sink function for converting upserts into Elasticsearch {@link ActionRequest}s. */
class RowElasticsearchEmitter implements ElasticsearchEmitter<RowData> {

    // Resolves the (possibly row-dependent) target index for each request.
    private final IndexGenerator indexGenerator;
    // Serializes a row into the JSON document body.
    private final SerializationSchema<RowData> serializationSchema;
    private final XContentType contentType;
    // Only meaningful for Elasticsearch versions that still support document types.
    @Nullable private final String documentType;
    // Derives the document id from the row; may return null (see processUpsert's null branch).
    private final Function<RowData, String> createKey;

    public RowElasticsearchEmitter(
            IndexGenerator indexGenerator,
            SerializationSchema<RowData> serializationSchema,
            XContentType contentType,
            @Nullable String documentType,
            Function<RowData, String> createKey) {
        this.indexGenerator = checkNotNull(indexGenerator);
        this.serializationSchema = checkNotNull(serializationSchema);
        this.contentType = checkNotNull(contentType);
        this.documentType = documentType;
        this.createKey = checkNotNull(createKey);
    }

    @Override
    public void open() {
        try {
            serializationSchema.open(
                    new SerializationSchema.InitializationContext() {
                        @Override
                        public MetricGroup getMetricGroup() {
                            // Metrics are not wired through for the table emitter.
                            return new UnregisteredMetricsGroup();
                        }

                        @Override
                        public UserCodeClassLoader getUserCodeClassLoader() {
                            return SimpleUserCodeClassLoader.create(
                                    RowElasticsearchEmitter.class.getClassLoader());
                        }
                    });
        } catch (Exception e) {
            throw new FlinkRuntimeException("Failed to initialize serialization schema.", e);
        }
        indexGenerator.open();
    }

    @Override
    public void emit(RowData element, SinkWriter.Context context, RequestIndexer indexer) {
        switch (element.getRowKind()) {
            case INSERT:
            case UPDATE_AFTER:
                processUpsert(element, indexer);
                break;
            case UPDATE_BEFORE:
            case DELETE:
                processDelete(element, indexer);
                break;
            default:
                throw new TableException("Unsupported message kind: " + element.getRowKind());
        }
    }

    /** Issues an upsert for keyed rows, or a plain index request for key-less rows. */
    private void processUpsert(RowData row, RequestIndexer indexer) {
        final byte[] document = serializationSchema.serialize(row);
        final String key = createKey.apply(row);
        if (key != null) {
            final UpdateRequest updateRequest =
                    new UpdateRequest(indexGenerator.generate(row), documentType, key)
                            .doc(document, contentType)
                            .upsert(document, contentType);
            indexer.add(updateRequest);
        } else {
            // No key available: let Elasticsearch auto-generate the document id. The previous
            // code called .id(key) here, which was always a no-op because key is null.
            final IndexRequest indexRequest =
                    new IndexRequest(indexGenerator.generate(row), documentType)
                            .source(document, contentType);
            indexer.add(indexRequest);
        }
    }

    /** Issues a delete request addressed by the row's key. */
    private void processDelete(RowData row, RequestIndexer indexer) {
        final String key = createKey.apply(row);
        final DeleteRequest deleteRequest =
                new DeleteRequest(indexGenerator.generate(row), documentType, key);
        indexer.add(deleteRequest);
    }
}
| 5,824 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-base/src/main/java/org/apache/flink/connector/elasticsearch/table/StaticIndexGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.RowData;
/** A static {@link IndexGenerator} which generate fixed index name. */
@Internal
final class StaticIndexGenerator extends IndexGeneratorBase {
public StaticIndexGenerator(String index) {
super(index);
}
public String generate(RowData row) {
return index;
}
}
| 5,825 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch7SinkE2ECase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.apache.flink.connector.testframe.junit.annotations.TestContext;
import org.apache.flink.test.resources.ResourceTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
/** End to end test for Elasticsearch7Sink based on connector testing framework. */
@SuppressWarnings("unused")
public class Elasticsearch7SinkE2ECase
extends ElasticsearchSinkE2ECaseBase<KeyValue<Integer, String>> {
private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch7SinkE2ECase.class);
public Elasticsearch7SinkE2ECase() throws Exception {}
String getElasticsearchContainerName() {
return DockerImageVersions.ELASTICSEARCH_7;
}
@TestContext
Elasticsearch7SinkExternalContextFactory contextFactory =
new Elasticsearch7SinkExternalContextFactory(
elasticsearch.getContainer(),
Arrays.asList(
ResourceTestUtils.getResource(
"dependencies/elasticsearch7-end-to-end-test.jar")
.toAbsolutePath()
.toUri()
.toURL(),
ResourceTestUtils.getResource(
"dependencies/flink-connector-test-utils.jar")
.toAbsolutePath()
.toUri()
.toURL(),
ResourceTestUtils.getResource(
"dependencies/flink-connector-elasticsearch-test-utils.jar")
.toAbsolutePath()
.toUri()
.toURL()));
}
| 5,826 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch7SinkExternalContextFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import java.net.URL;
import java.util.List;
/** Elasticsearch sink external context factory. */
class Elasticsearch7SinkExternalContextFactory
        extends ElasticsearchSinkExternalContextFactoryBase<Elasticsearch7SinkExternalContext> {

    /**
     * Creates a factory producing {@link Elasticsearch7SinkExternalContext} instances.
     *
     * @param elasticsearchContainer The Elasticsearch container.
     * @param connectorJars The connector jars.
     */
    Elasticsearch7SinkExternalContextFactory(
            ElasticsearchContainer elasticsearchContainer, List<URL> connectorJars) {
        super(elasticsearchContainer, connectorJars);
    }

    @Override
    public Elasticsearch7SinkExternalContext createExternalContext(String testName) {
        // External address is reachable from the host; internal address resolves within the
        // containerized network that Flink runs in.
        final String externalAddress = elasticsearchContainer.getHttpHostAddress();
        final String internalAddress = formatInternalAddress(elasticsearchContainer);
        return new Elasticsearch7SinkExternalContext(
                externalAddress, internalAddress, connectorJars);
    }
}
| 5,827 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch7SinkExternalContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.connector.elasticsearch.sink.Elasticsearch7SinkBuilder;
import org.apache.flink.connector.testframe.external.ExternalSystemDataReader;
import org.apache.flink.connector.testframe.external.sink.TestingSinkSettings;
import org.apache.http.HttpHost;
import java.net.URL;
import java.util.List;
class Elasticsearch7SinkExternalContext extends ElasticsearchSinkExternalContextBase {

    /**
     * Creates the Elasticsearch 7 sink test context.
     *
     * @param addressExternal Address used to reach Elasticsearch from the host machine (outside
     *     of the containerized environment).
     * @param addressInternal Address used to reach Elasticsearch from Flink; within a
     *     containerized environment this is the network alias plus the exposed port.
     * @param connectorJarPaths The connector jar paths.
     */
    Elasticsearch7SinkExternalContext(
            String addressExternal, String addressInternal, List<URL> connectorJarPaths) {
        super(addressInternal, connectorJarPaths, new Elasticsearch7Client(addressExternal));
    }

    @Override
    public Sink<KeyValue<Integer, String>> createSink(TestingSinkSettings sinkSettings) {
        // Make sure the target index exists before the job starts writing.
        client.createIndexIfDoesNotExist(indexName, 1, 0);
        final Elasticsearch7SinkBuilder<KeyValue<Integer, String>> builder =
                new Elasticsearch7SinkBuilder<>();
        builder.setHosts(HttpHost.create(this.addressInternal));
        builder.setEmitter(new ElasticsearchTestEmitter(new UpdateRequest7Factory(indexName)));
        builder.setBulkFlushMaxActions(BULK_BUFFER);
        return builder.build();
    }

    @Override
    public ExternalSystemDataReader<KeyValue<Integer, String>> createSinkDataReader(
            TestingSinkSettings sinkSettings) {
        return new ElasticsearchDataReader(client, indexName, PAGE_LENGTH);
    }

    @Override
    public String toString() {
        return "Elasticsearch 7 sink context.";
    }
}
| 5,828 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch7LookupE2ECase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
/** End-to-end test for Elasticsearch7 lookup. */
public class Elasticsearch7LookupE2ECase extends ElasticsearchLookupE2ECase {

    @Override
    String getElasticsearchContainerName() {
        return DockerImageVersions.ELASTICSEARCH_7;
    }

    @Override
    String getEsOptions() {
        // Same option string as before, assembled through a format template instead of
        // chained concatenation.
        return String.format(
                " 'connector' = 'elasticsearch-7', 'hosts' = 'http://%s',"
                        + "'index' = '%s',"
                        + "'lookup.cache' = 'partial',"
                        + "'lookup.partial-cache.max-rows' = '100'",
                elasticsearchContainer.getHttpHostAddress(),
                ES_INDEX);
    }
}
| 5,829 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/main/java/org/apache/flink/streaming/tests/Elasticsearch7Client.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.http.HttpHost;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** The type Elasticsearch 7 client. */
public class Elasticsearch7Client implements ElasticsearchClient {

    private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch7Client.class);

    private final RestHighLevelClient restClient;

    /**
     * Instantiates a new Elasticsearch 7 client.
     *
     * @param addressExternal The address to access Elasticsearch from the host machine (outside
     *     of the containerized environment).
     */
    public Elasticsearch7Client(String addressExternal) {
        checkNotNull(addressExternal);
        HttpHost httpHost = HttpHost.create(addressExternal);
        // Constructors never return null, so no post-construction null check is needed.
        this.restClient = new RestHighLevelClient(RestClient.builder(httpHost));
    }

    @Override
    public void deleteIndex(String indexName) {
        DeleteIndexRequest request = new DeleteIndexRequest(indexName);
        try {
            restClient.indices().delete(request, RequestOptions.DEFAULT);
        } catch (IOException e) {
            // Best effort: a failed delete must not abort the test run.
            LOG.error("Cannot delete index {}", indexName, e);
        }
        // This is needed to avoid race conditions between tests that reuse the same index
        refreshIndex(indexName);
    }

    @Override
    public void refreshIndex(String indexName) {
        RefreshRequest refresh = new RefreshRequest(indexName);
        try {
            restClient.indices().refresh(refresh, RequestOptions.DEFAULT);
        } catch (IOException e) {
            // Fixed copy-paste: this branch previously logged "Cannot delete index".
            LOG.error("Cannot refresh index {}", indexName, e);
        } catch (ElasticsearchException e) {
            if (e.status() == RestStatus.NOT_FOUND) {
                LOG.info("Index {} not found", indexName);
            } else {
                // Previously swallowed silently; surface unexpected failures in the logs.
                LOG.error("Cannot refresh index {}", indexName, e);
            }
        }
    }

    @Override
    public void createIndexIfDoesNotExist(String indexName, int shards, int replicas) {
        GetIndexRequest request = new GetIndexRequest(indexName);
        CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
        createIndexRequest.settings(
                Settings.builder()
                        .put("index.number_of_shards", shards)
                        .put("index.number_of_replicas", replicas));
        try {
            boolean exists = restClient.indices().exists(request, RequestOptions.DEFAULT);
            if (!exists) {
                restClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            } else {
                LOG.info("Index already exists {}", indexName);
            }
        } catch (IOException e) {
            LOG.error("Cannot create index {}", indexName, e);
        }
    }

    @Override
    public void close() throws Exception {
        restClient.close();
    }

    @Override
    public List<KeyValue<Integer, String>> fetchAll(QueryParams params) {
        try {
            SearchResponse response =
                    restClient.search(
                            new SearchRequest(params.indexName())
                                    .source(
                                            new SearchSourceBuilder()
                                                    .sort(params.sortField(), SortOrder.ASC)
                                                    .from(params.from())
                                                    .size(params.pageLength())
                                                    .trackTotalHits(params.trackTotalHits())),
                            RequestOptions.DEFAULT);
            SearchHit[] searchHits = response.getHits().getHits();
            // Each hit's id is the record key; the "value" field carries the payload.
            return Arrays.stream(searchHits)
                    .map(
                            searchHit ->
                                    KeyValue.of(
                                            Integer.valueOf(searchHit.getId()),
                                            searchHit.getSourceAsMap().get("value").toString()))
                    .collect(Collectors.toList());
        } catch (IOException e) {
            // Best effort: report the failure and return no records.
            LOG.error("Fetching records failed", e);
            return Collections.emptyList();
        }
    }
}
| 5,830 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch7-e2e-tests/src/main/java/org/apache/flink/streaming/tests/UpdateRequest7Factory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.elasticsearch.action.update.UpdateRequest;
import java.util.Map;
/** Factory for creating UpdateRequests of Elasticsearch7. */
public class UpdateRequest7Factory implements UpdateRequestFactory {

    private static final long serialVersionUID = 1L;

    private final String indexName;

    /**
     * Instantiates a new update request factory for of Elasticsearch7.
     *
     * @param indexName The index name.
     */
    public UpdateRequest7Factory(String indexName) {
        this.indexName = indexName;
    }

    @Override
    public UpdateRequest createUpdateRequest(KeyValue<Integer, String> element) {
        // The element key doubles as the document id; the prepared map is used both as the
        // partial update and as the upsert document.
        final String documentId = String.valueOf(element.key);
        final Map<String, Object> document = UpdateRequestFactory.prepareDoc(element);
        final UpdateRequest request = new UpdateRequest(indexName, documentId);
        request.doc(document);
        request.upsert(document);
        return request;
    }
}
| 5,831 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/ElasticsearchClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import java.util.List;
/** The version-agnostic Elasticsearch client interface used by the end-to-end tests. */
public interface ElasticsearchClient {
    /**
     * Deletes the given index.
     *
     * @param indexName The index name.
     */
    void deleteIndex(String indexName);
    /**
     * Refreshes the given index so that previous writes become visible to subsequent searches.
     *
     * @param indexName The index name.
     */
    void refreshIndex(String indexName);
    /**
     * Creates the index with the given settings if it does not exist yet; an existing index is
     * left untouched.
     *
     * @param indexName The index name.
     * @param shards The number of shards.
     * @param replicas The number of replicas.
     */
    void createIndexIfDoesNotExist(String indexName, int shards, int replicas);
    /**
     * Closes the client and releases its resources.
     *
     * @throws Exception if closing the underlying client fails.
     */
    void close() throws Exception;
    /**
     * Fetches all results from the index.
     *
     * @param params The parameters of the query.
     * @return All documents from the index.
     */
    List<KeyValue<Integer, String>> fetchAll(QueryParams params);
}
| 5,832 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/QueryParams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** An immutable holder for the parameters of an Elasticsearch search query. */
public class QueryParams {
    private final String indexName;
    private final String sortField;
    private final int from;
    private final int pageLength;
    private final boolean trackTotalHits;

    private QueryParams(
            String indexName, String sortField, int from, int pageLength, boolean trackTotalHits) {
        this.indexName = indexName;
        this.sortField = sortField;
        this.from = from;
        this.pageLength = pageLength;
        this.trackTotalHits = trackTotalHits;
    }

    /**
     * Creates a new {@code QueryParams} builder.
     *
     * @param indexName The index name; mandatory, must not be null.
     * @return The builder.
     */
    public static Builder newBuilder(String indexName) {
        return new Builder(indexName);
    }

    /**
     * The name of the index to query.
     *
     * @return The index name.
     */
    public String indexName() {
        return indexName;
    }

    /**
     * The field to sort results by, or {@code null} if unset.
     *
     * @return The sort field.
     */
    public String sortField() {
        return sortField;
    }

    /**
     * The offset to start the search from. Defaults to {@code 0}.
     *
     * @return The start offset.
     */
    public int from() {
        return from;
    }

    /**
     * The maximum number of results per page.
     *
     * @return The page length.
     */
    public int pageLength() {
        return pageLength;
    }

    /**
     * Whether the query should track the total hit count.
     *
     * @return {@code true} if total hits are tracked.
     */
    public boolean trackTotalHits() {
        return trackTotalHits;
    }

    /** Builder for {@code QueryParams}. */
    public static final class Builder {
        private String indexName;
        private String sortField;
        private int from;
        private int pageLength;
        private boolean trackTotalHits;

        private Builder(String indexName) {
            this.indexName = java.util.Objects.requireNonNull(indexName);
        }

        /**
         * Sets the {@code indexName}.
         *
         * @param indexName The index name; must not be null.
         * @return This builder, for chaining.
         */
        public Builder indexName(String indexName) {
            this.indexName = java.util.Objects.requireNonNull(indexName);
            return this;
        }

        /**
         * Sets the {@code sortField}.
         *
         * @param sortField The sort field; must not be null.
         * @return This builder, for chaining.
         */
        public Builder sortField(String sortField) {
            this.sortField = java.util.Objects.requireNonNull(sortField);
            return this;
        }

        /**
         * Sets the {@code from} offset.
         *
         * @param from The offset to start the search from.
         * @return This builder, for chaining.
         */
        public Builder from(int from) {
            this.from = from;
            return this;
        }

        /**
         * Sets the {@code pageLength}.
         *
         * @param pageLength The maximum number of results per page.
         * @return This builder, for chaining.
         */
        public Builder pageLength(int pageLength) {
            this.pageLength = pageLength;
            return this;
        }

        /**
         * Sets whether total hits are tracked.
         *
         * @param trackTotalHits The flag to set.
         * @return This builder, for chaining.
         */
        public Builder trackTotalHits(boolean trackTotalHits) {
            this.trackTotalHits = trackTotalHits;
            return this;
        }

        /**
         * Builds the immutable {@code QueryParams} from the values set so far.
         *
         * @return A new {@code QueryParams} instance.
         */
        public QueryParams build() {
            return new QueryParams(indexName, sortField, from, pageLength, trackTotalHits);
        }
    }
}
| 5,833 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/ElasticsearchSinkExternalContextBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.connector.testframe.external.ExternalSystemDataReader;
import org.apache.flink.connector.testframe.external.sink.DataStreamSinkV2ExternalContext;
import org.apache.flink.connector.testframe.external.sink.TestingSinkSettings;
import org.apache.commons.lang3.RandomStringUtils;
import java.net.URL;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** The base class for Elasticsearch sink external contexts used by the sink test suite. */
abstract class ElasticsearchSinkExternalContextBase
        implements DataStreamSinkV2ExternalContext<KeyValue<Integer, String>> {
    /** Prefix shared by all randomly named test indices. */
    protected static final String INDEX_NAME_PREFIX = "es-index";

    private static final int RANDOM_STRING_MAX_LENGTH = 50;
    private static final int NUM_RECORDS_UPPER_BOUND = 500;
    private static final int NUM_RECORDS_LOWER_BOUND = 100;

    /** Number of actions buffered before the sink flushes a bulk request. */
    protected static final int BULK_BUFFER = 100;

    /** Page size for reading results back; large enough to fetch every record at once. */
    protected static final int PAGE_LENGTH = NUM_RECORDS_UPPER_BOUND + 1;

    /** The index name, randomized per context instance to isolate test runs. */
    protected final String indexName;

    /** The address reachable from Flink (internal to the testing environment). */
    protected final String addressInternal;

    /** The connector jar paths. */
    protected final List<URL> connectorJarPaths;

    /** The client used to manage and read the test index. */
    protected final ElasticsearchClient client;

    /**
     * Instantiates a new Elasticsearch sink context base.
     *
     * @param addressInternal The address to access Elasticsearch from within Flink. When running in
     *     a containerized environment, should correspond to the network alias that resolves within
     *     the environment's network together with the exposed port.
     * @param connectorJarPaths The connector jar paths.
     * @param client The Elasticsearch client.
     */
    ElasticsearchSinkExternalContextBase(
            String addressInternal, List<URL> connectorJarPaths, ElasticsearchClient client) {
        this.addressInternal = checkNotNull(addressInternal);
        this.connectorJarPaths = checkNotNull(connectorJarPaths);
        this.client = checkNotNull(client);
        this.indexName =
                INDEX_NAME_PREFIX + "-" + ThreadLocalRandom.current().nextLong(Long.MAX_VALUE);
    }

    @Override
    public List<KeyValue<Integer, String>> generateTestData(
            TestingSinkSettings sinkSettings, long seed) {
        // The record count is seeded for reproducibility; the string payloads use
        // RandomStringUtils' own randomness and only their lengths come from the seeded RNG.
        final Random random = new Random(seed);
        final int recordCount =
                NUM_RECORDS_LOWER_BOUND
                        + random.nextInt(NUM_RECORDS_UPPER_BOUND - NUM_RECORDS_LOWER_BOUND);
        return IntStream.range(0, recordCount)
                .mapToObj(
                        key -> {
                            final int valueLength = random.nextInt(RANDOM_STRING_MAX_LENGTH) + 1;
                            return KeyValue.of(
                                    key, RandomStringUtils.random(valueLength, true, true));
                        })
                .collect(Collectors.toList());
    }

    @Override
    public void close() {
        // Drop the per-context index so repeated runs do not accumulate data.
        client.deleteIndex(indexName);
    }

    @Override
    public List<URL> getConnectorJarPaths() {
        return connectorJarPaths;
    }

    @Override
    public TypeInformation<KeyValue<Integer, String>> getProducedType() {
        return TypeInformation.of(new TypeHint<KeyValue<Integer, String>>() {});
    }

    @Override
    public abstract Sink<KeyValue<Integer, String>> createSink(TestingSinkSettings sinkSettings);

    @Override
    public abstract ExternalSystemDataReader<KeyValue<Integer, String>> createSinkDataReader(
            TestingSinkSettings sinkSettings);

    @Override
    public abstract String toString();
}
| 5,834 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/ElasticsearchSinkExternalContextFactoryBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.testframe.external.ExternalContext;
import org.apache.flink.connector.testframe.external.ExternalContextFactory;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import java.net.URL;
import java.util.List;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** The base class for Elasticsearch sink external-context factories. */
public abstract class ElasticsearchSinkExternalContextFactoryBase<T extends ExternalContext>
        implements ExternalContextFactory<T> {

    /** The Elasticsearch container. */
    protected final ElasticsearchContainer elasticsearchContainer;

    /** The connector jars. */
    protected final List<URL> connectorJars;

    /**
     * Instantiates a new Elasticsearch sink context factory.
     *
     * @param elasticsearchContainer The Elasticsearch container.
     * @param connectorJars The connector jars.
     */
    ElasticsearchSinkExternalContextFactoryBase(
            ElasticsearchContainer elasticsearchContainer, List<URL> connectorJars) {
        this.elasticsearchContainer = checkNotNull(elasticsearchContainer);
        this.connectorJars = checkNotNull(connectorJars);
    }

    /**
     * Formats the address under which the container is reachable from inside the container
     * network: its first network alias combined with its first exposed port.
     */
    protected static String formatInternalAddress(
            GenericContainer<ElasticsearchContainer> container) {
        final String networkAlias = container.getNetworkAliases().get(0);
        final Integer exposedPort = container.getExposedPorts().get(0);
        return networkAlias + ":" + exposedPort;
    }
}
| 5,835 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/ElasticsearchTestEmitter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.api.connector.sink2.SinkWriter;
import org.apache.flink.connector.elasticsearch.sink.ElasticsearchEmitter;
import org.apache.flink.connector.elasticsearch.sink.RequestIndexer;
import org.elasticsearch.action.update.UpdateRequest;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** Test emitter that turns each element into an Elasticsearch update (upsert) request. */
public class ElasticsearchTestEmitter implements ElasticsearchEmitter<KeyValue<Integer, String>> {

    private static final long serialVersionUID = 1L;

    // Version-specific factory; the emitter itself stays Elasticsearch-version agnostic.
    private final UpdateRequestFactory factory;

    /**
     * Instantiates a new Elasticsearch test emitter.
     *
     * @param factory The factory for creating {@link UpdateRequest}s; must not be null.
     */
    public ElasticsearchTestEmitter(UpdateRequestFactory factory) {
        this.factory = checkNotNull(factory);
    }

    @Override
    public void emit(
            KeyValue<Integer, String> element, SinkWriter.Context context, RequestIndexer indexer) {
        indexer.add(factory.createUpdateRequest(element));
    }
}
| 5,836 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/UpdateRequestFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.elasticsearch.action.update.UpdateRequest;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
/** Version-agnostic factory for creating {@link UpdateRequest}s. */
public interface UpdateRequestFactory extends Serializable {

    /**
     * Creates an {@link UpdateRequest} for the given element.
     *
     * @param element The element to be written.
     * @return The update request.
     */
    UpdateRequest createUpdateRequest(KeyValue<Integer, String> element);

    /**
     * Converts a {@link KeyValue} element into an Elasticsearch-compatible document body.
     *
     * @param element The element to be converted.
     * @return A map holding the element's {@code key} and {@code value} fields.
     */
    static Map<String, Object> prepareDoc(KeyValue<Integer, String> element) {
        final Map<String, Object> document = new HashMap<>();
        document.put("key", element.key);
        document.put("value", element.value);
        return document;
    }
}
| 5,837 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/ElasticsearchLookupE2ECase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
import org.apache.flink.util.CollectionUtil;
import org.junit.Before;
import org.junit.Test;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.flink.table.api.Expressions.$;
import static org.junit.Assert.assertEquals;
/** Base class for end to end Elasticsearch lookup. */
public abstract class ElasticsearchLookupE2ECase {
protected EnvironmentSettings streamSettings;
protected static final String DIM = "testTable1";
protected static final String ES_INDEX = "es1";
// prepare a source collection.
private static final List<Row> srcData = new ArrayList<>();
private static final RowTypeInfo testTypeInfo =
new RowTypeInfo(
new TypeInformation[] {Types.INT, Types.LONG, Types.STRING},
new String[] {"a", "b", "c"});
ElasticsearchContainer elasticsearchContainer = null;
static {
srcData.add(Row.of(1, 1L, "Hi"));
srcData.add(Row.of(2, 2L, "Hello"));
srcData.add(Row.of(3, 2L, "Hello Yubin Li"));
srcData.add(Row.of(4, 999L, "Hello Yubin Li !"));
}
abstract String getElasticsearchContainerName();
abstract String getEsOptions();
@Before
public void before() {
this.streamSettings = EnvironmentSettings.inStreamingMode();
elasticsearchContainer = new ElasticsearchContainer(getElasticsearchContainerName());
elasticsearchContainer.start();
}
@Test
public void testEsLookupTableSource() {
StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
tEnv.executeSql(
"CREATE TABLE "
+ DIM
+ " ("
+ " id int,"
+ " name string,"
+ " PRIMARY KEY (id) NOT ENFORCED"
+ ") WITH ("
+ getEsOptions()
+ ")");
tEnv.executeSql("insert into " + DIM + " values (1, 'rick')");
tEnv.executeSql("insert into " + DIM + " values (2, 'john')");
tEnv.executeSql("insert into " + DIM + " values (3, 'ted')");
// prepare a source table
String srcTableName = "src";
DataStream<Row> srcDs = execEnv.fromCollection(srcData).returns(testTypeInfo);
Table in = tEnv.fromDataStream(srcDs, $("a"), $("b"), $("c"), $("proc").proctime());
tEnv.registerTable(srcTableName, in);
// perform a temporal table join query
String dimJoinQuery =
"SELECT"
+ " a,"
+ " b,"
+ " c,"
+ " id,"
+ " name"
+ " FROM src JOIN "
+ DIM
+ " FOR SYSTEM_TIME AS OF src.proc as h ON src.a = h.id";
Iterator<Row> collected = tEnv.executeSql(dimJoinQuery).collect();
List<String> result =
CollectionUtil.iteratorToList(collected).stream()
.map(Row::toString)
.sorted()
.collect(Collectors.toList());
List<String> expected = new ArrayList<>();
expected.add("+I[1, 1, Hi, 1, rick]");
expected.add("+I[2, 2, Hello, 2, john]");
expected.add("+I[3, 2, Hello Yubin Li, 3, ted]");
assertEquals(expected, result);
}
}
| 5,838 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/ElasticsearchSinkE2ECaseBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.testframe.container.FlinkContainerTestEnvironment;
import org.apache.flink.connector.testframe.external.DefaultContainerizedExternalSystem;
import org.apache.flink.connector.testframe.external.ExternalSystemDataReader;
import org.apache.flink.connector.testframe.junit.annotations.TestEnv;
import org.apache.flink.connector.testframe.junit.annotations.TestExternalSystem;
import org.apache.flink.connector.testframe.junit.annotations.TestSemantics;
import org.apache.flink.connector.testframe.testsuites.SinkTestSuiteBase;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.flink.connector.testframe.utils.CollectIteratorAssertions.assertThat;
import static org.apache.flink.runtime.testutils.CommonTestUtils.waitUntilCondition;
/** Base class for end-to-end ElasticsearchSink tests based on the connector testing framework. */
@SuppressWarnings("unused")
public abstract class ElasticsearchSinkE2ECaseBase<T extends Comparable<T>>
        extends SinkTestSuiteBase<T> {
    private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchSinkE2ECaseBase.class);
    // Number of poll-and-compare attempts before the result check finally fails.
    private static final int READER_RETRY_ATTEMPTS = 10;
    private static final int READER_TIMEOUT = -1; // Not used
    /** Network alias under which Elasticsearch is reachable from the Flink containers. */
    protected static final String ELASTICSEARCH_HOSTNAME = "elasticsearch";
    // Delivery guarantees exercised by the test suite.
    @TestSemantics
    CheckpointingMode[] semantics = new CheckpointingMode[] {CheckpointingMode.EXACTLY_ONCE};
    // Defines TestEnvironment
    @TestEnv
    protected FlinkContainerTestEnvironment flink = new FlinkContainerTestEnvironment(1, 6);
    // Defines ConnectorExternalSystem: an Elasticsearch container sharing the Flink
    // JobManager's network, addressable via ELASTICSEARCH_HOSTNAME.
    @TestExternalSystem
    DefaultContainerizedExternalSystem<ElasticsearchContainer> elasticsearch =
            DefaultContainerizedExternalSystem.builder()
                    .fromContainer(
                            new ElasticsearchContainer(
                                            DockerImageName.parse(getElasticsearchContainerName()))
                                    // avoid spurious failures on nearly-full build machines
                                    .withEnv(
                                            "cluster.routing.allocation.disk.threshold_enabled",
                                            "false")
                                    .withNetworkAliases(ELASTICSEARCH_HOSTNAME))
                    .bindWithFlinkContainer(flink.getFlinkContainers().getJobManager())
                    .build();
    @Override
    protected void checkResultWithSemantic(
            ExternalSystemDataReader<T> reader, List<T> testData, CheckpointingMode semantic)
            throws Exception {
        // Poll and compare until the sink has flushed everything: any mismatch or reader
        // error is logged and retried, because written documents become visible only after
        // the sink's bulk requests complete.
        waitUntilCondition(
                () -> {
                    try {
                        List<T> result = reader.poll(Duration.ofMillis(READER_TIMEOUT));
                        assertThat(sort(result).iterator())
                                .matchesRecordsFromSource(
                                        Collections.singletonList(sort(testData)), semantic);
                        return true;
                    } catch (Throwable t) {
                        LOG.warn("Polled results not as expected", t);
                        return false;
                    }
                },
                5000,
                READER_RETRY_ATTEMPTS);
    }
    /** Returns a sorted copy of the list, for order-insensitive comparison. */
    private List<T> sort(List<T> list) {
        return list.stream().sorted().collect(Collectors.toList());
    }
    /** Returns the docker image name of the Elasticsearch container under test. */
    abstract String getElasticsearchContainerName();
}
| 5,839 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/ElasticsearchDataReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.testframe.external.ExternalSystemDataReader;
import java.time.Duration;
import java.util.List;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** An {@link ExternalSystemDataReader} that reads back all documents of one Elasticsearch index. */
public class ElasticsearchDataReader
        implements ExternalSystemDataReader<KeyValue<Integer, String>> {

    private final ElasticsearchClient client;
    private final String indexName;
    private final int pageLength;

    public ElasticsearchDataReader(ElasticsearchClient client, String indexName, int pageLength) {
        this.client = checkNotNull(client);
        this.indexName = checkNotNull(indexName);
        this.pageLength = pageLength;
    }

    /**
     * Refreshes the index so pending writes become searchable, then fetches every document
     * sorted by key. The {@code timeout} argument is not used by this reader.
     */
    @Override
    public List<KeyValue<Integer, String>> poll(Duration timeout) {
        client.refreshIndex(indexName);
        return client.fetchAll(
                QueryParams.newBuilder(indexName)
                        .sortField("key")
                        .pageLength(pageLength)
                        .trackTotalHits(true)
                        .build());
    }

    @Override
    public void close() throws Exception {
        client.close();
    }
}
| 5,840 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch-e2e-tests-common/src/main/java/org/apache/flink/streaming/tests/KeyValue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.util.StringUtils;
import java.io.Serializable;
import java.util.Objects;
/** A {@link Comparable} holder for key-value pairs. */
public class KeyValue<K extends Comparable<? super K>, V extends Comparable<? super V>>
implements Comparable<KeyValue<K, V>>, Serializable {
private static final long serialVersionUID = 1L;
/** The key of the key-value pair. */
public K key;
/** The value the key-value pair. */
public V value;
/** Creates a new key-value pair where all fields are null. */
public KeyValue() {}
private KeyValue(K key, V value) {
this.key = key;
this.value = value;
}
@Override
public int compareTo(KeyValue<K, V> other) {
int d = this.key.compareTo(other.key);
if (d == 0) {
return this.value.compareTo(other.value);
}
return d;
}
/** Creates a new key-value pair. */
public static <K extends Comparable<? super K>, T1 extends Comparable<? super T1>>
KeyValue<K, T1> of(K key, T1 value) {
return new KeyValue<>(key, value);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof KeyValue)) {
return false;
}
@SuppressWarnings("rawtypes")
KeyValue keyValue = (KeyValue) o;
if (key != null ? !key.equals(keyValue.key) : keyValue.key != null) {
return false;
}
if (value != null ? !value.equals(keyValue.value) : keyValue.value != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
return Objects.hash(key, value);
}
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.key)
+ ","
+ StringUtils.arrayAwareToString(this.value)
+ ")";
}
}
| 5,841 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch6SinkExternalContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.connector.elasticsearch.sink.Elasticsearch6SinkBuilder;
import org.apache.flink.connector.testframe.external.ExternalSystemDataReader;
import org.apache.flink.connector.testframe.external.sink.TestingSinkSettings;
import org.apache.http.HttpHost;
import java.net.URL;
import java.util.List;
/** Sink external context for the Elasticsearch 6 connector. */
class Elasticsearch6SinkExternalContext extends ElasticsearchSinkExternalContextBase {
    /**
     * Instantiates a new Elasticsearch 6 sink context.
     *
     * @param addressExternal The address to access Elasticsearch from the host machine (outside of
     *     the containerized environment).
     * @param addressInternal The address to access Elasticsearch from Flink. When running in a
     *     containerized environment, should correspond to the network alias that resolves within
     *     the environment's network together with the exposed port.
     * @param connectorJarPaths The connector jar paths.
     */
    Elasticsearch6SinkExternalContext(
            String addressExternal, String addressInternal, List<URL> connectorJarPaths) {
        super(addressInternal, connectorJarPaths, new Elasticsearch6Client(addressExternal));
    }

    @Override
    public Sink<KeyValue<Integer, String>> createSink(TestingSinkSettings sinkSettings) {
        // Make sure the target index exists (single shard, no replicas) before the job starts.
        client.createIndexIfDoesNotExist(indexName, 1, 0);
        final Elasticsearch6SinkBuilder<KeyValue<Integer, String>> builder =
                new Elasticsearch6SinkBuilder<>();
        return builder
                .setHosts(HttpHost.create(addressInternal))
                .setEmitter(new ElasticsearchTestEmitter(new UpdateRequest6Factory(indexName)))
                .setBulkFlushMaxActions(BULK_BUFFER)
                .build();
    }

    @Override
    public ExternalSystemDataReader<KeyValue<Integer, String>> createSinkDataReader(
            TestingSinkSettings sinkSettings) {
        return new ElasticsearchDataReader(client, indexName, PAGE_LENGTH);
    }

    @Override
    public String toString() {
        return "Elasticsearch 6 sink context.";
    }
}
| 5,842 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch6SinkE2ECase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
import org.apache.flink.connector.testframe.junit.annotations.TestContext;
import org.apache.flink.test.resources.ResourceTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
/** End to end test for Elasticsearch6Sink based on connector testing framework. */
@SuppressWarnings("unused")
public class Elasticsearch6SinkE2ECase
        extends ElasticsearchSinkE2ECaseBase<KeyValue<Integer, String>> {
    private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch6SinkE2ECase.class);
    // NOTE(review): presumably the base-class constructor declares a checked exception;
    // confirm against ElasticsearchSinkE2ECaseBase before removing "throws Exception".
    public Elasticsearch6SinkE2ECase() throws Exception {}
    // Selects the Docker image used for the Elasticsearch container in this test.
    // NOTE(review): looks like it overrides a base-class hook — consider adding @Override
    // after confirming the method exists on ElasticsearchSinkE2ECaseBase.
    String getElasticsearchContainerName() {
        return DockerImageVersions.ELASTICSEARCH_6;
    }
    // External-context factory discovered by the test framework via @TestContext.
    // The three jars below are shipped with the test job: the e2e test jar itself
    // plus the two test-utility jars it depends on.
    @TestContext
    Elasticsearch6SinkExternalContextFactory contextFactory =
            new Elasticsearch6SinkExternalContextFactory(
                    elasticsearch.getContainer(),
                    Arrays.asList(
                            ResourceTestUtils.getResource(
                                            "dependencies/elasticsearch6-end-to-end-test.jar")
                                    .toAbsolutePath()
                                    .toUri()
                                    .toURL(),
                            ResourceTestUtils.getResource(
                                            "dependencies/flink-connector-test-utils.jar")
                                    .toAbsolutePath()
                                    .toUri()
                                    .toURL(),
                            ResourceTestUtils.getResource(
                                            "dependencies/flink-connector-elasticsearch-test-utils.jar")
                                    .toAbsolutePath()
                                    .toUri()
                                    .toURL()));
}
| 5,843 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch6SinkExternalContextFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import java.net.URL;
import java.util.List;
/** Factory that provisions {@link Elasticsearch6SinkExternalContext} instances for tests. */
class Elasticsearch6SinkExternalContextFactory
        extends ElasticsearchSinkExternalContextFactoryBase<Elasticsearch6SinkExternalContext> {
    /**
     * Instantiates a new Elasticsearch 6 sink external context factory.
     *
     * @param elasticsearchContainer The Elasticsearch container.
     * @param connectorJars The connector jars.
     */
    Elasticsearch6SinkExternalContextFactory(
            ElasticsearchContainer elasticsearchContainer, List<URL> connectorJars) {
        super(elasticsearchContainer, connectorJars);
    }

    @Override
    public Elasticsearch6SinkExternalContext createExternalContext(String testName) {
        // Host-visible address for the test client, container-network address for Flink.
        String externalAddress = elasticsearchContainer.getHttpHostAddress();
        String internalAddress = formatInternalAddress(elasticsearchContainer);
        return new Elasticsearch6SinkExternalContext(
                externalAddress, internalAddress, connectorJars);
    }
}
| 5,844 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/test/java/org/apache/flink/streaming/tests/Elasticsearch6LookupE2ECase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.flink.connector.elasticsearch.test.DockerImageVersions;
/** End-to-end test for Elasticsearch6 lookup. */
public class Elasticsearch6LookupE2ECase extends ElasticsearchLookupE2ECase {

    @Override
    String getElasticsearchContainerName() {
        return DockerImageVersions.ELASTICSEARCH_6;
    }

    @Override
    String getEsOptions() {
        // Assemble the DDL connector options. The produced string is identical to the
        // former concatenation-based implementation.
        StringBuilder options = new StringBuilder();
        options.append(" 'connector' = 'elasticsearch-6',");
        options.append(" 'hosts' = '");
        options.append("http://");
        options.append(elasticsearchContainer.getHttpHostAddress());
        options.append("',");
        options.append("'document-type' = '_doc',");
        options.append("'index' = '");
        options.append(ES_INDEX);
        options.append("',");
        options.append("'lookup.cache' = 'partial',");
        options.append("'lookup.partial-cache.max-rows' = '100'");
        return options.toString();
    }
}
| 5,845 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/main/java/org/apache/flink/streaming/tests/Elasticsearch6Client.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.apache.http.HttpHost;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
 * Elasticsearch 6 client used by the end-to-end tests.
 *
 * <p>Thin wrapper around the high-level REST client that exposes the index-management and
 * search operations the tests need. I/O failures are logged and swallowed, so every
 * operation is best-effort from the caller's perspective.
 */
public class Elasticsearch6Client implements ElasticsearchClient {
    private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch6Client.class);
    private final RestHighLevelClient restClient;
    /**
     * Instantiates a new Elasticsearch 6 client.
     *
     * @param addressExternal The address to access Elasticsearch from the host machine (outside of
     *     the containerized environment).
     */
    public Elasticsearch6Client(String addressExternal) {
        checkNotNull(addressExternal);
        HttpHost httpHost = HttpHost.create(addressExternal);
        RestClientBuilder restClientBuilder = RestClient.builder(httpHost);
        this.restClient = new RestHighLevelClient(restClientBuilder);
        // NOTE(review): redundant — the assignment above cannot produce null.
        checkNotNull(restClient);
    }
    /**
     * Deletes the given index and refreshes it afterwards; failures are logged, not thrown.
     *
     * @param indexName the index to delete
     */
    @Override
    public void deleteIndex(String indexName) {
        DeleteIndexRequest request = new DeleteIndexRequest(indexName);
        try {
            restClient.indices().delete(request, RequestOptions.DEFAULT);
        } catch (IOException e) {
            LOG.error("Cannot delete index {}", indexName, e);
        }
        // This is needed to avoid race conditions between tests that reuse the same index
        refreshIndex(indexName);
    }
    /**
     * Forces a refresh of the given index so recent writes become visible to search.
     * A missing index is logged at INFO level; I/O errors at ERROR level.
     *
     * @param indexName the index to refresh
     */
    @Override
    public void refreshIndex(String indexName) {
        RefreshRequest refresh = new RefreshRequest(indexName);
        // Fail fast (instead of expanding wildcards) if the exact index is absent or closed.
        refresh.indicesOptions(IndicesOptions.strictSingleIndexNoExpandForbidClosed());
        try {
            restClient.indices().refresh(refresh, RequestOptions.DEFAULT);
        } catch (IOException e) {
            LOG.error("Cannot refresh index {}", indexName, e);
        } catch (ElasticsearchException e) {
            if (e.status() == RestStatus.NOT_FOUND) {
                LOG.info("Index {} not found", indexName);
            }
        }
    }
    /**
     * Creates the index with the given shard/replica counts unless it already exists.
     *
     * @param indexName the index to create
     * @param shards number of primary shards
     * @param replicas number of replicas per shard
     */
    @Override
    public void createIndexIfDoesNotExist(String indexName, int shards, int replicas) {
        GetIndexRequest request = new GetIndexRequest(indexName);
        CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
        createIndexRequest.settings(
                Settings.builder()
                        .put("index.number_of_shards", shards)
                        .put("index.number_of_replicas", replicas));
        try {
            boolean exists = restClient.indices().exists(request, RequestOptions.DEFAULT);
            if (!exists) {
                restClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            } else {
                LOG.info("Index already exists {}", indexName);
            }
        } catch (IOException e) {
            LOG.error("Cannot create index {}", indexName, e);
        }
    }
    /** Closes the underlying REST client. */
    @Override
    public void close() throws Exception {
        restClient.close();
    }
    /**
     * Fetches one page of documents, sorted ascending on {@code params.sortField()}, mapping
     * each hit to a (document id, "value" source field) pair. Returns an empty list if the
     * search fails with an I/O error.
     *
     * @param params query parameters (index name, sort field, pagination, total-hit tracking)
     * @return the fetched key/value pairs, or an empty list on failure
     */
    @Override
    public List<KeyValue<Integer, String>> fetchAll(QueryParams params) {
        try {
            SearchResponse response =
                    restClient.search(
                            new SearchRequest(params.indexName())
                                    .source(
                                            new SearchSourceBuilder()
                                                    .sort(params.sortField(), SortOrder.ASC)
                                                    .from(params.from())
                                                    .size(params.pageLength())
                                                    .trackTotalHits(params.trackTotalHits())),
                            RequestOptions.DEFAULT);
            SearchHit[] searchHits = response.getHits().getHits();
            return Arrays.stream(searchHits)
                    .map(
                            searchHit ->
                                    KeyValue.of(
                                            Integer.valueOf(searchHit.getId()),
                                            searchHit.getSourceAsMap().get("value").toString()))
                    .collect(Collectors.toList());
        } catch (IOException e) {
            LOG.error("Fetching records failed", e);
            return Collections.emptyList();
        }
    }
}
| 5,846 |
0 | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/main/java/org/apache/flink/streaming | Create_ds/flink-connector-elasticsearch/flink-connector-elasticsearch-e2e-tests/flink-connector-elasticsearch6-e2e-tests/src/main/java/org/apache/flink/streaming/tests/UpdateRequest6Factory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.tests;
import org.elasticsearch.action.update.UpdateRequest;
import java.util.Map;
/** Factory for creating UpdateRequests of Elasticsearch6. */
public class UpdateRequest6Factory implements UpdateRequestFactory {
    private static final long serialVersionUID = 1L;

    /** Target index for every generated request. */
    private final String indexName;

    /**
     * Instantiates a new update request factory for of Elasticsearch6.
     *
     * @param indexName The index name.
     */
    public UpdateRequest6Factory(String indexName) {
        this.indexName = indexName;
    }

    @Override
    public UpdateRequest createUpdateRequest(KeyValue<Integer, String> element) {
        String documentId = String.valueOf(element.key);
        Map<String, Object> document = UpdateRequestFactory.prepareDoc(element);
        // Same payload for update and upsert: insert the document if absent, overwrite otherwise.
        return new UpdateRequest(indexName, "doc", documentId).doc(document).upsert(document);
    }
}
| 5,847 |
0 | Create_ds/aws-cloudtrail-processing-library/src | Create_ds/aws-cloudtrail-processing-library/src/sample/SampleExceptionHandler.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package sample;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.exceptions.ProcessingLibraryException;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ExceptionHandler;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressInfo;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressState;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
public class SampleExceptionHandler implements ExceptionHandler {
    private static final Log logger = LogFactory.getLog(SampleExceptionHandler.class);

    /**
     * Exception handler that simply logs the progress state and progress information
     * carried by the library exception.
     */
    @Override
    public void handleException(ProcessingLibraryException exception) {
        final ProgressStatus status = exception.getStatus();
        final ProgressState state = status.getProgressState();
        final ProgressInfo info = status.getProgressInfo();
        logger.error(String.format("Exception. Progress State: %s. Progress Information: %s.", state, info));
    }
}
| 5,848 |
0 | Create_ds/aws-cloudtrail-processing-library/src | Create_ds/aws-cloudtrail-processing-library/src/sample/SampleEventsProcessor.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package sample;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventsProcessor;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
public class SampleEventsProcessor implements EventsProcessor {
private static final Log logger = LogFactory.getLog(SampleEventsProcessor.class);
public void process(List<CloudTrailEvent> events) {
int i = 0;
for (CloudTrailEvent event : events) {
validateEvent(event);
logger.info(String.format("Process event %d : %s", i++, event.getEventData()));
}
}
/**
* Do simple validation before processing.
*
* @param event to validate
*/
private void validateEvent(CloudTrailEvent event) {
if (event.getEventData().getAccountId() == null) {
logger.error(String.format("Event %s doesn't have account ID.", event.getEventData()));
}
// more validation here...
}
}
| 5,849 |
0 | Create_ds/aws-cloudtrail-processing-library/src | Create_ds/aws-cloudtrail-processing-library/src/sample/SampleApp.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package sample;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.AWSCloudTrailProcessingExecutor;
/**
 * Sample application that uses the AWS CloudTrail Processing Library.
 */
public class SampleApp {
    public static void main(String[] args) throws InterruptedException {
        final Log logger = LogFactory.getLog(SampleApp.class);
        // create AWSCloudTrailProcessingExecutor and start it
        final AWSCloudTrailProcessingExecutor executor = buildExecutor();
        executor.start();
        // add shut down hook to gracefully stop executor (optional)
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                logger.info("Shut Down Hook is called.");
                executor.stop();
            }
        });
        // register a Default Uncaught Exception Handler (optional)
        Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(Thread t, Throwable e) {
                logger.error("Handled by global Exception handler. " + e.getMessage() + " " + t.getName());
                //Two options here:
                //First, we can call System.exit(1); in such case shut down hook will be called.
                //Second, we can optionally restart another executor and start.
                final AWSCloudTrailProcessingExecutor executor = buildExecutor();
                executor.start();
            }
        });
        //can optionally limit running time, or remove both lines so it is running forever. (optional)
        Thread.sleep(24 * 60 * 60 * 1000);
        executor.stop();
    }

    /**
     * Builds an executor wired with all of the sample callback implementations.
     * Extracted so main() and the uncaught-exception restart path share one definition.
     */
    private static AWSCloudTrailProcessingExecutor buildExecutor() {
        return new AWSCloudTrailProcessingExecutor
                .Builder(new SampleEventsProcessor(), "/sample/awscloudtrailprocessinglibrary.properties")
                .withSourceFilter(new SampleSourceFilter())
                .withEventFilter(new SampleEventFilter())
                .withProgressReporter(new SampleProgressReporter())
                .withExceptionHandler(new SampleExceptionHandler())
                .build();
    }
}
| 5,850 |
0 | Create_ds/aws-cloudtrail-processing-library/src | Create_ds/aws-cloudtrail-processing-library/src/sample/SampleProgressReporter.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package sample;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ProgressReporter;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
/**
 * Simply log the processing latency.
 */
public class SampleProgressReporter implements ProgressReporter {
    private static final Log logger = LogFactory.getLog(SampleProgressReporter.class);

    /** Captures the wall-clock time at which the reported operation began. */
    @Override
    public Object reportStart(ProgressStatus status) {
        return new Date();
    }

    /** Logs the state, outcome, and elapsed milliseconds of the finished operation. */
    @Override
    public void reportEnd(ProgressStatus status, Object startDate) {
        final long startMillis = ((Date) startDate).getTime();
        final long latencyMillis = Math.abs(startMillis - new Date().getTime());
        logger.info(status.getProgressState().toString() + " is " + status.getProgressInfo().isSuccess()
                + " , and latency is " + latencyMillis + " milliseconds.");
    }
}
| 5,851 |
0 | Create_ds/aws-cloudtrail-processing-library/src | Create_ds/aws-cloudtrail-processing-library/src/sample/SampleEventFilter.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package sample;
import com.amazonaws.services.cloudtrail.processinglibrary.exceptions.CallbackException;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventData;
public class SampleEventFilter implements EventFilter {

    private static final String EC2_EVENTS = "ec2.amazonaws.com";

    /**
     * Event filter that only keeps EC2 deletion API calls.
     *
     * @param event the CloudTrail event to inspect
     * @return true if the event is an EC2 "Delete*" API call
     * @throws CallbackException never thrown by this implementation
     */
    @Override
    public boolean filterEvent(CloudTrailEvent event) throws CallbackException {
        CloudTrailEventData eventData = event.getEventData();
        String eventSource = eventData.getEventSource();
        String eventName = eventData.getEventName();
        // Constant-first equals plus an explicit null check: events can arrive with
        // missing fields (see SampleEventsProcessor.validateEvent), and the previous
        // eventSource.equals(...) / eventName.startsWith(...) would NPE on them.
        return EC2_EVENTS.equals(eventSource)
                && eventName != null
                && eventName.startsWith("Delete");
    }
}
| 5,852 |
0 | Create_ds/aws-cloudtrail-processing-library/src | Create_ds/aws-cloudtrail-processing-library/src/sample/SampleSourceFilter.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package sample;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import com.amazonaws.services.cloudtrail.processinglibrary.exceptions.CallbackException;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.SourceFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SQSBasedSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SourceAttributeKeys;
public class SampleSourceFilter implements SourceFilter {
    /**
     * Max retry for a SQS message.
     */
    private static final int MAX_RECEIVED_COUNT = 3;

    /**
     * Account IDs would like to process.
     */
    private static List<String> accountIDs;
    static {
        accountIDs = new ArrayList<>();
        accountIDs.add("123456789012");
        accountIDs.add("234567890123");
    }

    /**
     * This Sample Source Filter filters out messages that have been received more than 3 times
     * and accountIDs outside the allowed list.
     *
     * It is useful when you only want to retry on failed message up to certain times.
     *
     * @param source the source to filter; must be an {@link SQSBasedSource}
     * @return true if the source should be processed
     * @throws CallbackException never thrown by this implementation
     */
    @Override
    public boolean filterSource(CloudTrailSource source) throws CallbackException {
        // Keep the cast as an explicit runtime type check (it throws ClassCastException
        // for non-SQS sources, as before), but store the result in a typed local instead
        // of the previous no-op reassignment of the CloudTrailSource-typed parameter.
        SQSBasedSource sqsSource = (SQSBasedSource) source;
        Map<String, String> sourceAttributes = sqsSource.getSourceAttributes();
        String accountId = sourceAttributes.get(SourceAttributeKeys.ACCOUNT_ID.getAttributeKey());
        String receivedCount = sourceAttributes.get(SourceAttributeKeys.APPROXIMATE_RECEIVE_COUNT.getAttributeKey());
        int approximateReceivedCount = Integer.parseInt(receivedCount);
        return approximateReceivedCount <= MAX_RECEIVED_COUNT && accountIDs.contains(accountId);
    }
}
| 5,853 |
0 | Create_ds/aws-cloudtrail-processing-library/src | Create_ds/aws-cloudtrail-processing-library/src/sample/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* Simple example to use AWS CloudTrail Processing Library.
*/
package sample;
| 5,854 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/AWSCloudTrailProcessingExecutor.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.services.cloudtrail.processinglibrary.configuration.ProcessingConfiguration;
import com.amazonaws.services.cloudtrail.processinglibrary.configuration.PropertiesFileConfiguration;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.EventReaderFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.SourceSerializerFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.ThreadPoolFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.impl.DefaultExceptionHandler;
import com.amazonaws.services.cloudtrail.processinglibrary.impl.DefaultEventFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.impl.DefaultEventsProcessor;
import com.amazonaws.services.cloudtrail.processinglibrary.impl.DefaultProgressReporter;
import com.amazonaws.services.cloudtrail.processinglibrary.impl.DefaultSourceFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ExceptionHandler;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventsProcessor;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ProgressReporter;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.SourceFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.BasicS3Manager;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.S3Manager;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.SqsManager;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.reader.EventReader;
import com.amazonaws.services.cloudtrail.processinglibrary.serializer.SourceSerializer;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.sqs.AmazonSQS;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
* AWS CloudTrail Processing Library's main execution logic. This class loads a user's configuration and
* creates an {@link EventReaderFactory} object which spawns a {@link EventReader} to process log files.
* <p>
* It has two thread pools: <code>scheduledThreadPool</code>, which is a single-threaded scheduled
* thread pool used to poll SQS for messages, and <code>mainThreadPool</code>, which has a
* configurable size and processes each {@link CloudTrailSource} in parallel.
*/
public class AWSCloudTrailProcessingExecutor {
    private static final Log logger = LogFactory.getLog(AWSCloudTrailProcessingExecutor.class);

    /**
     * The delay between the starting time stamps of consecutive polling runs, in microseconds
     * (the unit handed to {@code scheduleAtFixedRate} in {@link #start()}). With a period of
     * 1 microsecond the poller behaves as if it were continuously running.
     */
    private static final int EXECUTION_DELAY = 1; // 1 microsecond

    private static final String ERROR_CONFIGURATION_NULL = "ProcessingConfiguration object is null. " +
            "Either pass in a class path property file path or directly pass in a ProcessingConfiguration object";

    private ProcessingConfiguration config;
    private SourceFilter sourceFilter;
    private EventFilter eventFilter;
    private EventsProcessor eventsProcessor;
    private ProgressReporter progressReporter;
    private ExceptionHandler exceptionHandler;

    /**
     * Scheduled thread pool used to continuously poll queue and enqueue jobs
     * into our main thread pool executorService.
     */
    private ScheduledExecutorService scheduledThreadPool;

    /**
     * The thread pool that processes the log files.
     */
    private ExecutorService mainThreadPool;

    private EventReaderFactory readerFactory;

    private AWSCloudTrailProcessingExecutor(Builder builder) {
        this.config = builder.config;
        this.sourceFilter = builder.sourceFilter;
        this.eventFilter = builder.eventFilter;
        this.eventsProcessor = builder.eventsProcessor;
        this.progressReporter = builder.progressReporter;
        this.exceptionHandler = builder.exceptionHandler;
        this.scheduledThreadPool = builder.scheduledThreadPool;
        this.mainThreadPool = builder.mainThreadPool;
        this.readerFactory = builder.readerFactory;
    }

    /**
     * Start processing AWS CloudTrail logs.
     * <p>
     * Validates that all collaborators are present, then schedules a {@link ScheduledJob}
     * that polls SQS and fans log-file processing out to the main thread pool.
     */
    public void start() {
        logger.info("Started AWSCloudTrailProcessingLibrary.");
        validateBeforeStart();
        scheduledThreadPool.scheduleAtFixedRate(new ScheduledJob(readerFactory), 0L, EXECUTION_DELAY, TimeUnit.MICROSECONDS);
    }

    /**
     * Stop processing AWS CloudTrail logs.
     * <p>
     * The main pool is stopped before the scheduled poller; any task the still-running
     * poller fails to enqueue in that window is caught and logged inside
     * {@link ScheduledJob#run()}, which swallows all {@code Throwable}s.
     */
    public void stop() {
        stopThreadPool(mainThreadPool);
        stopThreadPool(scheduledThreadPool);
        logger.info("Stopped AWSCloudTrailProcessingLibrary.");
    }

    /**
     * Helper function to gracefully stop an {@link ExecutorService}.
     * <p>
     * Initiates an orderly shutdown, waits up to the configured termination delay for
     * in-flight tasks to finish, and forces {@link ExecutorService#shutdownNow()} if
     * they have not terminated by then.
     *
     * @param threadPool the thread pool to stop.
     */
    private void stopThreadPool(ExecutorService threadPool) {
        // Guard against stop() being invoked before the pools were built.
        LibraryUtils.checkCondition(threadPool == null, "Thread pool is null when calling stop");

        if (threadPool.isShutdown()) {
            logger.debug(threadPool.toString() + " is already stopped.");
        } else {
            logger.debug(threadPool.toString() + " is about to shutdown.");
            threadPool.shutdown(); // Shutdown thread pool

            try { // Wait for shutdown
                threadPool.awaitTermination(config.getThreadTerminationDelaySeconds(), TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                logger.debug("Wait thread pool termination is interrupted.");
                Thread.currentThread().interrupt(); // preserve interrupt status for the caller
            }

            // BUGFIX: isShutdown() is true as soon as shutdown() has been called, so the
            // original "!isShutdown()" check was always false and shutdownNow() was
            // unreachable. isTerminated() reports whether all tasks actually finished
            // within the termination delay.
            if (!threadPool.isTerminated()) { // ShutdownNow after waiting
                logger.debug(threadPool.toString() + " is force to shutdown now.");
                threadPool.shutdownNow();
            }
            logger.debug(threadPool.toString() + " is stopped.");
        }
    }

    /**
     * A job that runs in a scheduled thread pool.
     * <p>
     * If any execution of the task raises an exception, then {@link ScheduledExecutorService} will
     * suppress any subsequent executions. Therefore, we try/catch a Throwable here.
     */
    private class ScheduledJob implements Runnable {
        private EventReaderFactory eventReaderFactory;

        public ScheduledJob(EventReaderFactory eventReaderFactory) {
            this.eventReaderFactory = eventReaderFactory;
        }

        /**
         * Run the scheduled job: poll for sources and hand each one to the main
         * thread pool for parallel processing.
         */
        public void run() {
            try {
                final EventReader reader = eventReaderFactory.createReader();
                List<CloudTrailSource> sources = reader.getSources();
                for (final CloudTrailSource source : sources) {
                    // process each CloudTrailSource in main thread pool
                    mainThreadPool.execute(new Runnable() {
                        public void run() {
                            reader.processSource(source);
                        }
                    });
                }
            } catch (Throwable t) {
                // Deliberately broad: an escaping Throwable would cancel all future
                // scheduled executions of this job.
                logger.error("Executor failed to process a task. " + t.getMessage(), t);
            }
        }
    }

    /**
     * Validate the user's input before processing logs.
     */
    private void validateBeforeStart() {
        LibraryUtils.checkArgumentNotNull(config, "Configuration is null.");
        config.validate();
        LibraryUtils.checkArgumentNotNull(sourceFilter, "sourceFilter is null.");
        LibraryUtils.checkArgumentNotNull(eventFilter, "eventFilter is null.");
        LibraryUtils.checkArgumentNotNull(eventsProcessor, "eventsProcessor is null.");
        LibraryUtils.checkArgumentNotNull(progressReporter, "progressReporter is null.");
        LibraryUtils.checkArgumentNotNull(exceptionHandler, "exceptionHandler is null.");
        LibraryUtils.checkArgumentNotNull(scheduledThreadPool, "scheduledThreadPool is null.");
        LibraryUtils.checkArgumentNotNull(mainThreadPool, "mainThreadPool is null.");
        LibraryUtils.checkArgumentNotNull(readerFactory, "readerFactory is null.");
    }

    /**
     * A class that builds an {@link AWSCloudTrailProcessingExecutor} object.
     */
    public static class Builder {
        private static final int SDK_TIME_OUT = 10000; // 10 seconds

        private ProcessingConfiguration config;
        //provide default implementation to AWSCloudTrailProcessingLibrary interfaces.
        private SourceFilter sourceFilter = new DefaultSourceFilter();
        private EventFilter eventFilter = new DefaultEventFilter();
        private EventsProcessor eventsProcessor = new DefaultEventsProcessor();
        private ProgressReporter progressReporter = new DefaultProgressReporter();
        private ExceptionHandler exceptionHandler = new DefaultExceptionHandler();
        private ScheduledExecutorService scheduledThreadPool;
        private ExecutorService mainThreadPool;
        private EventReaderFactory readerFactory;
        private SourceSerializer sourceSerializer = SourceSerializerFactory.createSourceSerializerChain();
        private String propertyFilePath;
        private AmazonS3 s3Client;
        private AmazonSQS sqsClient;
        private S3Manager s3Manager;

        /**
         * Builder for {@link AWSCloudTrailProcessingExecutor}.
         *
         * @param eventsProcessor The {@link EventsProcessor} that will process {@link CloudTrailEvent}s.
         * @param propertyFilePath The path to a property file containing the AWS CloudTrail Processing Library's
         *     configuration.
         */
        public Builder(EventsProcessor eventsProcessor, String propertyFilePath) {
            this.eventsProcessor = eventsProcessor;
            this.propertyFilePath = propertyFilePath;
        }

        /**
         * Builder for {@link AWSCloudTrailProcessingExecutor}.
         *
         * @param eventsProcessor The {@link EventsProcessor} instance that will process {@link CloudTrailEvent}s.
         * @param config An {@link ProcessingConfiguration} instance that provides the library's
         *     configuration details.
         */
        public Builder(EventsProcessor eventsProcessor, ProcessingConfiguration config) {
            this.eventsProcessor = eventsProcessor;
            this.config = config;
        }

        /**
         * Applies a user-defined {@link SourceFilter} to this instance.
         *
         * @param sourceFilter The <code>SourceFilter</code> that will be used to filter {@link CloudTrailSource} source.
         * @return This <code>Builder</code> instance, using the specified <code>SourceFilter</code>.
         */
        public Builder withSourceFilter(SourceFilter sourceFilter) {
            this.sourceFilter = sourceFilter;
            return this;
        }

        /**
         * Applies a user-defined {@link EventFilter} to this instance.
         *
         * @param eventFilter The <code>EventFilter</code> that will be used to filter {@link CloudTrailEvent}s.
         * @return This <code>Builder</code> instance, using the specified <code>EventFilter</code>.
         */
        public Builder withEventFilter(EventFilter eventFilter) {
            this.eventFilter = eventFilter;
            return this;
        }

        /**
         * Applies a user-defined {@link ProgressReporter} to this instance.
         *
         * @param progressReporter The <code>ProgressReporter</code> that will report
         *     the state of the AWSCloudTrailProcessingLibrary processing process.
         * @return This <code>Builder</code> instance, using the specified <code>ProgressReporter</code>.
         */
        public Builder withProgressReporter(ProgressReporter progressReporter) {
            this.progressReporter = progressReporter;
            return this;
        }

        /**
         * Applies a user-defined {@link ExceptionHandler} to this instance.
         *
         * @param exceptionHandler The <code>ExceptionHandler</code> that will handle exceptions for
         *     this instance.
         * @return This <code>Builder</code> instance, using the specified
         *     <code>ExceptionHandler</code>.
         */
        public Builder withExceptionHandler(ExceptionHandler exceptionHandler) {
            this.exceptionHandler = exceptionHandler;
            return this;
        }

        /**
         * Applies a user-defined {@link SourceSerializer} to this instance.
         *
         * @param sourceSerializer The<code>SourceSerializer</code> that gets the {@link CloudTrailSource} from the SQS message
         *     object for this instance.
         * @return This <code>Builder</code> instance, using the specified <code>SourceSerializer</code>
         */
        public Builder withSourceSerializer(SourceSerializer sourceSerializer) {
            this.sourceSerializer = sourceSerializer;
            return this;
        }

        /**
         * Applies a user-defined <a
         * href="http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html">ExecutorService</a>
         * thread pool to this instance.
         *
         * @param mainThreadPool The <code>ExecutorService</code> thread pool that will be used to
         *     process CloudTrailSource
         * @return This <code>Builder</code> instance, using the specified thread pool.
         */
        public Builder withThreadPool(ExecutorService mainThreadPool) {
            this.mainThreadPool = mainThreadPool;
            return this;
        }

        /**
         * Applies a user-defined <a
         * href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3.html">AmazonS3</a>
         * to this instance.
         * If user provides the user-defined S3Manager, then this s3Client will not be used.
         *
         * @param s3Client the <code>AmazonS3</code> object used to download CloudTrail log files
         * @return This <code>Builder</code> instance, using the specified <code>AmazonS3</code>.
         */
        public Builder withS3Client(AmazonS3 s3Client) {
            this.s3Client = s3Client;
            return this;
        }

        /**
         * Applies a user-defined {@link S3Manager} to this instance.
         * User-defined s3Client will not be used if user provides the user-defined S3Manager.
         *
         * @param s3Manager the <code>S3Manager</code> object used to manage Amazon S3 service-related operations
         * @return This <code>Builder</code> instance, using the specified <code>AmazonS3</code>.
         */
        public Builder withS3Manager(S3Manager s3Manager) {
            this.s3Manager = s3Manager;
            return this;
        }

        /**
         * Applies a user-defined <a
         * href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/sqs/AmazonSQS.html">AmazonSQS</a>
         * to this instance.
         *
         * @param sqsClient The <code>AmazonSQS</code> that will be used to poll messages from
         *     the SQS queue.
         * @return This <code>Builder</code> instance, using the specified
         *     <code>AmazonSQS</code>.
         */
        public Builder withSQSClient(AmazonSQS sqsClient) {
            this.sqsClient = sqsClient;
            return this;
        }

        /**
         * Build an {@link AWSCloudTrailProcessingExecutor} using the classpath property file.
         *
         * @return an AWSCloudTrailProcessingExecutor instance.
         */
        public AWSCloudTrailProcessingExecutor build() {
            buildConfig();
            validateBeforeBuild();
            buildS3Client();
            buildS3Manager();
            buildSqsClient();
            buildReaderFactory();
            buildThreadPools();
            return new AWSCloudTrailProcessingExecutor(this);
        }

        // Load configuration from the property file when no ProcessingConfiguration
        // object was passed directly.
        private void buildConfig() {
            // passed in configuration as property file
            if (config == null && propertyFilePath != null) {
                config = new PropertiesFileConfiguration(propertyFilePath);
            }
        }

        // Fail fast on any missing collaborator before clients/pools are created.
        private void validateBeforeBuild() {
            LibraryUtils.checkArgumentNotNull(config, ERROR_CONFIGURATION_NULL);
            LibraryUtils.checkArgumentNotNull(config.getAwsCredentialsProvider(),
                    "ProcessingConfiguration missing AWSCredentialsProvider attribute");
            LibraryUtils.checkArgumentNotNull(eventsProcessor, "eventsProcessor is null.");
            LibraryUtils.checkArgumentNotNull(sourceFilter, "sourceFilter is null.");
            LibraryUtils.checkArgumentNotNull(eventFilter, "eventFilter is null.");
            LibraryUtils.checkArgumentNotNull(progressReporter, "progressReporter is null.");
            LibraryUtils.checkArgumentNotNull(exceptionHandler, "exceptionHandler is null.");
            LibraryUtils.checkArgumentNotNull(sourceSerializer, "sourceSerializer is null.");
        }

        // Create the S3 client unless the caller supplied one.
        private void buildS3Client() {
            // override default timeout for S3Client
            ClientConfiguration clientConfiguration = new ClientConfiguration();
            clientConfiguration.setConnectionTimeout(SDK_TIME_OUT);
            clientConfiguration.setSocketTimeout(SDK_TIME_OUT);
            // Static constant accessed via the class, not an instance, for clarity.
            clientConfiguration.setMaxConnections(Math.max(ClientConfiguration.DEFAULT_MAX_CONNECTIONS, config.getThreadCount()));

            if (s3Client == null) {
                s3Client = AmazonS3ClientBuilder.standard()
                        .withCredentials(config.getAwsCredentialsProvider())
                        .withClientConfiguration(clientConfiguration)
                        .withRegion(config.getS3Region())
                        .build();
            }
        }

        // Create the S3 manager unless the caller supplied one; a user-supplied
        // S3Manager takes precedence over a user-supplied s3Client.
        private void buildS3Manager() {
            if (s3Manager == null) {
                s3Manager = new BasicS3Manager(s3Client, config, exceptionHandler, progressReporter);
            }
        }

        // Create the SQS client unless the caller supplied one.
        private void buildSqsClient() {
            if (sqsClient == null) {
                ClientConfiguration clientConfiguration = new ClientConfiguration();
                clientConfiguration.setMaxConnections(Math.max(ClientConfiguration.DEFAULT_MAX_CONNECTIONS, config.getThreadCount()));
                sqsClient = AmazonSQSClientBuilder.standard()
                        .withCredentials(config.getAwsCredentialsProvider())
                        .withClientConfiguration(clientConfiguration)
                        .withRegion(config.getSqsRegion())
                        .build();
            }
        }

        // Wire filters, processors, reporters and managers into the reader factory.
        private void buildReaderFactory() {
            SqsManager sqsManager = new SqsManager(sqsClient, config, exceptionHandler, progressReporter, sourceSerializer);
            readerFactory = new EventReaderFactory.Builder(config)
                    .withEventsProcessor(eventsProcessor)
                    .withSourceFilter(sourceFilter)
                    .withEventFilter(eventFilter)
                    .withProgressReporter(progressReporter)
                    .withExceptionHandler(exceptionHandler)
                    .withS3Manager(s3Manager)
                    .withSQSManager(sqsManager).build();
        }

        // The scheduled (polling) pool is always created here; the main pool only
        // when the caller did not supply one via withThreadPool().
        private void buildThreadPools() {
            ThreadPoolFactory threadFactory = new ThreadPoolFactory(config.getThreadCount(), exceptionHandler);
            scheduledThreadPool = threadFactory.createScheduledThreadPool(config.getNumOfParallelReaders());

            if (mainThreadPool == null) {
                mainThreadPool = threadFactory.createMainThreadPool();
            }
        }
    }
}
| 5,855 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* The AWS CloudTrail Processing Library allows you to easily process AWS CloudTrail logs in your Java applications.
*/
package com.amazonaws.services.cloudtrail.processinglibrary;
| 5,856 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/reader/EventReader.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.reader;
import com.amazonaws.services.cloudtrail.processinglibrary.configuration.ProcessingConfiguration;
import com.amazonaws.services.cloudtrail.processinglibrary.exceptions.CallbackException;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventsProcessor;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ExceptionHandler;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ProgressReporter;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.SourceFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.S3Manager;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.SqsManager;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SQSBasedSource;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.BasicProcessLogInfo;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.BasicProcessSourceInfo;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressState;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
import com.amazonaws.services.cloudtrail.processinglibrary.serializer.DefaultEventSerializer;
import com.amazonaws.services.cloudtrail.processinglibrary.serializer.EventSerializer;
import com.amazonaws.services.cloudtrail.processinglibrary.serializer.RawLogDeliveryEventSerializer;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.EventBuffer;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import com.amazonaws.services.sqs.model.Message;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.zip.GZIPInputStream;
/**
* EventReader is responsible for processing a stream of events. It parses each event and hands
* the events to EventsProcessor to process.
*/
public class EventReader {
    private static final Log logger = LogFactory.getLog(EventReader.class);
    // User-supplied callbacks; all are invoked from processSource()/emitEvents().
    private final SourceFilter sourceFilter;
    private final EventFilter eventFilter;
    private final EventsProcessor eventsProcessor;
    private final ProgressReporter progressReporter;
    private final ExceptionHandler exceptionHandler;
    private ProcessingConfiguration config;
    // Polls, parses and deletes SQS messages.
    private SqsManager sqsManager;
    // Downloads CloudTrail log files from S3.
    private S3Manager s3Manager;
    /**
     * Jackson parser to parse CloudTrail log files.
     */
    private ObjectMapper mapper;
    /**
     * Internal use only.
     *
     * This constructor creates an instance of EventReader object.
     *
     * @param eventsProcessor user's implementation of eventsProcessor.
     * @param sourceFilter user's implementation of sourceFilter.
     * @param eventFilter user's implementation of eventFilter.
     * @param progressReporter user's implementation of progressReporter.
     * @param exceptionHandler user's implementation of exceptionHandler.
     * @param sqsManager that poll message from SQS queue.
     * @param s3Manager that download CloudTrail log files from S3.
     * @param configuration user provided ProcessingConfiguration.
     */
    public EventReader(EventsProcessor eventsProcessor, SourceFilter sourceFilter, EventFilter eventFilter,
                       ProgressReporter progressReporter, ExceptionHandler exceptionHandler, SqsManager sqsManager,
                       S3Manager s3Manager, ProcessingConfiguration configuration) {
        this.eventsProcessor = eventsProcessor;
        this.sourceFilter = sourceFilter;
        this.eventFilter = eventFilter;
        this.progressReporter = progressReporter;
        this.exceptionHandler = exceptionHandler;
        this.config = configuration;
        this.sqsManager = sqsManager;
        this.s3Manager = s3Manager;
        this.mapper = new ObjectMapper();
    }
    /**
     * Poll messages from SQS queue and convert messages to CloudTrailSource.
     *
     * @return a list of {@link CloudTrailSource}.
     */
    public List<CloudTrailSource> getSources() {
        List<Message> sqsMessages = sqsManager.pollQueue();
        return sqsManager.parseMessage(sqsMessages);
    }
    /**
     * Retrieve S3 object URL from source then downloads the object processes each event through
     * call back functions.
     * <p>
     * Progress reporting is strictly paired: reportStart is called once for the source and once
     * per log file, and the matching end call is issued in the corresponding finally block
     * (via LibraryUtils.endToProcess) regardless of success or failure.
     *
     * @param source {@link CloudTrailSource} to process.
     */
    public void processSource (CloudTrailSource source) {
        // Success/failure bookkeeping for message cleanup and progress reporting.
        boolean filterSourceOut = false;
        boolean downloadLogSuccess = true;
        boolean processSourceSuccess = false;
        // NOTE(review): the status info captures processSourceSuccess (false) at construction
        // time; the final outcome is passed separately to endToProcess below — confirm that
        // consumers rely on the boolean argument, not the captured snapshot.
        ProgressStatus processSourceStatus = new ProgressStatus(ProgressState.processSource, new BasicProcessSourceInfo(source, processSourceSuccess));
        final Object processSourceReportObject = progressReporter.reportStart(processSourceStatus);
        // Start to process the source
        try {
            // Apply source filter first. If source filtered out then delete source immediately and return.
            if (!sourceFilter.filterSource(source)) {
                logger.debug("AWSCloudTrailSource " + source + " has been filtered out.");
                processSourceSuccess = true;
                filterSourceOut = true;
            } else {
                // Counts logs not yet successfully processed; the source only counts as a
                // success when every log file was processed (counter reaches zero).
                int nLogFilesToProcess = ((SQSBasedSource)source).getLogs().size();
                for (CloudTrailLog ctLog : ((SQSBasedSource)source).getLogs()) {
                    //start to process the log
                    boolean processLogSuccess = false;
                    ProgressStatus processLogStatus = new ProgressStatus(ProgressState.processLog, new BasicProcessLogInfo(source, ctLog, processLogSuccess));
                    final Object processLogReportObject = progressReporter.reportStart(processLogStatus);
                    try {
                        byte[] s3ObjectBytes = s3Manager.downloadLog(ctLog, source);
                        if (s3ObjectBytes == null) {
                            downloadLogSuccess = false;
                            continue; //Failure downloading log file. Skip it.
                        }
                        // Log files are gzipped; both the stream and the serializer are
                        // closed by try-with-resources.
                        try (GZIPInputStream gzippedInputStream = new GZIPInputStream(new ByteArrayInputStream(s3ObjectBytes));
                             EventSerializer serializer = getEventSerializer(gzippedInputStream, ctLog)) {
                            emitEvents(serializer);
                            //decrement this value upon successfully processed a log
                            nLogFilesToProcess --;
                            processLogSuccess = true;
                        } catch (IllegalArgumentException | IOException e) {
                            // Malformed/unparsable log file: report it but keep going with
                            // the remaining log files of this source.
                            LibraryUtils.handleException(exceptionHandler, processLogStatus, e, "Failed to parse log file.");
                        }
                    } finally {
                        //end to process the log
                        LibraryUtils.endToProcess(progressReporter, processLogSuccess, processLogStatus, processLogReportObject);
                    }
                }
                if (nLogFilesToProcess == 0) {
                    processSourceSuccess = true;
                }
            }
        } catch (CallbackException ex) {
            // Raised by user callbacks (filters/processor); delegated to the user's handler.
            exceptionHandler.handleException(ex);
        } finally {
            // Decide whether to delete the SQS message, then close out the source-level report.
            cleanupMessage(filterSourceOut, downloadLogSuccess, processSourceSuccess, source);
            // end to process the source
            LibraryUtils.endToProcess(progressReporter, processSourceSuccess, processSourceStatus, processSourceReportObject);
        }
    }
    /**
     * Delete SQS message after processing source.
     *
     * @param progressState {@link ProgressState} either {@link ProgressState#deleteMessage}, or {@link ProgressState#deleteFilteredMessage}
     * @param source {@link CloudTrailSource} that contains the SQS message that will be deleted.
     */
    private void deleteMessageAfterProcessSource(ProgressState progressState, CloudTrailSource source) {
        ProgressStatus deleteMessageStatus = new ProgressStatus(progressState, new BasicProcessSourceInfo(source, false));
        sqsManager.deleteMessageFromQueue(((SQSBasedSource)source).getSqsMessage(), deleteMessageStatus);
    }
    /**
     * Clean up the message after CPL finishes the processing.
     * <p>
     * <li>If the source is filtered out, the message will be deleted with {@link ProgressState#deleteFilteredMessage}.</li>
     * <li>If the processing is successful, the message with be deleted with {@link ProgressState#deleteMessage}.</li>
     * <li>If the processing failed due to downloading logs, the message will not be deleted regardless of
     * {@link ProcessingConfiguration#isDeleteMessageUponFailure()} value. Otherwise, this property controls the
     * deletion decision.</li>
     * </p>
     */
    private void cleanupMessage(boolean filterSourceOut, boolean downloadLogsSuccess, boolean processSourceSuccess, CloudTrailSource source) {
        if (filterSourceOut) {
            deleteMessageAfterProcessSource(ProgressState.deleteFilteredMessage, source);
        } else if (processSourceSuccess || sqsManager.shouldDeleteMessageUponFailure(!downloadLogsSuccess)) {
            deleteMessageAfterProcessSource(ProgressState.deleteMessage, source);
        }
    }
    /**
     * Gets the EventSerializer based on user's configuration.
     * <p>
     * When raw event info is enabled the whole (decompressed) file is materialized as a
     * String so the serializer can attach the raw log text to each event; otherwise the
     * parser streams directly from the gzip stream.
     *
     * @param inputStream the Gzipped content from CloudTrail log file.
     * @param ctLog CloudTrail log file.
     * @return parser that parses CloudTrail log file.
     * @throws IOException if the stream cannot be read or the JSON parser cannot be created.
     */
    private EventSerializer getEventSerializer(GZIPInputStream inputStream, CloudTrailLog ctLog) throws IOException {
        EventSerializer serializer;
        if (config.isEnableRawEventInfo()) {
            String logFileContent = new String(LibraryUtils.toByteArray(inputStream), StandardCharsets.UTF_8);
            JsonParser jsonParser = mapper.getFactory().createParser(logFileContent);
            serializer = new RawLogDeliveryEventSerializer(logFileContent, ctLog, jsonParser);
        } else {
            JsonParser jsonParser = mapper.getFactory().createParser(inputStream);
            serializer = new DefaultEventSerializer(ctLog, jsonParser);
        }
        return serializer;
    }
    /**
     * Filter, buffer, and emit CloudTrailEvents.
     * <p>
     * Events accepted by the event filter are buffered and delivered to the
     * EventsProcessor in batches of at most {@code config.getMaxEventsPerEmit()};
     * any remainder is emitted as a final partial batch.
     *
     * @param serializer {@link EventSerializer} that parses CloudTrail log file.
     *
     * @throws IOException If the log cannot be read.
     * @throws CallbackException If an error occurs when filtering or processing events.
     */
    private void emitEvents(EventSerializer serializer) throws IOException, CallbackException {
        EventBuffer<CloudTrailEvent> eventBuffer = new EventBuffer<>(config.getMaxEventsPerEmit());
        while (serializer.hasNextEvent()) {
            CloudTrailEvent event = serializer.getNextEvent();
            try {
                if (eventFilter.filterEvent(event)) {
                    eventBuffer.addEvent(event);
                    if (eventBuffer.isBufferFull()) {
                        // NOTE(review): getEvents() presumably drains the buffer so the
                        // next batch starts empty — confirm against EventBuffer.
                        eventsProcessor.process(eventBuffer.getEvents());
                    }
                } else {
                    logger.debug("AWSCloudTrailEvent " + event + " has been filtered out.");
                }
            } catch (Exception e) {
                // Log which event triggered the failure before propagating it.
                logger.error("AWSCloudTrailEvent " + event + " caused the following Exception:", e);
                throw e;
            }
        }
        //emit whatever in the buffer as last batch
        List<CloudTrailEvent> events = eventBuffer.getEvents();
        if (!events.isEmpty()) {
            eventsProcessor.process(events);
        }
    }
}
| 5,857 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/reader/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* Classes responsible for processing a stream of events.
*/
package com.amazonaws.services.cloudtrail.processinglibrary.reader;
| 5,858 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/impl/DefaultSourceFilter.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.impl;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.SourceFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
/**
* Default implementation of {@link SourceFilter} that simply returns <code>true</code> for any {@link CloudTrailSource}.
*/
public class DefaultSourceFilter implements SourceFilter {
    /**
     * Accepts every source unconditionally — no {@link CloudTrailSource} is ever
     * filtered out by this default implementation.
     *
     * @param source the candidate source; not inspected.
     * @return always {@code true}.
     */
    @Override
    public boolean filterSource(CloudTrailSource source) {
        return true;
    }
}
| 5,859 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/impl/DefaultEventFilter.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.impl;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventFilter;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
/**
* Default implementation of {@link EventFilter} that simply returns <code>true</code> for any {@link CloudTrailEvent}.
*/
public class DefaultEventFilter implements EventFilter {
    /**
     * Accepts every event unconditionally — no {@link CloudTrailEvent} is ever
     * filtered out by this default implementation.
     *
     * @param event the candidate event; not inspected.
     * @return always {@code true}.
     */
    @Override
    public boolean filterEvent(CloudTrailEvent event) {
        return true;
    }
}
| 5,860 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/impl/DefaultEventsProcessor.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.impl;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.EventsProcessor;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.List;
/**
* Default implementation of {@link EventsProcessor} that simply logs each event.
*/
public class DefaultEventsProcessor implements EventsProcessor {
    // BUGFIX: the logger was created with DefaultExceptionHandler.class, so log lines
    // from this class were attributed to the wrong category; use this class's own name.
    private static final Log logger = LogFactory.getLog(DefaultEventsProcessor.class);

    /**
     * Logs each {@link CloudTrailEvent} in the batch at INFO level.
     *
     * @param events the batch of events to process; each event is logged individually.
     */
    @Override
    public void process(List<CloudTrailEvent> events) {
        for (CloudTrailEvent event : events) {
            logger.info(event);
        }
    }
}
| 5,861 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/impl/DefaultExceptionHandler.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.exceptions.ProcessingLibraryException;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ExceptionHandler;
/**
* Default implementation of {@link ExceptionHandler} that simply logs exceptions.
*/
public class DefaultExceptionHandler implements ExceptionHandler {
    private static final Log LOG = LogFactory.getLog(DefaultExceptionHandler.class);

    /**
     * Records the exception's message and stack trace at ERROR level.
     *
     * @param exception the library exception to report.
     */
    @Override
    public void handleException(ProcessingLibraryException exception) {
        LOG.error(exception.getMessage(), exception);
    }
}
| 5,862 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/impl/DefaultProgressReporter.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ProgressReporter;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
/**
* Default implementation of {@link ProgressReporter} that simply logs the {@link ProgressStatus}.
*/
public class DefaultProgressReporter implements ProgressReporter {
    private static final Log LOG = LogFactory.getLog(DefaultProgressReporter.class);

    /**
     * Logs the progress state carried by the status when an operation starts.
     *
     * @param status the progress status whose state is logged.
     * @return always <code>null</code>; this reporter tracks no context object.
     */
    @Override
    public Object reportStart(ProgressStatus status) {
        LOG.info(status.getProgressState().toString());
        return null;
    }

    /**
     * Logs the progress state carried by the status when an operation ends.
     *
     * @param status the progress status whose state is logged.
     * @param object the context object returned by {@link #reportStart}; ignored here.
     */
    @Override
    public void reportEnd(ProgressStatus status, Object object) {
        LOG.info(status.getProgressState().toString());
    }
}
| 5,863 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/impl/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* Default implementation of call back interfaces.
*/
package com.amazonaws.services.cloudtrail.processinglibrary.impl;
| 5,864 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/configuration/ClientConfiguration.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.configuration;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.cloudtrail.processinglibrary.AWSCloudTrailProcessingExecutor;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.SqsManager;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventMetadata;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.reader.EventReader;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import java.util.List;
/**
* Defines a basic processing configuration for the AWS CloudTrail Processing Library.
*
* You can use instances of this class to configure an {@link AWSCloudTrailProcessingExecutor}
* as an alternative to using a class path properties file.
*/
public class ClientConfiguration implements ProcessingConfiguration {
    private static final String ERROR_CREDENTIALS_PROVIDER_NULL = "CredentialsProvider is null. Either put your " +
            "access key and secret key in the configuration file in your class path, or specify it in the " +
            "ProcessingConfiguration object.";

    /**
     * The <a
     * href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html">AWS credentials provider</a>
     * used to obtain credentials.
     */
    public AWSCredentialsProvider awsCredentialsProvider;

    /**
     * The SQS Queue URL used to receive events.
     * <p>
     * The Queue must be subscribed to AWS CloudTrail.
     */
    public String sqsUrl = null;

    /**
     * The SQS region to use.
     * <p>
     * If not specified, the {@value ProcessingConfiguration#DEFAULT_SQS_REGION} will be used.
     * </p>
     */
    public String sqsRegion = DEFAULT_SQS_REGION;

    /**
     * A period of time, in seconds, during which Amazon SQS prevents other consuming components from receiving and
     * processing messages that are currently being processed by the CloudTrail Processing Library on your behalf.
     */
    public int visibilityTimeout = DEFAULT_VISIBILITY_TIMEOUT;

    /**
     * The S3 endpoint specific to a region.
     * <p>
     * If not specified, the {@value ProcessingConfiguration#DEFAULT_S3_REGION } will be used.
     * </p>
     */
    public String s3Region = DEFAULT_S3_REGION;

    /**
     * The number of threads used to download log files from S3 in parallel.
     * <p>
     * Callbacks can be invoked from any thread.
     * </p>
     */
    public int threadCount = DEFAULT_THREAD_COUNT;

    /**
     * The number of SQS reader threads.
     */
    public int numOfParallelReaders = DEFAULT_NUM_OF_PARALLEL_READERS;

    /**
     * The time allowed, in seconds, for threads to shut down after {@link AWSCloudTrailProcessingExecutor#stop()} is
     * called.
     * <p>
     * Any threads still running beyond this time will be forcibly terminated.
     * </p>
     */
    public int threadTerminationDelaySeconds = DEFAULT_THREAD_TERMINATION_DELAY_SECONDS;

    /**
     * The maximum number of AWSCloudTrailClientEvents sent to a single invocation of processEvents().
     */
    public int maxEventsPerEmit = DEFAULT_MAX_EVENTS_PER_EMIT;

    /**
     * Whether to include raw event information in {@link CloudTrailEventMetadata}.
     */
    public boolean enableRawEventInfo = DEFAULT_ENABLE_RAW_EVENT_INFO;

    /**
     * Whether or not to delete SQS messages when there is any failure during {@link SqsManager#parseMessage(List)} and
     * {@link EventReader#processSource(CloudTrailSource)}.
     */
    public boolean deleteMessageUponFailure = DEFAULT_DELETE_MESSAGE_UPON_FAILURE;

    /**
     * Initializes a new <code>ClientConfiguration</code>.
     * <p>
     * Both parameters are required.
     * </p>
     * @see <a href="http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ImportantIdentifiers.html">Queue and Message Identifiers</a>
     * @see <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html">AWSCredentialsProvider</a>
     *
     * @param sqsUrl the SQS URL to use to get CloudTrail events.
     * @param awsCredentialsProvider The AWS Credentials provider to use to
     *     obtain AWS access credentials.
     */
    public ClientConfiguration(String sqsUrl, AWSCredentialsProvider awsCredentialsProvider) {
        this.sqsUrl = sqsUrl;
        this.awsCredentialsProvider = awsCredentialsProvider;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AWSCredentialsProvider getAwsCredentialsProvider() {
        return awsCredentialsProvider;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getSqsUrl() {
        return sqsUrl;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getSqsRegion() {
        return sqsRegion;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getVisibilityTimeout() {
        return visibilityTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getS3Region() {
        return s3Region;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getThreadCount() {
        return threadCount;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getNumOfParallelReaders() {
        return numOfParallelReaders;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getThreadTerminationDelaySeconds() {
        return threadTerminationDelaySeconds;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxEventsPerEmit() {
        return maxEventsPerEmit;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isEnableRawEventInfo() {
        return enableRawEventInfo;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isDeleteMessageUponFailure() {
        return deleteMessageUponFailure;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void validate() {
        LibraryUtils.checkArgumentNotNull(getAwsCredentialsProvider(), ERROR_CREDENTIALS_PROVIDER_NULL);
        LibraryUtils.checkArgumentNotNull(getSqsUrl(), "SQS URL is null.");
        LibraryUtils.checkArgumentNotNull(getSqsRegion(), "SQS Region is null.");
        LibraryUtils.checkArgumentNotNull(getS3Region(), "S3 Region is null.");
        LibraryUtils.checkCondition(getMaxEventsPerEmit() <= 0, "Maximum Events Per Emit is a non-positive integer.");
        LibraryUtils.checkCondition(getThreadCount() <= 0, "Thread Count is a non-positive integer.");
        // Consistency fix: numOfParallelReaders was the only numeric setting left
        // unvalidated here, unlike PropertiesFileConfiguration#validate().
        LibraryUtils.checkCondition(getNumOfParallelReaders() <= 0, "Num of Parallel Readers is a non-positive integer.");
        LibraryUtils.checkCondition(getThreadTerminationDelaySeconds() <= 0, "Thread Termination Delay Seconds is a non-positive integer.");
        LibraryUtils.checkCondition(getVisibilityTimeout() <= 0, "Visibility Timeout is a non-positive integer.");
    }

    /**
     * Set the AWS Credentials Provider used to access AWS.
     *
     * @param awsCredentialsProvider the
     *     <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html">AWSCredentialsProvider</a>
     *     to set.
     */
    public void setAwsCredentialsProvider(AWSCredentialsProvider awsCredentialsProvider) {
        this.awsCredentialsProvider = awsCredentialsProvider;
    }

    /**
     * Sets the SQS Region to use to get CloudTrail logs.
     *
     * @param sqsRegion the
     *     <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#d0e387">AWS region</a>
     *     to use.
     */
    public void setSqsRegion(String sqsRegion) {
        this.sqsRegion = sqsRegion;
    }

    /**
     * Sets the SQS visibility timeout, during which SQS ignores other requests
     * for the message.
     *
     * @param visibilityTimeout the duration, in seconds, to ignore other
     *     requests for SQS messages being processed by the AWS CloudTrail
     *     Processing Library.
     */
    public void setVisibilityTimeout(int visibilityTimeout) {
        this.visibilityTimeout = visibilityTimeout;
    }

    /**
     * The S3 endpoint specific to a region.
     * <p>
     * If not specified, the default S3 region will be used.
     * </p>
     * @param s3Region the s3Region to set
     */
    public void setS3Region(String s3Region) {
        this.s3Region = s3Region;
    }

    /**
     * The number of threads used to download log files from S3 in parallel.
     * <p>
     * Callbacks can be invoked from any thread.
     * </p>
     * @param threadCount the number of threads to set.
     */
    public void setThreadCount(int threadCount) {
        this.threadCount = threadCount;
    }

    /**
     * The number of reader threads that pull messages from the SQS.
     *
     * @param numOfParallelReaders the number of parallel SQS reader threads to set.
     */
    public void setNumOfParallelReaders(int numOfParallelReaders) {
        this.numOfParallelReaders = numOfParallelReaders;
    }

    /**
     * Set the time allowed, in seconds, for threads to shut down after
     * {@link AWSCloudTrailProcessingExecutor#stop()} is called.
     * <p>
     * Any threads still running beyond this time will be forcibly terminated.
     * </p>
     * @param threadTerminationDelaySeconds the termination delay, in seconds, to set.
     */
    public void setThreadTerminationDelaySeconds(int threadTerminationDelaySeconds) {
        this.threadTerminationDelaySeconds = threadTerminationDelaySeconds;
    }

    /**
     * Set the maximum number of events that can be buffered per call to <code>processEvents()</code>.
     * <p>
     * Fewer events than this may be sent; this number represents only the <i>maximum</i>.
     * </p>
     * @param maxEventsPerEmit the maximum number of events to buffer.
     */
    public void setMaxEventsPerEmit(int maxEventsPerEmit) {
        this.maxEventsPerEmit = maxEventsPerEmit;
    }

    /**
     * Set whether or not raw event information should be returned in {@link CloudTrailEventMetadata}.
     *
     * @param enableRawEventInfo set to <code>true</code> to enable raw event information.
     */
    public void setEnableRawEventInfo(boolean enableRawEventInfo) {
        this.enableRawEventInfo = enableRawEventInfo;
    }

    /**
     * Set whether or not to delete SQS messages when there is any failure during {@link SqsManager#parseMessage(List)}
     * and {@link EventReader#processSource(CloudTrailSource)}.
     * The SQS message will be deleted upon success regardless the setting of <code>deleteMessageUponFailure</code>.
     *
     * @param deleteMessageUponFailure set to <code>true</code> to delete messages upon failure.
     */
    public void setDeleteMessageUponFailure(boolean deleteMessageUponFailure) {
        this.deleteMessageUponFailure = deleteMessageUponFailure;
    }
}
| 5,865 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/configuration/ProcessingConfiguration.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.configuration;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.SqsManager;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventMetadata;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.reader.EventReader;
import java.util.List;
/**
* Data used to configure a {@link EventReader}.
* <p>
* You can use a system properties file to load the configuration or create a {@link ClientConfiguration} object and set
* each attribute. If you do not provide a value for an attribute, a default value will be provided.
* </p>
*/
public interface ProcessingConfiguration {
    /* default configuration values */

    /**
     * The default SQS region; {@value}.
     */
    public static final String DEFAULT_SQS_REGION = "us-east-1";

    /**
     * The default S3 region; {@value}.
     */
    public static final String DEFAULT_S3_REGION = "us-east-1";

    /**
     * The default SQS visibility timeout, in seconds; {@value}.
     */
    public static final int DEFAULT_VISIBILITY_TIMEOUT = 60;

    /**
     * The default S3 thread count; {@value}.
     */
    public static final int DEFAULT_THREAD_COUNT = 1;

    /**
     * The default SQS reader thread count; {@value}.
     */
    public static final int DEFAULT_NUM_OF_PARALLEL_READERS = 1;

    /**
     * The default thread termination delay, in seconds; {@value}.
     */
    public static final int DEFAULT_THREAD_TERMINATION_DELAY_SECONDS = 60;

    /**
     * The default number of events accumulated before emitting; {@value}.
     */
    public static final int DEFAULT_MAX_EVENTS_PER_EMIT = 1;

    /**
     * Whether to enable raw event information in event metadata; {@value}.
     */
    public static final boolean DEFAULT_ENABLE_RAW_EVENT_INFO = false;

    /**
     * Whether to delete SQS messages if there is failure
     * during {@link SqsManager#parseMessage(List)} and {@link EventReader#processSource(CloudTrailSource)}; {@value}.
     */
    public static final boolean DEFAULT_DELETE_MESSAGE_UPON_FAILURE = false;

    /**
     * Get the AWS Credentials provider used to access AWS.
     *
     * @return an
     *     <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html">AWSCredentialsProvider</a>
     *     object.
     */
    public AWSCredentialsProvider getAwsCredentialsProvider();

    /**
     * Gets the SQS URL used to obtain CloudTrail logs.
     *
     * @return the configured SQS URL used to get CloudTrail logs.
     */
    public String getSqsUrl();

    /**
     * Gets the SQS Region from which CloudTrail logs are obtained.
     *
     * @return the SQS Region
     */
    public String getSqsRegion();

    /**
     * Get the visibility timeout value for the SQS queue.
     * <p>
     * The period of time during which Amazon SQS prevents other consuming
     * components from receiving and processing a message.
     *
     * @return the visibility timeout value, in seconds.
     */
    public int getVisibilityTimeout();

    /**
     * Get the AWS S3 Region.
     *
     * @return the Amazon S3 region used.
     */
    public String getS3Region();

    /**
     * Get the number of threads used to download S3 files in parallel.
     *
     * @return the number of threads.
     */
    public int getThreadCount();

    /**
     * Get the number of threads used to poll messages from the SQS queue in parallel.
     *
     * @return the number of parallel SQS reader threads.
     */
    public int getNumOfParallelReaders();

    /**
     * Get the thread termination delay value.
     *
     * @return the thread termination delay, in seconds.
     */
    public int getThreadTerminationDelaySeconds();

    /**
     * Get the maximum number of AWSCloudTrailClientEvents sent to a single invocation of processEvents().
     *
     * @return the maximum number of events that will be buffered per call to processEvents.
     */
    public int getMaxEventsPerEmit();

    /**
     * Indicates whether raw event information is returned in
     * {@link CloudTrailEventMetadata}.
     *
     * @return <code>true</code> if raw event information is enabled; <code>false</code> otherwise.
     */
    public boolean isEnableRawEventInfo();

    /**
     * Indicates whether to delete SQS messages when there is a failure during {@link SqsManager#parseMessage(List)}
     * and {@link EventReader#processSource(CloudTrailSource)}.
     * The SQS message will be deleted upon success regardless of the setting for deleteMessageUponFailure.
     *
     * @return <code>true</code> if delete SQS message upon failure is enabled. Otherwise, <code>false</code>.
     */
    public boolean isDeleteMessageUponFailure();

    /**
     * Validate that all necessary parameters are set in the provided configuration.
     * <p>
     * This method throws an exception if any of the required parameters are <code>null</code>.
     * </p>
     * @throws IllegalStateException if any parameters are <code>null</code>.
     */
    public void validate();
}
| 5,866 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/configuration/PropertiesFileConfiguration.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.configuration;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.ClasspathPropertiesFileCredentialsProvider;
import com.amazonaws.services.cloudtrail.processinglibrary.AWSCloudTrailProcessingExecutor;
import com.amazonaws.services.cloudtrail.processinglibrary.manager.SqsManager;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventMetadata;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.reader.EventReader;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Properties;
/**
* A class used to obtain AWS CloudTrail Processing Library configuration
* information from a classpath properties file.
* <p>
* In addition to this class, you can use {@link ClientConfiguration} to manually set configuration options.
* </p>
*/
public class PropertiesFileConfiguration implements ProcessingConfiguration {
    /* configuration file property names */
    public static final String ACCESS_KEY = "accessKey";
    public static final String SECRET_KEY = "secretKey";
    public static final String SQS_URL = "sqsUrl";
    public static final String SQS_REGION = "sqsRegion";
    public static final String VISIBILITY_TIMEOUT = "visibilityTimeout";
    public static final String S3_REGION = "s3Region";
    public static final String THREAD_COUNT = "threadCount";
    public static final String NUM_OF_PARALLEL_READERS = "numOfParallelReaders";
    public static final String THREAD_TERMINATION_DELAY_SECONDS = "threadTerminationDelaySeconds";
    public static final String MAX_EVENTS_PER_EMIT = "maxEventsPerEmit";
    public static final String ENABLE_RAW_EVENT_INFO = "enableRawEventInfo";
    public static final String DELETE_MESSAGE_UPON_FAILURE = "deleteMessageUponFailure";

    // Typo fix: "spcify" -> "specify" (message now matches ClientConfiguration's).
    private static final String ERROR_CREDENTIALS_PROVIDER_NULL = "CredentialsProvider is null. Either put your " +
            "access key and secret key in the configuration file in your class path, or specify it in the " +
            "ProcessingConfiguration object.";

    /**
     * The
     * <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html">AWS credentials provider</a>
     * used to obtain credentials.
     */
    private AWSCredentialsProvider awsCredentialsProvider;

    /**
     * The SQS Queue URL used to receive events.
     * <p>
     * The Queue must be subscribed to AWS CloudTrail.
     */
    private String sqsUrl = null;

    /**
     * The SQS region to use.
     * <p>
     * If not specified, the default SQS region ({@value ProcessingConfiguration#DEFAULT_SQS_REGION}) will be used.
     * </p>
     */
    private String sqsRegion = DEFAULT_SQS_REGION;

    /**
     * A period of time, in seconds, during which Amazon SQS prevents other consuming components from receiving and
     * processing messages that are currently being processed by the CloudTrail Processing Library on your behalf.
     */
    private int visibilityTimeout = DEFAULT_VISIBILITY_TIMEOUT;

    /**
     * The S3 endpoint specific to a region.
     * <p>
     * If not specified, the default S3 region will be used.
     * </p>
     */
    private String s3Region = DEFAULT_S3_REGION;

    /**
     * The number of threads used to download log files from S3 in parallel.
     * <p>
     * Callbacks can be invoked from any thread.
     * </p>
     */
    private int threadCount = DEFAULT_THREAD_COUNT;

    /**
     * The number of threads used to get SQS messages.
     */
    private int numOfParallelReaders = DEFAULT_NUM_OF_PARALLEL_READERS;

    /**
     * The time allowed, in seconds, for threads to shut down after {@link AWSCloudTrailProcessingExecutor#stop()} is
     * called.
     * <p>
     * Any threads still running beyond this time will be forcibly terminated.
     * </p>
     */
    private int threadTerminationDelaySeconds = DEFAULT_THREAD_TERMINATION_DELAY_SECONDS;

    /**
     * The maximum number of AWSCloudTrailClientEvents sent to a single invocation of processEvents().
     */
    private int maxEventsPerEmit = DEFAULT_MAX_EVENTS_PER_EMIT;

    /**
     * Whether to include raw event information in {@link CloudTrailEventMetadata}.
     */
    private boolean enableRawEventInfo = DEFAULT_ENABLE_RAW_EVENT_INFO;

    /**
     * Whether to delete messages if there is any failure during {@link SqsManager#parseMessage(List)} and
     * {@link EventReader#processSource(CloudTrailSource)}.
     */
    private boolean deleteMessageUponFailure = DEFAULT_DELETE_MESSAGE_UPON_FAILURE;

    /**
     * Creates a {@link PropertiesFileConfiguration} from values provided in a classpath properties file.
     * <p>
     * Properties that are absent from the file keep their documented default values, matching the behavior
     * promised by the field javadoc (previously a missing region property silently became <code>null</code>
     * and a missing numeric property threw {@link NumberFormatException}).
     * </p>
     *
     * @param propertiesFile the classpath properties file to load.
     */
    public PropertiesFileConfiguration(String propertiesFile) {
        //load properties from configuration properties file
        Properties prop = loadProperty(propertiesFile);

        sqsUrl = prop.getProperty(SQS_URL);
        LibraryUtils.checkArgumentNotNull(sqsUrl, "Cannot find SQS URL in properties file.");

        String accessKey = prop.getProperty(ACCESS_KEY);
        String secretKey = prop.getProperty(SECRET_KEY);
        if (accessKey != null && secretKey != null) {
            awsCredentialsProvider = new ClasspathPropertiesFileCredentialsProvider(propertiesFile);
        }

        s3Region = prop.getProperty(S3_REGION, DEFAULT_S3_REGION);
        sqsRegion = prop.getProperty(SQS_REGION, DEFAULT_SQS_REGION);
        visibilityTimeout = getIntProperty(prop, VISIBILITY_TIMEOUT, DEFAULT_VISIBILITY_TIMEOUT);
        threadCount = getIntProperty(prop, THREAD_COUNT, DEFAULT_THREAD_COUNT);
        numOfParallelReaders = getIntProperty(prop, NUM_OF_PARALLEL_READERS, DEFAULT_NUM_OF_PARALLEL_READERS);
        threadTerminationDelaySeconds =
                getIntProperty(prop, THREAD_TERMINATION_DELAY_SECONDS, DEFAULT_THREAD_TERMINATION_DELAY_SECONDS);
        maxEventsPerEmit = getIntProperty(prop, MAX_EVENTS_PER_EMIT, DEFAULT_MAX_EVENTS_PER_EMIT);
        enableRawEventInfo = getBooleanProperty(prop, ENABLE_RAW_EVENT_INFO, DEFAULT_ENABLE_RAW_EVENT_INFO);
        deleteMessageUponFailure =
                getBooleanProperty(prop, DELETE_MESSAGE_UPON_FAILURE, DEFAULT_DELETE_MESSAGE_UPON_FAILURE);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AWSCredentialsProvider getAwsCredentialsProvider() {
        return awsCredentialsProvider;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getSqsUrl() {
        return sqsUrl;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getSqsRegion() {
        return sqsRegion;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getVisibilityTimeout() {
        return visibilityTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getS3Region() {
        return s3Region;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getThreadCount() {
        return threadCount;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getNumOfParallelReaders() {
        return numOfParallelReaders;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getThreadTerminationDelaySeconds() {
        return threadTerminationDelaySeconds;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int getMaxEventsPerEmit() {
        return maxEventsPerEmit;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isEnableRawEventInfo() {
        return enableRawEventInfo;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isDeleteMessageUponFailure() {
        return deleteMessageUponFailure;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void validate() {
        LibraryUtils.checkArgumentNotNull(getAwsCredentialsProvider(), ERROR_CREDENTIALS_PROVIDER_NULL);
        LibraryUtils.checkArgumentNotNull(getSqsUrl(), "SQS URL is null.");
        LibraryUtils.checkArgumentNotNull(getSqsRegion(), "SQS Region is null.");
        LibraryUtils.checkArgumentNotNull(getS3Region(), "S3 Region is null.");
        LibraryUtils.checkCondition(getMaxEventsPerEmit() <= 0, "Maximum Events Per Emit is a non-positive integer.");
        // Fixed: the messages for the next two checks were swapped, so a bad thread
        // count reported "Num of Parallel Readers" and vice versa.
        LibraryUtils.checkCondition(getThreadCount() <= 0, "Thread Count is a non-positive integer.");
        LibraryUtils.checkCondition(getNumOfParallelReaders() <= 0, "Num of Parallel Readers is a non-positive integer.");
        LibraryUtils.checkCondition(getThreadTerminationDelaySeconds() <= 0, "Thread Termination Delay Seconds is a non-positive integer.");
        LibraryUtils.checkCondition(getVisibilityTimeout() <= 0, "Visibility Timeout is a non-positive integer.");
    }

    /**
     * Load properties from a classpath property file.
     *
     * @param propertiesFile the classpath properties file to read.
     * @return a <a href="http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html">Properties</a> object
     *     containing the properties set in the file.
     * @throws IllegalStateException if the file cannot be found or read.
     */
    private Properties loadProperty(String propertiesFile) {
        Properties prop = new Properties();
        // try-with-resources ensures the stream is closed even when load() throws
        // (previously the stream leaked on a failed load).
        try (InputStream in = getClass().getResourceAsStream(propertiesFile)) {
            if (in == null) {
                // getResourceAsStream returns null for a missing resource; fail with a
                // clear message instead of an opaque NullPointerException.
                throw new IllegalStateException("Cannot find property file at " + propertiesFile);
            }
            prop.load(in);
        } catch (IOException e) {
            throw new IllegalStateException("Cannot load property file at " + propertiesFile, e);
        }
        return prop;
    }

    /**
     * Convert a string representation of a property to an integer type.
     *
     * @param prop the {@link Properties} needs conversion.
     * @param name a name to evaluate in the property file.
     * @param defaultValue the value to use when the property is absent.
     * @return an integer representation of the value associated with the property name,
     *     or <code>defaultValue</code> if the property is not set.
     */
    private int getIntProperty(Properties prop, String name, int defaultValue) {
        String propertyValue = prop.getProperty(name);
        return propertyValue == null ? defaultValue : Integer.parseInt(propertyValue);
    }

    /**
     * Convert a string representation of a property to a boolean type.
     *
     * @param prop the {@link Properties} needs conversion.
     * @param name a name to evaluate in the property file.
     * @param defaultValue the value to use when the property is absent.
     * @return a boolean representation of the value associated with the property name,
     *     or <code>defaultValue</code> if the property is not set.
     */
    private boolean getBooleanProperty(Properties prop, String name, boolean defaultValue) {
        String propertyValue = prop.getProperty(name);
        return propertyValue == null ? defaultValue : Boolean.parseBoolean(propertyValue);
    }
}
| 5,867 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/configuration/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* Classes that are used to configure an {@link
* com.amazonaws.services.cloudtrail.processinglibrary.AWSCloudTrailProcessingExecutor}.
*/
package com.amazonaws.services.cloudtrail.processinglibrary.configuration;
| 5,868 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/EventSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
import java.io.IOException;
/**
 * Provides a set of methods to serialize events from an AWS CloudTrail log file in a
 * streaming fashion.
 * <p>
 * Callers should alternate {@link #hasNextEvent()} and {@link #getNextEvent()} until the
 * log is exhausted, then {@link #close()} the serializer.
 */
public interface EventSerializer extends AutoCloseable {

    /**
     * Indicates if there are more events in the current log to serialize.
     *
     * @return <code>true</code> if there are more events to serialize; <code>false</code> otherwise.
     * @throws IOException if the log could not be read.
     */
    boolean hasNextEvent() throws IOException;

    /**
     * Get the next event in the log. Call only after {@link #hasNextEvent()} has
     * returned <code>true</code>.
     *
     * @return the {@link CloudTrailEvent}.
     * @throws IOException if the log could not be read.
     */
    CloudTrailEvent getNextEvent() throws IOException;

    /**
     * Close the underlying input stream.
     *
     * @throws IOException if the input stream could not be accessed or closed.
     */
    @Override
    void close() throws IOException;
}
| 5,869 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/CloudTrailSourceSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.SourceSerializerFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SQSBasedSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SourceAttributeKeys;
import com.amazonaws.services.cloudtrail.processinglibrary.model.internal.SourceType;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.SNSMessageBodyExtractor;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.SourceIdentifier;
import com.amazonaws.services.sqs.model.Message;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
/**
 * The <code>CloudTrailSourceSerializer</code> extracts CloudTrail log file information from notifications that CloudTrail
 * sends to an SNS topic. Use {@link SourceSerializerFactory#createCloudTrailSourceSerializer()} for default initialization.
 */
public class CloudTrailSourceSerializer implements SourceSerializer {
    private static final String S3_BUCKET_NAME = "s3Bucket";
    private static final String S3_OBJECT_KEY = "s3ObjectKey";

    private final SNSMessageBodyExtractor messageExtractor;
    private final ObjectMapper mapper;
    private final SourceIdentifier sourceIdentifier;

    /**
     * Constructs a serializer from its collaborators.
     *
     * @param messageExtractor extracts the SNS message body embedded in an SQS message.
     * @param mapper the Jackson mapper used to read the S3 object-key list.
     * @param sourceIdentifier classifies S3 object keys (CloudTrail log vs. other).
     */
    public CloudTrailSourceSerializer(SNSMessageBodyExtractor messageExtractor, ObjectMapper mapper, SourceIdentifier sourceIdentifier) {
        this.messageExtractor = messageExtractor;
        this.mapper = mapper;
        this.sourceIdentifier = sourceIdentifier;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CloudTrailSource getSource(Message sqsMessage) throws IOException {
        List<CloudTrailLog> cloudTrailLogs = new ArrayList<>();
        JsonNode messageNode = messageExtractor.getMessageBody(sqsMessage);
        addCloudTrailLogsAndMessageAttributes(sqsMessage, cloudTrailLogs, messageNode);
        addRestMessageAttributes(sqsMessage, messageNode);
        return new SQSBasedSource(sqsMessage, cloudTrailLogs);
    }

    /**
     * As long as there is at least one CloudTrail log object:
     * <ul>
     * <li>Add the CloudTrail log object key to the list.</li>
     * <li>Add <code>accountId</code> extracted from log object key to <code>sqsMessage</code>.</li>
     * <li>Add {@link SourceType#CloudTrailLog} to the <code>sqsMessage</code>.</li>
     * </ul>
     * If there is no CloudTrail log object and it is a valid CloudTrail message, CPL adds only {@link SourceType#Other}
     * to the <code>sqsMessage</code>.
     *
     * @param sqsMessage the SQS message whose attributes are updated in place.
     * @param cloudTrailLogs the mutable list to which discovered CloudTrail logs are appended.
     * @param messageNode the parsed SNS message body.
     * @throws IOException if the object-key list cannot be deserialized.
     */
    private void addCloudTrailLogsAndMessageAttributes(Message sqsMessage, List<CloudTrailLog> cloudTrailLogs, JsonNode messageNode) throws IOException {
        SourceType sourceType = SourceType.Other;
        String bucketName = messageNode.get(S3_BUCKET_NAME).textValue();
        List<String> objectKeys = mapper.readValue(messageNode.get(S3_OBJECT_KEY).traverse(), new TypeReference<List<String>>() {});
        for (String objectKey : objectKeys) {
            SourceType currSourceType = sourceIdentifier.identify(objectKey);
            if (currSourceType == SourceType.CloudTrailLog) {
                cloudTrailLogs.add(new CloudTrailLog(bucketName, objectKey));
                sourceType = currSourceType;
                LibraryUtils.setMessageAccountId(sqsMessage, objectKey);
            }
        }
        sqsMessage.addAttributesEntry(SourceAttributeKeys.SOURCE_TYPE.getAttributeKey(), sourceType.name());
    }

    /**
     * Excluding S3_BUCKET_NAME and S3_OBJECT_KEY, add all other attributes from the message body to <code>sqsMessage</code>.
     *
     * @param sqsMessage The SQS message.
     * @param messageNode The message body.
     */
    private void addRestMessageAttributes(Message sqsMessage, JsonNode messageNode) {
        Iterator<String> it = messageNode.fieldNames();
        while (it.hasNext()) {
            String key = it.next();
            if (!key.equals(S3_OBJECT_KEY) && !key.equals(S3_BUCKET_NAME)) {
                sqsMessage.addAttributesEntry(key, messageNode.get(key).textValue());
            }
        }
    }
}
| 5,870 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/S3SourceSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.SourceSerializerFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SQSBasedSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SourceAttributeKeys;
import com.amazonaws.services.cloudtrail.processinglibrary.model.internal.SourceType;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.SourceIdentifier;
import com.amazonaws.services.sqs.model.Message;
import com.fasterxml.jackson.core.JsonPointer;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * The <code>S3SourceSerializer</code> extracts CloudTrail log file information from notifications sent directly
 * by Amazon S3. Use {@link SourceSerializerFactory#createS3SourceSerializer()} for initialization.
 */
public class S3SourceSerializer implements SourceSerializer {
    private static final JsonPointer S3_BUCKET_NAME = JsonPointer.compile("/s3/bucket/name");
    private static final JsonPointer S3_OBJECT_KEY = JsonPointer.compile("/s3/object/key");
    private static final String RECORDS = "Records";
    private static final String EVENT_NAME = "eventName";

    private final ObjectMapper mapper;
    private final SourceIdentifier sourceIdentifier;

    /**
     * Constructs a serializer from its collaborators.
     *
     * @param mapper the Jackson mapper used to parse the S3 notification body.
     * @param sourceIdentifier classifies S3 object keys (CloudTrail log vs. other).
     */
    public S3SourceSerializer(ObjectMapper mapper, SourceIdentifier sourceIdentifier) {
        this.mapper = mapper;
        this.sourceIdentifier = sourceIdentifier;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CloudTrailSource getSource(Message sqsMessage) throws IOException {
        JsonNode s3MessageNode = mapper.readTree(sqsMessage.getBody());
        return getCloudTrailSource(sqsMessage, s3MessageNode);
    }

    /**
     * Builds a {@link CloudTrailSource} from an already-parsed S3 notification body.
     *
     * @param sqsMessage the SQS message whose attributes are updated in place.
     * @param s3MessageNode the parsed S3 notification body.
     * @return the source containing any discovered CloudTrail logs.
     * @throws IOException declared for API symmetry with other serializers.
     */
    public CloudTrailSource getCloudTrailSource(Message sqsMessage, JsonNode s3MessageNode) throws IOException {
        JsonNode s3RecordsNode = s3MessageNode.get(RECORDS);
        List<CloudTrailLog> cloudTrailLogs = new ArrayList<>();
        addCloudTrailLogsAndMessageAttributes(sqsMessage, s3RecordsNode, cloudTrailLogs);
        return new SQSBasedSource(sqsMessage, cloudTrailLogs);
    }

    /**
     * As long as there is at least one CloudTrail log object:
     * <ul>
     * <li>Add the CloudTrail log object key to the list.</li>
     * <li>Add <code>accountId</code> extracted from log object key to <code>sqsMessage</code>.</li>
     * <li>Add {@link SourceType#CloudTrailLog} to the <code>sqsMessage</code>.</li>
     * </ul>
     * If there is no CloudTrail log object and it is a valid S3 message, CPL adds only {@link SourceType#Other}
     * to the <code>sqsMessage</code>.
     *
     * @param sqsMessage the SQS message whose attributes are updated in place.
     * @param s3RecordsNode the "Records" array from the S3 notification.
     * @param cloudTrailLogs the mutable list to which discovered CloudTrail logs are appended.
     */
    private void addCloudTrailLogsAndMessageAttributes(Message sqsMessage, JsonNode s3RecordsNode, List<CloudTrailLog> cloudTrailLogs) {
        SourceType sourceType = SourceType.Other;
        for (JsonNode s3Record : s3RecordsNode) {
            String bucketName = s3Record.at(S3_BUCKET_NAME).textValue();
            String objectKey = s3Record.at(S3_OBJECT_KEY).textValue();
            String eventName = s3Record.get(EVENT_NAME).textValue();
            SourceType currSourceType = sourceIdentifier.identifyWithEventName(objectKey, eventName);
            if (currSourceType == SourceType.CloudTrailLog) {
                cloudTrailLogs.add(new CloudTrailLog(bucketName, objectKey));
                sourceType = currSourceType;
                LibraryUtils.setMessageAccountId(sqsMessage, objectKey);
            }
        }
        sqsMessage.addAttributesEntry(SourceAttributeKeys.SOURCE_TYPE.getAttributeKey(), sourceType.name());
    }
}
| 5,871 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/DefaultEventSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventMetadata;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
import com.amazonaws.services.cloudtrail.processinglibrary.model.LogDeliveryInfo;
import com.fasterxml.jackson.core.JsonParser;
import java.io.IOException;
/**
 * Default implementation of event serializer. Parses events without retaining each
 * event's raw JSON text or character offsets.
 */
public class DefaultEventSerializer extends AbstractEventSerializer {
    private final CloudTrailLog ctLog;

    /**
     * Constructs a serializer and positions the parser at the first record.
     *
     * @param ctLog the CloudTrail log being read.
     * @param jsonParser the JSON parser used to serialize events.
     * @throws IOException if the log header cannot be read or is not a CloudTrail log.
     */
    public DefaultEventSerializer(CloudTrailLog ctLog, JsonParser jsonParser) throws IOException {
        super(jsonParser);
        this.ctLog = ctLog;
        readArrayHeader();
    }

    /**
     * {@inheritDoc}
     * <p>
     * This implementation does not track offsets or raw text, so the delivery info
     * carries -1 offsets and a <code>null</code> raw event.
     */
    @Override
    public CloudTrailEventMetadata getMetadata(int charStart, int charEnd) {
        return new LogDeliveryInfo(ctLog, -1, -1, null);
    }
}
| 5,872 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/SourceSerializerChain.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.SourceSerializerFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.sqs.model.Message;
import com.amazonaws.util.CollectionUtils;
import com.fasterxml.jackson.core.JsonProcessingException;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
/**
 * {@link SourceSerializer} implementation that chains together multiple source serializers. When a caller passes {@link Message}
 * to this serializer, it calls all the serializers in the chain, in the original order specified, until one can parse
 * and return {@link CloudTrailSource}. If all source serializers in the chain are called, and they cannot successfully
 * parse the message, then this class throws an {@link IOException} that indicates that no sources are available.
 * <p>
 * This class remembers the first source serializer in the chain that can successfully parse messages, and will
 * continue to use that serializer when there are future messages.
 * </p>
 */
public class SourceSerializerChain implements SourceSerializer {
    private final List<SourceSerializer> sourceSerializers;

    // Cache of the serializer that last succeeded; tried first for subsequent messages.
    // NOTE(review): read/written without synchronization — assumes a single polling thread; confirm with callers.
    private SourceSerializer lastUsedSourceSerializer;

    /**
     * Constructs a new <code>SourceSerializerChain</code> with the specified source serializers.
     * <p>
     * Use {@link SourceSerializerFactory#createSourceSerializerChain()} for default construction.
     * </p>
     * <p>
     * When sources are required from this serializer, it will call each of these source serializers in the same order
     * specified here until one of them returns {@link CloudTrailSource}.
     * </p>
     *
     * @param sourceSerializers A list of at least one {@link SourceSerializer} implementation instance.
     * @throws IllegalArgumentException if the list is null, empty, or contains a null element.
     */
    public SourceSerializerChain(List<? extends SourceSerializer> sourceSerializers) {
        if (CollectionUtils.isNullOrEmpty(sourceSerializers) || sourceSerializers.contains(null)) {
            throw new IllegalArgumentException("No source serializer specified or contains null serializers.");
        }
        this.sourceSerializers = new LinkedList<>(sourceSerializers);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public CloudTrailSource getSource(Message sqsMessage) throws IOException {
        ExceptionChain exceptionChain = new ExceptionChain();
        // Fast path: retry whichever serializer handled the previous message.
        if (lastUsedSourceSerializer != null) {
            CloudTrailSource source = getCloudTrailSource(sqsMessage, lastUsedSourceSerializer, exceptionChain);
            if (source != null) {
                return source;
            }
        }
        for (SourceSerializer serializer : sourceSerializers) {
            // already tried lastUsedSourceSerializer, so skip it and move on
            if (serializer == lastUsedSourceSerializer) {
                continue;
            }
            CloudTrailSource source = getCloudTrailSource(sqsMessage, serializer, exceptionChain);
            if (source != null) {
                lastUsedSourceSerializer = serializer;
                return source;
            }
        }
        throw exceptionChain.throwOut();
    }

    /**
     * Invokes a single serializer, returning <code>null</code> (and recording the failure)
     * when it cannot parse the message.
     */
    private CloudTrailSource getCloudTrailSource(Message sqsMessage, SourceSerializer serializer, ExceptionChain exceptionChain) throws IOException {
        try {
            return serializer.getSource(sqsMessage);
        } catch (Exception e) {
            exceptionChain.addSuppressedException(e);
        }
        return null;
    }

    /**
     * Accumulates, as suppressed exceptions, the reason every chained serializer failed
     * to parse the message. Static nested: it needs no reference to the enclosing chain.
     */
    private static class ExceptionChain {
        private final IOException exception;

        ExceptionChain() {
            exception = new IOException("Unable to parse the message from any source serializers in the chain.");
        }

        void addSuppressedException(Exception e) {
            exception.addSuppressed(e);
        }

        IOException throwOut() {
            return exception;
        }
    }
}
| 5,873 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/CloudTrailValidationMessageSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.SourceSerializerFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SQSBasedSource;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SourceAttributeKeys;
import com.amazonaws.services.cloudtrail.processinglibrary.model.internal.SourceType;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.SNSMessageBodyExtractor;
import com.amazonaws.services.sqs.model.Message;
import java.io.IOException;
/**
 * The <code>CloudTrailValidationMessageSerializer</code> extracts CloudTrail validation message from notifications that CloudTrail
 * sends to an SNS topic. Use {@link SourceSerializerFactory#createCloudTrailValidationMessageSerializer()} for default initialization.
 */
public class CloudTrailValidationMessageSerializer implements SourceSerializer {
    private static final String CLOUD_TRAIL_VALIDATION_MESSAGE = "CloudTrail validation message.";

    private final SNSMessageBodyExtractor messageExtractor;

    /**
     * Constructs a serializer that recognizes CloudTrail validation messages.
     *
     * @param messageExtractor extracts the SNS message text embedded in an SQS message.
     */
    public CloudTrailValidationMessageSerializer(SNSMessageBodyExtractor messageExtractor) {
        this.messageExtractor = messageExtractor;
    }

    /**
     * {@inheritDoc}
     * <p>
     * Returns <code>null</code> when the message is not a CloudTrail validation message,
     * allowing the next serializer in a chain to try it.
     */
    @Override
    public CloudTrailSource getSource(Message sqsMessage) throws IOException {
        // Constant-first equals avoids an NPE if the extracted message text is null.
        if (CLOUD_TRAIL_VALIDATION_MESSAGE.equals(messageExtractor.getMessageText(sqsMessage))) {
            sqsMessage.addAttributesEntry(SourceAttributeKeys.SOURCE_TYPE.getAttributeKey(), SourceType.CloudTrailValidationMessage.name());
            return new SQSBasedSource(sqsMessage, null);
        }
        return null;
    }
}
| 5,874 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/RawLogDeliveryEventSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventMetadata;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
import com.amazonaws.services.cloudtrail.processinglibrary.model.LogDeliveryInfo;
import com.fasterxml.jackson.core.JsonParser;
import java.io.IOException;
/**
 * The implementation of raw CloudTrail log's event serializer, which preserves each
 * event's original JSON text alongside its parsed form.
 */
public class RawLogDeliveryEventSerializer extends AbstractEventSerializer {
    private final String logFile;
    private final CloudTrailLog ctLog;

    /**
     * Constructs a serializer and positions the parser at the first record.
     *
     * @param logFile the full log file content, used to recover each event's raw JSON text.
     * @param ctLog the CloudTrail log being read.
     * @param jsonParser the JSON parser used to serialize events.
     * @throws IOException if the log header cannot be read or is not a CloudTrail log.
     */
    public RawLogDeliveryEventSerializer(String logFile, CloudTrailLog ctLog, JsonParser jsonParser) throws IOException {
        super(jsonParser);
        this.ctLog = ctLog;
        this.logFile = logFile;
        readArrayHeader();
    }

    /**
     * Find the raw event in string format from the log file content based on character
     * start index and end index.
     * <p>
     * NOTE(review): assumes an opening curly brace exists within [charStart, charEnd];
     * otherwise <code>indexOf</code> returns -1 and <code>substring</code> throws —
     * confirm the offsets always come from the event parser.
     */
    @Override
    public CloudTrailEventMetadata getMetadata(int charStart, int charEnd) {
        // Jackson's getTokenLocation points at the preceding comma; advance to the first open curly brace.
        String rawEvent = logFile.substring(charStart, charEnd + 1);
        int offset = rawEvent.indexOf("{");
        rawEvent = rawEvent.substring(offset);
        return new LogDeliveryInfo(ctLog, charStart + offset, charEnd, rawEvent);
    }
}
| 5,875 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/SourceSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.sqs.model.Message;
import java.io.IOException;
/**
 * Interface for getting CloudTrail log file information from {@link CloudTrailSource}. Implementations can parse
 * messages polled from an SQS queue to extract CloudTrail log file information. The following are provided implementations:
 * <p>
 * {@link CloudTrailSourceSerializer}, {@link S3SourceSerializer}, {@link S3SNSSourceSerializer}, {@link SourceSerializerChain}.
 * </p>
 */
public interface SourceSerializer {
    /**
     * Get CloudTrail log file information by parsing a single SQS message.
     *
     * @param sqsMessage The message polled from the SQS queue.
     * @return {@link CloudTrailSource} that contains log file information.
     * @throws IOException If <code>sqsMessage</code> is unrecognized.
     */
    CloudTrailSource getSource(Message sqsMessage) throws IOException;
}
| 5,876 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/AbstractEventSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventData;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEventMetadata;
import com.amazonaws.services.cloudtrail.processinglibrary.model.internal.*;
import com.amazonaws.services.cloudtrail.processinglibrary.model.internal.OnBehalfOf;
import com.amazonaws.services.cloudtrail.processinglibrary.model.internal.UserIdentity;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.text.ParseException;
import java.util.*;
/**
* Abstract base class for Event Serializer implementations.
*/
public abstract class AbstractEventSerializer implements EventSerializer {
    private static final Log logger = LogFactory.getLog(AbstractEventSerializer.class);
    // Top-level JSON field name that holds the array of events in a CloudTrail log file.
    private static final String RECORDS = "Records";
    // Highest event version this library knows about; newer versions are logged at debug, not rejected.
    private static final double SUPPORTED_EVENT_VERSION = 1.08d;
    /**
     * A Jackson JSON Parser object, shared by all parsing methods of this class.
     */
    private JsonParser jsonParser;
    /**
     * Construct an AbstractEventSerializer object.
     *
     * @param jsonParser a Jackson
     * <a href="http://jackson.codehaus.org/1.4.0/javadoc/org/codehaus/jackson/JsonParser.html">JsonParser</a> object to
     * use for interpreting JSON objects.
     * @throws IOException declared for subclass flexibility; this constructor itself never throws.
     */
    public AbstractEventSerializer(JsonParser jsonParser) throws IOException {
        this.jsonParser = jsonParser;
    }
    /**
     * An abstract method that returns a
     * {@link CloudTrailEventMetadata} object describing where an event came from.
     *
     * @param charStart the character offset at which the event data begins.
     * @param charEnd the character offset at which the event data ends.
     * @return the event metadata.
     */
    public abstract CloudTrailEventMetadata getMetadata(int charStart, int charEnd);
    /**
     * Read the header of an AWS CloudTrail log and leave the parser positioned at the
     * start of the "Records" array.
     *
     * @throws JsonParseException if the log could not be parsed.
     * @throws IOException if the log could not be opened or accessed.
     */
    protected void readArrayHeader() throws IOException {
        // A CloudTrail log file has the shape: {"Records": [ {event}, {event}, ... ]}
        if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a Json object", jsonParser.getCurrentLocation());
        }
        jsonParser.nextToken();
        // The first field of the top-level object must be "Records".
        if (!jsonParser.getText().equals(RECORDS)) {
            throw new JsonParseException("Not a CloudTrail log", jsonParser.getCurrentLocation());
        }
        // Its value must be a JSON array of event objects.
        if (jsonParser.nextToken() != JsonToken.START_ARRAY) {
            throw new JsonParseException("Not a CloudTrail log", jsonParser.getCurrentLocation());
        }
    }
/**
* Indicates whether the CloudTrail log has more events to read.
*
* @return <code>true</code> if the log contains more events; <code>false</code> otherwise.
* @throws IOException if the log could not be opened or accessed.
*/
public boolean hasNextEvent() throws IOException {
/* In Fasterxml parser, hasNextEvent will consume next token. So do not call it multiple times. */
JsonToken nextToken = jsonParser.nextToken();
return nextToken == JsonToken.START_OBJECT || nextToken == JsonToken.START_ARRAY;
}
    /**
     * Close the JSON parser object used to read the CloudTrail log.
     *
     * @throws IOException if the underlying input could not be closed.
     */
    public void close() throws IOException {
        jsonParser.close();
    }
    /**
     * Get the next event from the CloudTrail log and parse it.
     * <p>
     * Must be called only after {@link #hasNextEvent()} has returned <code>true</code>,
     * since that call positions the parser at the event's opening token.
     *
     * @return a {@link CloudTrailEvent} that represents the
     * parsed event.
     * @throws IOException if the event could not be parsed.
     */
    public CloudTrailEvent getNextEvent() throws IOException {
        CloudTrailEventData eventData = new CloudTrailEventData();
        String key;
        /* Get next CloudTrailEvent event from log file. When failed to parse a event,
         * IOException will be thrown. In this case, the charEnd index the place we
         * encountered parsing error. */
        // return the starting location of the current token; that is, position of the first character
        // from input that starts the current token
        int charStart = (int) jsonParser.getTokenLocation().getCharOffset();
        // Walk every field of the event object; known fields get typed parsing,
        // everything else falls through to parseDefaultValue.
        while(jsonParser.nextToken() != JsonToken.END_OBJECT) {
            key = jsonParser.getCurrentName();
            switch (key) {
                case "eventVersion":
                    String eventVersion = jsonParser.nextTextValue();
                    // NOTE(review): versions are compared as doubles, so e.g. "1.10" parses as 1.1;
                    // this only logs at debug, but confirm the comparison matches versioning intent.
                    // Double.parseDouble throws NumberFormatException on a non-numeric version string.
                    if (Double.parseDouble(eventVersion) > SUPPORTED_EVENT_VERSION) {
                        logger.debug(String.format("EventVersion %s is not supported by CloudTrail.", eventVersion));
                    }
                    eventData.add(key, eventVersion);
                    break;
                case "userIdentity":
                    this.parseUserIdentity(eventData);
                    break;
                case "eventTime":
                    eventData.add(CloudTrailEventField.eventTime.name(), convertToDate(jsonParser.nextTextValue()));
                    break;
                case "eventID":
                    eventData.add(key, convertToUUID(jsonParser.nextTextValue()));
                    break;
                case "readOnly":
                    this.parseReadOnly(eventData);
                    break;
                case "resources":
                    this.parseResources(eventData);
                    break;
                case "managementEvent":
                    this.parseManagementEvent(eventData);
                    break;
                case "insightDetails":
                    this.parseInsightDetails(eventData);
                    break;
                case "addendum":
                    this.parseAddendum(eventData);
                    break;
                case "tlsDetails":
                    this.parseTlsDetails(eventData);
                    break;
                default:
                    // Unrecognized fields are stored generically so no data is dropped.
                    eventData.add(key, parseDefaultValue(key));
                    break;
            }
        }
        // Derive the top-level accountId after all fields have been read.
        this.setAccountId(eventData);
        // event's last character position in the log file.
        int charEnd = (int) jsonParser.getTokenLocation().getCharOffset();
        CloudTrailEventMetadata metaData = getMetadata(charStart, charEnd);
        return new CloudTrailEvent(eventData, metaData);
    }
/**
* Set AccountId in CloudTrailEventData top level from either recipientAccountID or from UserIdentity.
* If recipientAccountID exists then recipientAccountID is set to accountID; otherwise, accountID is retrieved
* from UserIdentity.
*
* There are 2 places accountID would appear in UserIdentity: first is the UserIdentity top level filed
* and the second place is accountID inside SessionIssuer. If accountID exists in the top level field, then it is
* set to accountID; otherwise, accountID is retrieved from SessionIssuer.
*
* If all 3 places cannot find accountID, then accountID is not set.
*
* @param eventData the event data to set.
*/
private void setAccountId(CloudTrailEventData eventData) {
if (eventData.getRecipientAccountId() != null) {
eventData.add("accountId", eventData.getRecipientAccountId());
return;
}
if (eventData.getUserIdentity() != null &&
eventData.getUserIdentity().getAccountId() != null) {
eventData.add("accountId", eventData.getUserIdentity().getAccountId());
return;
}
if (eventData.getUserIdentity() != null &&
eventData.getUserIdentity().getAccountId() == null &&
eventData.getUserIdentity().getSessionContext() != null &&
eventData.getUserIdentity().getSessionContext().getSessionIssuer() != null &&
eventData.getUserIdentity().getSessionContext().getSessionIssuer().getAccountId() != null) {
eventData.add("accountId", eventData.getUserIdentity().getSessionContext().getSessionIssuer().getAccountId());
}
}
    /**
     * Parses the {@link UserIdentity} object in CloudTrailEventData.
     *
     * @param eventData {@link CloudTrailEventData} needs to parse.
     * @throws IOException if the userIdentity JSON is malformed.
     */
    private void parseUserIdentity(CloudTrailEventData eventData) throws IOException {
        JsonToken nextToken = jsonParser.nextToken();
        // A JSON null is recorded explicitly so the field is present but empty.
        if (nextToken == JsonToken.VALUE_NULL) {
            eventData.add(CloudTrailEventField.userIdentity.name(), null);
            return;
        }

        if (nextToken != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a UserIdentity object", jsonParser.getCurrentLocation());
        }

        UserIdentity userIdentity = new UserIdentity();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "type":
                    userIdentity.add(CloudTrailEventField.type.name(), jsonParser.nextTextValue());
                    break;
                case "principalId":
                    userIdentity.add(CloudTrailEventField.principalId.name(), jsonParser.nextTextValue());
                    break;
                case "arn":
                    userIdentity.add(CloudTrailEventField.arn.name(), jsonParser.nextTextValue());
                    break;
                case "accountId":
                    userIdentity.add(CloudTrailEventField.accountId.name(), jsonParser.nextTextValue());
                    break;
                case "accessKeyId":
                    userIdentity.add(CloudTrailEventField.accessKeyId.name(), jsonParser.nextTextValue());
                    break;
                case "userName":
                    userIdentity.add(CloudTrailEventField.userName.name(), jsonParser.nextTextValue());
                    break;
                case "sessionContext":
                    // Nested object; delegated parser attaches it to userIdentity.
                    this.parseSessionContext(userIdentity);
                    break;
                case "invokedBy":
                    userIdentity.add(CloudTrailEventField.invokedBy.name(), jsonParser.nextTextValue());
                    break;
                case "identityProvider":
                    userIdentity.add(CloudTrailEventField.identityProvider.name(), jsonParser.nextTextValue());
                    break;
                case "credentialId":
                    userIdentity.add(CloudTrailEventField.credentialId.name(), jsonParser.nextTextValue());
                    break;
                case "onBehalfOf":
                    // Nested object; delegated parser attaches it to userIdentity.
                    this.parseOnBehalfOf(userIdentity);
                    break;
                default:
                    userIdentity.add(key, parseDefaultValue(key));
                    break;
            }
        }
        eventData.add(CloudTrailEventField.userIdentity.name(), userIdentity);
    }
private void parseOnBehalfOf(UserIdentity userIdentity) throws IOException{
if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
throw new JsonParseException("Not a SessionContext object", jsonParser.getCurrentLocation());
}
OnBehalfOf onBehalfOf = new OnBehalfOf();
while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
String key = jsonParser.getCurrentName();
switch (key) {
case "userId":
onBehalfOf.add(CloudTrailEventField.onBehalfOfUserId.name(), jsonParser.nextTextValue());
break;
case "identityStoreArn":
onBehalfOf.add(CloudTrailEventField.onBehalfOfIdentityStoreArn.name(), jsonParser.nextTextValue());
break;
default:
onBehalfOf.add(key, parseDefaultValue(key));
break;
}
userIdentity.add(CloudTrailEventField.onBehalfOf.name(), onBehalfOf);
}
}
    /**
     * Parses the {@link SessionContext} object.
     *
     * @param userIdentity the {@link UserIdentity} the parsed session context is attached to.
     * @throws IOException if the sessionContext JSON is malformed.
     * @throws JsonParseException if the value is not a JSON object.
     */
    private void parseSessionContext(UserIdentity userIdentity) throws IOException {
        if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a SessionContext object", jsonParser.getCurrentLocation());
        }

        SessionContext sessionContext = new SessionContext();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "attributes":
                    sessionContext.add(CloudTrailEventField.attributes.name(), parseAttributes());
                    break;
                case "sessionIssuer":
                    sessionContext.add(CloudTrailEventField.sessionIssuer.name(), parseSessionIssuer(sessionContext));
                    break;
                case "webIdFederationData":
                    sessionContext.add(CloudTrailEventField.webIdFederationData.name(), parseWebIdentitySessionContext(sessionContext));
                    break;
                default:
                    sessionContext.add(key, parseDefaultValue(key));
                    break;
            }
        }

        userIdentity.add(CloudTrailEventField.sessionContext.name(), sessionContext);
    }
    /**
     * Parses the {@link InsightDetails} in CloudTrailEventData.
     *
     * @param eventData {@link CloudTrailEventData} needs to parse.
     * @throws IOException if the insightDetails JSON is malformed.
     */
    private void parseInsightDetails(CloudTrailEventData eventData) throws IOException {
        JsonToken nextToken = jsonParser.nextToken();
        // A JSON null is recorded explicitly so the field is present but empty.
        if (nextToken == JsonToken.VALUE_NULL) {
            eventData.add(CloudTrailEventField.insightDetails.name(), null);
            return;
        }

        if (nextToken != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a InsightDetails object", jsonParser.getCurrentLocation());
        }

        InsightDetails insightDetails = new InsightDetails();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "eventName":
                    insightDetails.add(CloudTrailEventField.eventName.name(), jsonParser.nextTextValue());
                    break;
                case "eventSource":
                    insightDetails.add(CloudTrailEventField.eventSource.name(), jsonParser.nextTextValue());
                    break;
                case "insightType":
                    insightDetails.add(CloudTrailEventField.insightType.name(), jsonParser.nextTextValue());
                    break;
                case "state":
                    insightDetails.add(CloudTrailEventField.state.name(), jsonParser.nextTextValue());
                    break;
                case "insightContext":
                    // Nested object; delegated parser attaches it to insightDetails.
                    this.parseInsightContext(insightDetails);
                    break;
                case "errorCode":
                    insightDetails.add(CloudTrailEventField.errorCode.name(), jsonParser.nextTextValue());
                    break;
                default:
                    insightDetails.add(key, parseDefaultValue(key));
                    break;
            }
        }
        eventData.add(CloudTrailEventField.insightDetails.name(), insightDetails);
    }
    /**
     * Parses the {@link InsightContext} object.
     *
     * @param insightDetails the {@link InsightDetails} the parsed context is attached to.
     * @throws IOException if the insightContext JSON is malformed.
     * @throws JsonParseException if the value is not a JSON object.
     */
    private void parseInsightContext(InsightDetails insightDetails) throws IOException {
        if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a InsightContext object", jsonParser.getCurrentLocation());
        }

        InsightContext insightContext = new InsightContext();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "statistics":
                    this.parseInsightStatistics(insightContext);
                    break;
                case "attributions":
                    this.parseInsightAttributionsList(insightContext);
                    break;
                default:
                    // Last branch of the switch, so the missing break is harmless.
                    insightContext.add(key, parseDefaultValue(key));
            }
        }
        insightDetails.add(CloudTrailEventField.insightContext.name(), insightContext);
    }
    /**
     * Parses the {@link InsightStatistics} object.
     *
     * NOTE(review): for "insightDuration"/"baselineDuration", getValueAsInt() is called
     * while the current token is still the field name, which appears to yield a default 0
     * without advancing the parser; the loop then revisits the same key on the value token
     * and overwrites the entry with the real number. Subtle but seemingly functional —
     * confirm against Jackson semantics before restructuring.
     *
     * @param insightContext the {@link InsightContext} the parsed statistics are attached to.
     * @throws IOException if the statistics JSON is malformed.
     * @throws JsonParseException if the value is not a JSON object.
     */
    private void parseInsightStatistics(InsightContext insightContext) throws IOException {
        if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a InsightStatistics object", jsonParser.getCurrentLocation());
        }

        InsightStatistics insightStatistics = new InsightStatistics();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "insightDuration":
                    insightStatistics.add(key, Integer.valueOf(jsonParser.getValueAsInt()));
                    break;
                case "baselineDuration":
                    insightStatistics.add(key, Integer.valueOf(jsonParser.getValueAsInt()));
                    break;
                case "baseline":
                    insightStatistics.add(key, parseAttributesWithDoubleValues());
                    break;
                case "insight":
                    insightStatistics.add(key, parseAttributesWithDoubleValues());
                    break;
                default:
                    insightStatistics.add(key, parseDefaultValue(key));
                    break;
            }
        }
        insightContext.add(CloudTrailEventField.statistics.name(), insightStatistics);
    }
    /**
     * Parses a list of {@link InsightAttributions} objects.
     *
     * @param insightContext the {@link InsightContext} the parsed list is attached to.
     * @throws IOException if the attributions JSON is malformed.
     * @throws JsonParseException if the value is not a JSON array.
     */
    private void parseInsightAttributionsList(InsightContext insightContext) throws IOException {
        if (jsonParser.nextToken() != JsonToken.START_ARRAY) {
            throw new JsonParseException("Not a InsightAttributions list", jsonParser.getCurrentLocation());
        }

        List<InsightAttributions> insightAttributionsList = new ArrayList<>();

        // Each iteration consumes the START_OBJECT token of one list element.
        while (jsonParser.nextToken() != JsonToken.END_ARRAY) {
            insightAttributionsList.add(parseInsightAttributions());
        }
        insightContext.add(CloudTrailEventField.attributions.name(), insightAttributionsList);
    }
    /**
     * Parses an {@link InsightAttributions} object.
     *
     * @return a single {@link InsightAttributions}
     * @throws IOException if the attributions JSON is malformed.
     * @throws JsonParseException if the current token is not a JSON object start.
     */
    private InsightAttributions parseInsightAttributions() throws IOException {
        // The START_OBJECT token was already consumed by the caller's array loop,
        // hence getCurrentToken() rather than nextToken().
        if (jsonParser.getCurrentToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a InsightAttributions object", jsonParser.getCurrentLocation());
        }
        InsightAttributions insightAttributions = new InsightAttributions();
        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "attribute":
                    insightAttributions.add(CloudTrailEventField.attribute.name(), jsonParser.nextTextValue());
                    break;
                case "baseline":
                    insightAttributions.add(CloudTrailEventField.baseline.name(), parseAttributeValueList());
                    break;
                case "insight":
                    insightAttributions.add(CloudTrailEventField.insight.name(), parseAttributeValueList());
                    break;
                default:
                    // Last branch of the switch, so the missing break is harmless.
                    insightAttributions.add(key, parseDefaultValue(key));
            }
        }
        return insightAttributions;
    }
    /**
     * Parses a list of {@link AttributeValue} objects.
     *
     * @return list of {@link AttributeValue}
     * @throws IOException if the list JSON is malformed.
     * @throws JsonParseException if the value is not a JSON array.
     */
    private List<AttributeValue> parseAttributeValueList() throws IOException {
        if (jsonParser.nextToken() != JsonToken.START_ARRAY) {
            // NOTE(review): message looks copy-pasted from the InsightAttributions list
            // parser; "Not an AttributeValue list" would be more accurate.
            throw new JsonParseException("Not a InsightAttributions list", jsonParser.getCurrentLocation());
        }

        List<AttributeValue> attributeValues = new ArrayList<>();

        // Each iteration consumes the START_OBJECT token of one list element.
        while (jsonParser.nextToken() != JsonToken.END_ARRAY) {
            attributeValues.add(parseAttributeValue());
        }
        return attributeValues;
    }
    /**
     * Parses a single {@link AttributeValue} object.
     *
     * @return a single {@link AttributeValue}, which contains the string value of the attribute, and the average number of
     * occurrences.
     * @throws IOException if the attribute JSON is malformed.
     * @throws JsonParseException if the current token is not a JSON object start.
     */
    private AttributeValue parseAttributeValue() throws IOException {
        // The START_OBJECT token was already consumed by the caller's array loop.
        if (jsonParser.getCurrentToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a InsightAttributions object", jsonParser.getCurrentLocation());
        }
        AttributeValue attributeValue = new AttributeValue();
        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "value":
                    attributeValue.add(CloudTrailEventField.value.name(), jsonParser.nextTextValue());
                    break;
                case "average":
                    // NOTE(review): getValueAsDouble() is called without advancing past the
                    // field name; the loop appears to revisit this key on the value token and
                    // overwrite the placeholder with the real number — confirm before changing.
                    attributeValue.add(CloudTrailEventField.average.name(), Double.valueOf(jsonParser.getValueAsDouble()));
                    break;
                default:
                    // Last branch of the switch, so the missing break is harmless.
                    attributeValue.add(key, parseDefaultValue(key));
            }
        }
        return attributeValue;
    }
    /**
     * Parses the {@link WebIdentitySessionContext} object.
     *
     * @param sessionContext {@link SessionContext} (currently unused by this method;
     *                       kept for signature symmetry with the other session parsers).
     * @return the web identity session context
     * @throws IOException if the webIdFederationData JSON is malformed.
     */
    private WebIdentitySessionContext parseWebIdentitySessionContext(SessionContext sessionContext) throws IOException {
        if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a WebIdentitySessionContext object", jsonParser.getCurrentLocation());
        }

        WebIdentitySessionContext webIdFederationData = new WebIdentitySessionContext();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "attributes":
                    webIdFederationData.add(CloudTrailEventField.attributes.name(), parseAttributes());
                    break;
                case "federatedProvider":
                    webIdFederationData.add(CloudTrailEventField.federatedProvider.name(), jsonParser.nextTextValue());
                    break;
                default:
                    webIdFederationData.add(key, parseDefaultValue(key));
                    break;
            }
        }

        return webIdFederationData;
    }
/**
* Parses the {@link SessionContext} object.
* This runs only if the session is running with role-based or federated access permissions
* (in other words, temporary credentials in IAM).
*
* @param sessionContext
* @return the session issuer object.
* @throws IOException
*/
private SessionIssuer parseSessionIssuer(SessionContext sessionContext) throws IOException {
if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
throw new JsonParseException("Not a SessionIssuer object", jsonParser.getCurrentLocation());
}
SessionIssuer sessionIssuer = new SessionIssuer();
while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
String key = jsonParser.getCurrentName();
switch (key) {
case "type":
sessionIssuer.add(CloudTrailEventField.type.name(), this.jsonParser.nextTextValue());
break;
case "principalId":
sessionIssuer.add(CloudTrailEventField.principalId.name(), this.jsonParser.nextTextValue());
break;
case "arn":
sessionIssuer.add(CloudTrailEventField.arn.name(), this.jsonParser.nextTextValue());
break;
case "accountId":
sessionIssuer.add(CloudTrailEventField.accountId.name(), this.jsonParser.nextTextValue());
break;
case "userName":
sessionIssuer.add(CloudTrailEventField.userName.name(), this.jsonParser.nextTextValue());
break;
default:
sessionIssuer.add(key, this.parseDefaultValue(key));
break;
}
}
return sessionIssuer;
}
/**
* Parses the event readOnly attribute.
*
* @param eventData
*
* @throws JsonParseException
* @throws IOException
*/
private void parseReadOnly(CloudTrailEventData eventData) throws IOException {
jsonParser.nextToken();
Boolean readOnly = null;
if (jsonParser.getCurrentToken() != JsonToken.VALUE_NULL) {
readOnly = jsonParser.getBooleanValue();
}
eventData.add(CloudTrailEventField.readOnly.name(), readOnly);
}
/**
* Parses the event managementEvent attribute.
* @param eventData the interesting {@link CloudTrailEventData}
* @throws IOException
*/
private void parseManagementEvent(CloudTrailEventData eventData) throws IOException {
jsonParser.nextToken();
Boolean managementEvent = null;
if (jsonParser.getCurrentToken() != JsonToken.VALUE_NULL) {
managementEvent = jsonParser.getBooleanValue();
}
eventData.add(CloudTrailEventField.managementEvent.name(), managementEvent);
}
    /**
     * Parses a list of Resource.
     *
     * @param eventData the {@link CloudTrailEventData} the resources belong to.
     * @throws IOException if the resources JSON is malformed.
     */
    private void parseResources(CloudTrailEventData eventData) throws IOException {
        JsonToken nextToken = jsonParser.nextToken();
        // A JSON null is recorded explicitly so the field is present but empty.
        if (nextToken == JsonToken.VALUE_NULL) {
            eventData.add(CloudTrailEventField.resources.name(), null);
            return;
        }

        if (nextToken != JsonToken.START_ARRAY) {
            throw new JsonParseException("Not a list of resources object", jsonParser.getCurrentLocation());
        }

        List<Resource> resources = new ArrayList<Resource>();

        // Each iteration consumes the START_OBJECT token of one resource.
        while (jsonParser.nextToken() != JsonToken.END_ARRAY) {
            resources.add(parseResource());
        }

        eventData.add(CloudTrailEventField.resources.name(), resources);
    }
/**
* Parses a single Resource.
*
* @return a single resource
* @throws IOException
*/
private Resource parseResource() throws IOException {
//current token is ready consumed by parseResources
if (jsonParser.getCurrentToken() != JsonToken.START_OBJECT) {
throw new JsonParseException("Not a Resource object", jsonParser.getCurrentLocation());
}
Resource resource = new Resource();
while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
String key = jsonParser.getCurrentName();
switch (key) {
default:
resource.add(key, parseDefaultValue(key));
break;
}
}
return resource;
}
    /**
     * Parses the {@link Addendum} in CloudTrailEventData.
     *
     * @param eventData {@link CloudTrailEventData} must parse.
     * @throws IOException if the addendum JSON is malformed.
     */
    private void parseAddendum(CloudTrailEventData eventData) throws IOException {
        JsonToken nextToken = jsonParser.nextToken();
        // A JSON null is recorded explicitly so the field is present but empty.
        if (nextToken == JsonToken.VALUE_NULL) {
            eventData.add(CloudTrailEventField.addendum.name(), null);
            return;
        }

        if (nextToken != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not an Addendum object", jsonParser.getCurrentLocation());
        }

        Addendum addendum = new Addendum();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "reason":
                    addendum.add(CloudTrailEventField.reason.name(), jsonParser.nextTextValue());
                    break;
                case "updatedFields":
                    addendum.add(CloudTrailEventField.updatedFields.name(), jsonParser.nextTextValue());
                    break;
                case "originalRequestID":
                    addendum.add(CloudTrailEventField.originalRequestID.name(), jsonParser.nextTextValue());
                    break;
                case "originalEventID":
                    addendum.add(CloudTrailEventField.originalEventID.name(), jsonParser.nextTextValue());
                    break;
                default:
                    addendum.add(key, parseDefaultValue(key));
                    break;
            }
        }
        eventData.add(CloudTrailEventField.addendum.name(), addendum);
    }
    /**
     * Parses the tlsDetails object in CloudTrailEventData.
     *
     * @param eventData {@link CloudTrailEventData} to attach the parsed TLS details to.
     * @throws IOException if the tlsDetails JSON is malformed.
     */
    private void parseTlsDetails(CloudTrailEventData eventData) throws IOException {
        JsonToken nextToken = jsonParser.nextToken();
        // A JSON null is recorded explicitly so the field is present but empty.
        if (nextToken == JsonToken.VALUE_NULL) {
            eventData.add(CloudTrailEventField.tlsDetails.name(), null);
            return;
        }

        if (nextToken != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not a TLS Details object", jsonParser.getCurrentLocation());
        }

        TlsDetails tlsDetails = new TlsDetails();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            switch (key) {
                case "tlsVersion":
                    tlsDetails.add(CloudTrailEventField.tlsVersion.name(), jsonParser.nextTextValue());
                    break;
                case "cipherSuite":
                    tlsDetails.add(CloudTrailEventField.cipherSuite.name(), jsonParser.nextTextValue());
                    break;
                case "clientProvidedHostHeader":
                    tlsDetails.add(CloudTrailEventField.clientProvidedHostHeader.name(), jsonParser.nextTextValue());
                    break;
                default:
                    tlsDetails.add(key, this.parseDefaultValue(key));
                    break;
            }
        }
        eventData.add(CloudTrailEventField.tlsDetails.name(), tlsDetails);
    }
    /**
     * Parses the value of an unrecognized key as a String.
     *
     * If the value is JSON null, returns null.
     * If the value is a JSON structure (START_ARRAY or START_OBJECT), the whole
     * structure is serialized back to its JSON String form.
     * Otherwise the scalar value is returned as a String.
     *
     * @param key the attribute key whose value is being parsed (not read by this
     *            method; kept for call-site clarity).
     * @return the value as a String, or null.
     * @throws IOException if the value JSON is malformed.
     */
    private String parseDefaultValue(String key) throws IOException {
        jsonParser.nextToken();
        String value = null;
        JsonToken currentToken = jsonParser.getCurrentToken();
        if (currentToken != JsonToken.VALUE_NULL) {
            if (currentToken == JsonToken.START_ARRAY || currentToken == JsonToken.START_OBJECT) {
                // Consume the whole substructure and round-trip it through a tree node.
                JsonNode node = jsonParser.readValueAsTree();
                value = node.toString();
            } else {
                value = jsonParser.getValueAsString();
            }
        }
        return value;
    }
/**
* Parses attributes as a Map, used in both parseWebIdentitySessionContext and parseSessionContext
*
* @return attributes for either session context or web identity session context
* @throws IOException
*/
private Map<String, String> parseAttributes() throws IOException {
if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
throw new JsonParseException("Not a Attributes object", jsonParser.getCurrentLocation());
}
Map<String, String> attributes = new HashMap<>();
while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
String key = jsonParser.getCurrentName();
String value = jsonParser.nextTextValue();
attributes.put(key, value);
}
return attributes;
}
    /**
     * Parses attributes as a Map&lt;String, Double&gt;, used to parse InsightStatistics.
     *
     * NOTE(review): getValueAsDouble() is called while the current token is still the
     * field name, which appears to yield 0.0 without advancing; the loop then revisits
     * the same key on the value token and the put() overwrites the placeholder with the
     * real number. Subtle but seemingly functional — confirm before restructuring.
     *
     * @return attributes for insight statistics.
     * @throws IOException if the attributes JSON is malformed.
     */
    private Map<String, Double> parseAttributesWithDoubleValues() throws IOException {
        if (jsonParser.nextToken() != JsonToken.START_OBJECT) {
            throw new JsonParseException("Not an Attributes object", jsonParser.getCurrentLocation());
        }

        Map<String, Double> attributes = new HashMap<>();

        while (jsonParser.nextToken() != JsonToken.END_OBJECT) {
            String key = jsonParser.getCurrentName();
            Double value = jsonParser.getValueAsDouble();
            attributes.put(key, value);
        }

        return attributes;
    }
    /**
     * Converts a String to a UUID. Currently eventID is the field stored as a UUID.
     *
     * @param str the String to convert; must be in standard UUID format.
     * @return the UUID.
     */
    private UUID convertToUUID(String str) {
        return UUID.fromString(str);
    }
    /**
     * Converts a String to a Date in UTC.
     *
     * Note: contrary to an earlier comment, a parse failure does NOT fall back to the
     * current date — it is rethrown as an IOException.
     *
     * @param dateInString the String to convert to Date; format defined by
     *                     {@code LibraryUtils.getUtcSdf()}. May be null.
     * @return the date and time in coordinated universal time, or null if the input is null.
     * @throws IOException if the String cannot be parsed as a Date.
     */
    private Date convertToDate(String dateInString) throws IOException {
        Date date = null;
        if (dateInString != null) {
            try {
                date = LibraryUtils.getUtcSdf().parse(dateInString);
            } catch (ParseException e) {
                throw new IOException("Cannot parse " + dateInString + " as Date", e);
            }
        }
        return date;
    }
}
| 5,877 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* Serialize events or sources.
*/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
| 5,878 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/serializer/S3SNSSourceSerializer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.serializer;
import com.amazonaws.services.cloudtrail.processinglibrary.factory.SourceSerializerFactory;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.SNSMessageBodyExtractor;
import com.amazonaws.services.sqs.model.Message;
import com.fasterxml.jackson.databind.JsonNode;
import java.io.IOException;
/**
 * The <code>S3SNSSourceSerializer</code> extracts CloudTrail log file information from notifications that Amazon S3 sends
 * to an SNS topic. Use {@link SourceSerializerFactory#createS3SNSSourceSerializer()} for initialization.
 */
public class S3SNSSourceSerializer implements SourceSerializer {
    /** Extracts the embedded SNS message body from an SQS message. Immutable after construction. */
    private final SNSMessageBodyExtractor messageExtractor;
    /** Delegate that builds a {@link CloudTrailSource} from the S3 notification. Immutable after construction. */
    private final S3SourceSerializer s3Serializer;

    /**
     * @param messageExtractor extracts the SNS message body from the SQS message.
     * @param s3Serializer delegate serializer for the unwrapped S3 notification.
     */
    public S3SNSSourceSerializer(SNSMessageBodyExtractor messageExtractor, S3SourceSerializer s3Serializer) {
        this.messageExtractor = messageExtractor;
        this.s3Serializer = s3Serializer;
    }

    /**
     * Unwraps the SNS envelope from the SQS message and delegates to the S3 serializer.
     */
    @Override
    public CloudTrailSource getSource(Message sqsMessage) throws IOException {
        JsonNode s3MessageNode = messageExtractor.getMessageBody(sqsMessage);
        return s3Serializer.getCloudTrailSource(sqsMessage, s3MessageNode);
    }
}
| 5,879 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/BasicPollQueueInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
/**
 * Provides basic Amazon SQS queue polling messages information.
 */
public class BasicPollQueueInfo implements ProgressInfo {
    // Mutable: the caller flips this once the poll outcome is known.
    private boolean isSuccess;
    // Fixed at construction time, so declared final (was mutable; also removed a
    // redundant super() call in the constructor).
    private final int polledMessageCount;

    /**
     * @param polledMessageCount the number of messages polled from the queue.
     * @param isSuccess whether polling succeeded.
     */
    public BasicPollQueueInfo(int polledMessageCount, boolean isSuccess) {
        this.isSuccess = isSuccess;
        this.polledMessageCount = polledMessageCount;
    }

    @Override
    public boolean isSuccess() {
        return isSuccess;
    }

    @Override
    public void setIsSuccess(boolean isSuccess) {
        this.isSuccess = isSuccess;
    }

    /**
     * @return The number of messages that are polled successfully.
     */
    public int getSuccessPolledMessageCount() {
        return polledMessageCount;
    }

    @Override
    public String toString() {
        return String.format("{isSuccess: %s, polledMessageCount: %s}", isSuccess, polledMessageCount);
    }
}
| 5,880 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/ProgressSourceInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
/**
 * A {@link ProgressInfo} that additionally exposes the {@link CloudTrailSource}
 * the reported progress refers to.
 */
public interface ProgressSourceInfo extends ProgressInfo {

    /**
     * Returns the CloudTrail source being processed.
     *
     * @return CloudTrail source.
     */
    CloudTrailSource getSource();
}
| 5,881 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/ProgressInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
/**
 * Reports the current processing progress.
 *
 * <p>When progress is reported at the start of an operation, {@link #isSuccess()}
 * will always be <code>false</code>.</p>
 */
public interface ProgressInfo {

    /**
     * Indicates whether the reported operation completed successfully.
     *
     * @return <code>true</code> if execution is successful.
     */
    boolean isSuccess();

    /**
     * Records whether the reported operation completed successfully.
     *
     * @param isSuccess Set <code>true</code> if execution is successful. Otherwise, <code>false</code>.
     */
    void setIsSuccess(boolean isSuccess);
}
| 5,882 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/BasicProcessLogInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
/**
 * Provides basic CloudTrail log processing information.
 */
public class BasicProcessLogInfo implements ProgressLogInfo, ProgressSourceInfo {
    // Mutable: the caller flips this once the processing outcome is known.
    private boolean isSuccess;
    // Fixed at construction time, so declared final (were mutable before).
    private final CloudTrailLog log;
    private final CloudTrailSource source;

    /**
     * @param source the CloudTrail source the log came from; may be null.
     * @param log the CloudTrail log being processed; may be null.
     * @param isSuccess whether processing succeeded so far.
     */
    public BasicProcessLogInfo(CloudTrailSource source, CloudTrailLog log, boolean isSuccess) {
        this.source = source;
        this.log = log;
        this.isSuccess = isSuccess;
    }

    @Override
    public boolean isSuccess() {
        return isSuccess;
    }

    @Override
    public void setIsSuccess(boolean isSuccess) {
        this.isSuccess = isSuccess;
    }

    @Override
    public CloudTrailLog getLog() {
        return log;
    }

    @Override
    public CloudTrailSource getSource() {
        return source;
    }

    // Output format intentionally unchanged: "{isSuccess: X, log: Y, source: Z}"
    // with the log/source segments omitted when null.
    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append("{isSuccess: ");
        builder.append(isSuccess);
        builder.append(", ");
        if (log != null) {
            builder.append("log: ");
            builder.append(log);
            builder.append(", ");
        }
        if (source != null) {
            builder.append("source: ");
            builder.append(source);
        }
        builder.append("}");
        return builder.toString();
    }
}
| 5,883 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/ProgressState.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
/**
* CloudTrail progress state.
*/
public enum ProgressState {
    /**
     * Reported while polling messages from the SQS queue.
     */
    pollQueue,
    /**
     * Reported while parsing a message received from the SQS queue.
     */
    parseMessage,
    /**
     * Reported while deleting a processed message from the SQS queue.
     */
    deleteMessage,
    /**
     * Reported while deleting a message that was filtered out (not processed)
     * from the SQS queue.
     */
    deleteFilteredMessage,
    /**
     * Reported while processing a source.
     */
    processSource,
    /**
     * Reported while downloading a log file.
     */
    downloadLog,
    /**
     * Reported while processing a downloaded log file.
     */
    processLog,
    /**
     * Reported when an uncaught exception occurred during processing.
     */
    uncaughtException
}
| 5,884 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/ProgressStatus.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
/**
* Provides contextual information about the state of an AWS CloudTrail
* Processing Library operation.
*/
public class ProgressStatus {
    private ProgressState progressState;
    private ProgressInfo progressInfo;

    /**
     * Initializes a new <code>ProgressStatus</code> object.
     *
     * @param progressState The {@link ProgressState} describing which operation is in progress.
     * @param progressInfo The {@link ProgressInfo} carrying details about that operation.
     */
    public ProgressStatus(ProgressState progressState, ProgressInfo progressInfo) {
        this.progressState = progressState;
        this.progressInfo = progressInfo;
    }

    /**
     * @return The state of the progress.
     */
    public ProgressState getProgressState() {
        return progressState;
    }

    /**
     * Sets the <code>ProgressState</code> of this object.
     *
     * @param progressState The progress state to set.
     */
    public void setProgressState(ProgressState progressState) {
        this.progressState = progressState;
    }

    /**
     * @return The basic progress information.
     */
    public ProgressInfo getProgressInfo() {
        return progressInfo;
    }

    /**
     * Sets the <code>ProgressInfo</code> for this object.
     *
     * @param progressInfo The progressInfo to set.
     */
    public void setProgressInfo(ProgressInfo progressInfo) {
        this.progressInfo = progressInfo;
    }

    /**
     * Creates a string representation of this object.
     *
     * @return A string containing the values of {@link ProgressState} and {@link ProgressInfo}.
     */
    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append("{");
        if (progressState != null) {
            builder.append("progressState: ").append(progressState).append(", ");
        }
        if (progressInfo != null) {
            builder.append("progressInfo: ").append(progressInfo);
        }
        builder.append("}");
        return builder.toString();
    }

    /**
     * Calculates a hash code for the current state of this {@link ProgressStatus}.
     * <p>
     * The hash code will change if the values of {@link ProgressState} and {@link ProgressInfo} change.
     *
     * @return The hash code value.
     */
    @Override
    public int hashCode() {
        // Objects.hash produces exactly the same value as the previous hand-rolled
        // 31-based computation (nulls hash to 0), so the hashCode contract is unchanged.
        return java.util.Objects.hash(progressState, progressInfo);
    }

    /**
     * Compares this object with another {@link ProgressStatus} object.
     *
     * @return <code>true</code> if the objects are equal. Otherwise, <code>false</code>.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        ProgressStatus other = (ProgressStatus) obj;
        // progressState is compared by reference, matching the original behavior
        // (ProgressState is an enum, so identity comparison equals value comparison).
        return progressState == other.progressState
                && java.util.Objects.equals(progressInfo, other.progressInfo);
    }
}
| 5,885 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/BasicParseMessageInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
import com.amazonaws.services.sqs.model.Message;
/**
* Provide basic message parsing information.
*/
public class BasicParseMessageInfo implements ProgressMessageInfo {
    private boolean isSuccess;
    private Message message;

    /**
     * Creates a progress record for the parsing of a single SQS message.
     *
     * @param message the SQS message being parsed.
     * @param isSuccess whether parsing succeeded.
     */
    public BasicParseMessageInfo(Message message, boolean isSuccess) {
        this.message = message;
        this.isSuccess = isSuccess;
    }

    /**
     * @return the SQS message this record refers to.
     */
    @Override
    public Message getMessage() {
        return message;
    }

    /**
     * @return <code>true</code> if the message was parsed successfully.
     */
    @Override
    public boolean isSuccess() {
        return isSuccess;
    }

    /**
     * Records whether the message was parsed successfully.
     */
    @Override
    public void setIsSuccess(boolean isSuccess) {
        this.isSuccess = isSuccess;
    }

    /**
     * @return a string of the form <code>{isSuccess: ..., MessageToParse: ...}</code>.
     */
    @Override
    public String toString() {
        return "{isSuccess: " + isSuccess
                + ", MessageToParse: " + message.toString() + "}";
    }
}
| 5,886 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/ProgressMessageInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
import com.amazonaws.services.sqs.model.Message;
/**
* In addition to {@link ProgressInfo}, provides SQS {@link Message} information.
*/
public interface ProgressMessageInfo extends ProgressInfo {
    /**
     * Returns the SQS message this progress information refers to.
     *
     * @return SQS message.
     */
    Message getMessage();
}
| 5,887 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/BasicProcessSourceInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
/**
* Provides basic source processing information.
*/
public class BasicProcessSourceInfo implements ProgressSourceInfo {
    private boolean isSuccess;
    private CloudTrailSource source;

    /**
     * Creates a progress record for the processing of a single CloudTrail source.
     *
     * @param source the {@link CloudTrailSource} being processed.
     * @param isSuccess whether the processing succeeded.
     */
    public BasicProcessSourceInfo(CloudTrailSource source, boolean isSuccess) {
        this.source = source;
        this.isSuccess = isSuccess;
    }

    /**
     * @return <code>true</code> if the source was processed successfully.
     */
    @Override
    public boolean isSuccess() {
        return isSuccess;
    }

    /**
     * Records whether the source was processed successfully.
     */
    @Override
    public void setIsSuccess(boolean isSuccess) {
        this.isSuccess = isSuccess;
    }

    /**
     * @return the CloudTrail source this record refers to.
     */
    @Override
    public CloudTrailSource getSource() {
        return source;
    }

    /**
     * @return a string of the form <code>{isSuccess: ..., source: ...}</code>;
     *         the source part is omitted when <code>null</code>.
     */
    @Override
    public String toString() {
        String text = "{isSuccess: " + isSuccess + ", ";
        if (source != null) {
            text += "source: " + source;
        }
        return text + "}";
    }
}
| 5,888 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* Classes to provide processing progress to users.
*/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
| 5,889 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/progress/ProgressLogInfo.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.progress;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
/**
* In addition to {@link ProgressInfo}, provides {@link CloudTrailLog} information.
*/
public interface ProgressLogInfo extends ProgressInfo {
    /**
     * Returns the CloudTrail log file this progress information refers to.
     *
     * @return CloudTrail log.
     */
    CloudTrailLog getLog();
}
| 5,890 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/utils/SourceIdentifier.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.utils;
import com.amazonaws.services.cloudtrail.processinglibrary.model.internal.SourceType;
import java.util.regex.Pattern;
/**
* Identify the source type by checking the given source string and event name if applied. Specifically,
* the <code>source</code> is usually the S3 object key and <code>eventName</code> defined by Amazon S3.
* The following are valid source types: CloudTrailLog | Other
*/
public class SourceIdentifier {
    private static final String CREATE_EVENT_PREFIX = "ObjectCreated:";

    /**
     * Regex for the name format of CloudTrail log file objects delivered to an S3 bucket:
     * AccountID_CloudTrail_RegionName_YYYYMMDDTHHmmZ_UniqueString.FileNameFormat
     *
     * Needed to filter out non-CloudTrail log files, since S3 may send notifications
     * for other objects as well.
     */
    private static final Pattern CT_LOGFILE_PATTERN =
            Pattern.compile(".+_CloudTrail_[\\w\\-]+_\\d{8}T\\d{4}Z_[\\w]+\\.json\\.gz");

    /**
     * Identify the source type.
     *
     * @param source the name of the S3 object that was put to the bucket.
     * @return {@link SourceType}
     */
    public SourceType identify(String source) {
        return getCloudTrailSourceType(source);
    }

    /**
     * Identify the source type, taking the S3 event action into account.
     *
     * @param source the S3 object name.
     * @param eventName the event name defined by Amazon S3.
     * @return {@link SourceType}
     */
    public SourceType identifyWithEventName(String source, String eventName) {
        if (!eventName.startsWith(CREATE_EVENT_PREFIX)) {
            return SourceType.Other;
        }
        return getCloudTrailSourceType(source);
    }

    // Classifies the object name by matching it against the CloudTrail log file pattern.
    private SourceType getCloudTrailSourceType(String source) {
        return CT_LOGFILE_PATTERN.matcher(source).matches()
                ? SourceType.CloudTrailLog
                : SourceType.Other;
    }
}
| 5,891 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/utils/SNSMessageBodyExtractor.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.utils;
import com.amazonaws.services.sqs.model.Message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
/**
* Extract message body from the SNS notification, specifically, the value of the 'Message' attribute.
*/
public class SNSMessageBodyExtractor {
    private static final String MESSAGE = "Message";
    private final ObjectMapper mapper;

    /**
     * Creates an extractor that uses the given Jackson mapper for JSON parsing.
     */
    public SNSMessageBodyExtractor(ObjectMapper mapper) {
        this.mapper = mapper;
    }

    /**
     * Parses the SNS 'Message' attribute text of the given SQS message into a JSON tree.
     *
     * @throws NullPointerException if the body has no 'Message' attribute.
     */
    public JsonNode getMessageBody(Message sqsMessage) throws IOException, NullPointerException {
        String messageText = getMessageText(sqsMessage);
        return mapper.readTree(messageText);
    }

    /**
     * Returns the raw text of the SNS 'Message' attribute in the given SQS message body.
     *
     * @throws NullPointerException if the body has no 'Message' attribute.
     */
    public String getMessageText(Message sqsMessage) throws IOException, NullPointerException {
        JsonNode envelope = mapper.readTree(sqsMessage.getBody());
        return envelope.get(MESSAGE).textValue();
    }
}
| 5,892 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/utils/EventBuffer.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.utils;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
/**
* Provides a buffer-like store for AWS CloudTrail events.
*/
public class EventBuffer<T> {
    private List<T> bufferedEvents;
    private int bufferSize;

    /**
     * Initialize a new <code>EventBuffer</code>.
     *
     * @param bufferSize the number of events that can be held in the buffer; must be at least 1.
     * @throws IllegalStateException if <code>bufferSize</code> is less than 1.
     */
    public EventBuffer(final int bufferSize) {
        // Fixed typo in the failure message: "at lease" -> "at least".
        LibraryUtils.checkCondition(bufferSize < 1, "Event Buffer size cannot be " + bufferSize + ", must be at least 1.");
        bufferedEvents = new LinkedList<>();
        this.bufferSize = bufferSize;
    }

    /**
     * Indicates whether the buffer has reached the number of events configured in the constructor.
     *
     * @return <code>true</code> if the current buffer is full; <code>false</code> otherwise.
     */
    public boolean isBufferFull() {
        return bufferedEvents.size() >= bufferSize;
    }

    /**
     * Add an event to the buffer.
     *
     * @param event An object of the type configured for this buffer.
     */
    public void addEvent(T event) {
        bufferedEvents.add(event);
    }

    /**
     * Get a list of objects held by the buffer, removing them from the buffer in FIFO order.
     * <p>
     * The number of returned objects will be from zero to the configured buffer size.
     *
     * @return a <a href="http://docs.oracle.com/javase/7/docs/api/java/util/List.html">List</a> containing the buffered
     *         objects.
     */
    public List<T> getEvents() {
        // Math.min replaces the previous isBufferFull() branch; the result is identical.
        int returnSize = Math.min(bufferSize, bufferedEvents.size());
        List<T> returnEvents = new ArrayList<>(returnSize);
        for (int i = 0; i < returnSize; i++) {
            // Removing from the head of the LinkedList is O(1) and preserves insertion order.
            returnEvents.add(bufferedEvents.remove(0));
        }
        return returnEvents;
    }
}
| 5,893 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/utils/LibraryUtils.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.utils;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.cloudtrail.processinglibrary.exceptions.ProcessingLibraryException;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ExceptionHandler;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ProgressReporter;
import com.amazonaws.services.cloudtrail.processinglibrary.model.SourceAttributeKeys;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
import com.amazonaws.services.sqs.model.Message;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.util.Locale;
import java.util.TimeZone;
/**
* Utility methods used by the AWS CloudTrail Processing Library.
*/
public class LibraryUtils {
    private static final String UNDER_SCORE = "_";
    private static final String FORWARD_SLASH = "/";
    private static final String AMAZONAWS_COM = ".amazonaws.com/";
    private static final String UTC_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss'Z'";
    private static final String UTC_TIME_ZONE = "UTC";
    /**
     * Check that an object is not <code>null</code>; throw an exception if it
     * is.
     *
     * @param argument the Object to check.
     * @param message a description string that will be sent with the exception.
     * @throws IllegalStateException if the passed-in object is <code>null</code>.
     */
    public static void checkArgumentNotNull(Object argument, String message) {
        if (argument == null) {
            throw new IllegalStateException(message);
        }
    }
    /**
     * Check a conditional value or expression, if <code>true</code>, throw an
     * exception.
     *
     * @param condition a boolean value or an expression to check.
     * @param message a description string that will be sent with the exception.
     * @throws IllegalStateException if the condition expression is <code>true</code>.
     */
    public static void checkCondition(boolean condition, String message) {
        if (condition) {
            throw new IllegalStateException(message);
        }
    }
    /**
     * Convert an
     * <a href="http://docs.oracle.com/javase/7/docs/api/java/io/InputStream.html">InputSteam</a> to a byte array.
     * <p>
     * Reads the stream to exhaustion in 1 KB chunks; the stream is not closed here,
     * so the caller remains responsible for closing it.
     *
     * @param inputStream the <code>InputStream</code> to convert.
     * @return a byte array containing the data from the input stream.
     * @throws IOException if the <code>InputStream</code> could not be converted.
     */
    public static byte[] toByteArray(InputStream inputStream) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        int nRead;
        byte[] bytes = new byte[1024];
        while ((nRead = inputStream.read(bytes, 0, 1024)) != -1) {
            buffer.write(bytes, 0, nRead);
        }
        return buffer.toByteArray();
    }
    /**
     * Split an HTTP representation of an Amazon S3 URL to bucket name and object key.
     * <p>
     * For example:
     * <pre>
     * input: s3ObjectHttpUrl = http://s3-us-west-2.amazonaws.com/mybucket/myobjectpath1/myobjectpath2/myobject.extension
     * output: {"mybucket", "myobjectpath1/myobjectpath2/myobject.extension"}
     * </pre>
     * Returns <code>null</code> if the input is <code>null</code> or does not contain
     * the ".amazonaws.com/" marker.
     *
     * @param s3ObjectHttpUrl the URL of the S3 object to split.
     * @return a two-element string array: the first element is the bucket name,
     *         and the second element is the object key.
     */
    public static String[] toBucketNameObjectKey(String s3ObjectHttpUrl) {
        if (s3ObjectHttpUrl == null) {
            return null;
        }
        int start = s3ObjectHttpUrl.indexOf(AMAZONAWS_COM);
        int length = s3ObjectHttpUrl.length();
        if (start != -1) {
            String bucketNameAndObjectKey = s3ObjectHttpUrl.substring(start + AMAZONAWS_COM.length(), length);
            // Split on the first "/" only: everything after the bucket name is the object key.
            return bucketNameAndObjectKey.split(FORWARD_SLASH, 2);
        }
        return null;
    }
    /**
     * Extract the account ID from an S3 object key.
     * <p>
     * The account ID is taken from the log file name (the text between the last "/"
     * and the first "_" that follows it). For example:
     * <pre>
     * input: https://s3-us-west-2.amazonaws.com/mybucket/AWSLogs/123456789012/CloudTrail/us-east-1/2014/02/14/123456789012_CloudTrail_us-east-1_20140214T2230Z_K0UsfksWvF8TBJZy.json.gz
     * output: 123456789012
     * </pre>
     * Returns <code>null</code> if the key is <code>null</code> or does not contain the
     * expected "/" and "_" delimiters.
     *
     * @param objectKey The object key to query.
     * @return the account ID embedded in the log file name.
     */
    public static String extractAccountIdFromObjectKey(String objectKey) {
        if (objectKey == null) {
            return null;
        }
        int start = objectKey.lastIndexOf(FORWARD_SLASH);
        if (start != -1) {
            int end = objectKey.indexOf(UNDER_SCORE, start + FORWARD_SLASH.length());
            if (end != -1) {
                return objectKey.substring(start + FORWARD_SLASH.length(), end);
            }
        }
        return null;
    }
    /**
     * Add the account ID attribute to the <code>sqsMessage</code> if it does not exist.
     * The ID is extracted from the S3 object key; nothing is added when extraction fails.
     *
     * @param sqsMessage The SQS message.
     * @param s3ObjectKey The S3 object key.
     */
    public static void setMessageAccountId(Message sqsMessage, String s3ObjectKey) {
        if (!sqsMessage.getAttributes().containsKey(SourceAttributeKeys.ACCOUNT_ID.getAttributeKey())) {
            String accountId = extractAccountIdFromObjectKey(s3ObjectKey);
            if (accountId != null) {
                sqsMessage.addAttributesEntry(SourceAttributeKeys.ACCOUNT_ID.getAttributeKey(), accountId);
            }
        }
    }
    /**
     * A wrapper function of handling exceptions that have a known root cause, such as {@link AmazonServiceException}.
     *
     * @param exceptionHandler the {@link ExceptionHandler} to handle exceptions.
     * @param progressStatus the current progress status {@link ProgressStatus}.
     * @param e the exception needs to be handled.
     * @param message the exception message.
     */
    public static void handleException(ExceptionHandler exceptionHandler, ProgressStatus progressStatus, Exception e, String message) {
        ProcessingLibraryException exception = new ProcessingLibraryException(message, e, progressStatus);
        exceptionHandler.handleException(exception);
    }
    /**
     * A wrapper function of handling uncaught exceptions (no root-cause exception available).
     *
     * @param exceptionHandler the {@link ExceptionHandler} to handle exceptions.
     * @param progressStatus the current progress status {@link ProgressStatus}.
     * @param message the exception message.
     */
    public static void handleException(ExceptionHandler exceptionHandler, ProgressStatus progressStatus, String message) {
        ProcessingLibraryException exception = new ProcessingLibraryException(message, progressStatus);
        exceptionHandler.handleException(exception);
    }
    /**
     * A wrapper function of reporting the result of the processing.
     * Marks the progress info with the outcome, then notifies the reporter.
     *
     * @param progressReporter the {@link ProgressReporter} to report the end of process.
     * @param processSuccess the result of process.
     * @param progressStatus the current progress status {@link ProgressStatus}.
     * @param reportObject the object to send, usually the object returned by {@link ProgressReporter#reportStart(ProgressStatus)}.
     */
    public static void endToProcess(ProgressReporter progressReporter, boolean processSuccess, ProgressStatus progressStatus, Object reportObject) {
        progressStatus.getProgressInfo().setIsSuccess(processSuccess);
        progressReporter.reportEnd(progressStatus, reportObject);
    }
    /**
     * SimpleDateFormat is not thread safe. Defining it as a static ThreadLocal to synchronize is less expensive than
     * creating a SimpleDateFormat object each time. Each thread gets its own UTC-configured instance.
     */
    private static ThreadLocal<SimpleDateFormat> utcSdf = new ThreadLocal<SimpleDateFormat>() {
        @Override
        protected SimpleDateFormat initialValue() {
            SimpleDateFormat sdf = new SimpleDateFormat(UTC_DATE_FORMAT, Locale.US); // $NON_NLS_L$
            sdf.setTimeZone(TimeZone.getTimeZone(UTC_TIME_ZONE));
            return sdf;
        }
    };
    /**
     * Returns this thread's {@link SimpleDateFormat} configured for the UTC format
     * <code>yyyy-MM-dd'T'HH:mm:ss'Z'</code>.
     * <p>
     * Note: despite the historical wording, this returns a date formatter, not a timestamp.
     *
     * @return the per-thread UTC date formatter.
     */
    public static SimpleDateFormat getUtcSdf() {
        return utcSdf.get();
    }
}
| 5,894 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/utils/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
* Utility classes.
*/
package com.amazonaws.services.cloudtrail.processinglibrary.utils;
| 5,895 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/exceptions/CallbackException.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.exceptions;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
/**
* The exception from call back to implementation of AWS CloudTrail Processing Library interfaces.
*/
public class CallbackException extends ProcessingLibraryException {
    private static final long serialVersionUID = -2425808722370565843L;

    /**
     * Initializes a new <code>CallbackException</code> with a message, inner exception, and status.
     *
     * @param message A string that provides information about the exception.
     * @param e An inner exception that is carried along with this exception.
     * @param status The {@link ProgressStatus} of the operation that was in progress when the exception occurred.
     */
    public CallbackException(String message, Exception e, ProgressStatus status) {
        super(message, e, status);
    }

    /**
     * Initializes a new <code>CallbackException</code> with a message and status.
     *
     * @param message A string that provides information about the exception.
     * @param status The {@link ProgressStatus} of the operation that was in progress when the exception occurred.
     */
    public CallbackException(String message, ProgressStatus status) {
        super(message, status);
    }
}
| 5,896 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/exceptions/ProcessingLibraryException.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.exceptions;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ExceptionHandler;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
/**
* Exceptions of this type are handled by an implementation of the {@link ExceptionHandler} interface.
* <p>
* The status of the operation that was in progress when the exception occurred can be retrieved by calling
* the {@link #getStatus()}.
* </p>
*/
public class ProcessingLibraryException extends Exception {

    private static final long serialVersionUID = 8757412348402829171L;

    /**
     * The {@link ProgressStatus} of the operation that was in progress when the
     * exception occurred. Assigned once at construction and never mutated, hence
     * declared {@code final} (the original field was unnecessarily mutable).
     */
    private final ProgressStatus status;

    /**
     * Initializes a new <code>ProcessingLibraryException</code> with a message and status.
     *
     * @param message A string that provides information about the exception.
     * @param status The {@link ProgressStatus} of the operation that was in progress when the exception occurred.
     */
    public ProcessingLibraryException(String message, ProgressStatus status) {
        super(message);
        this.status = status;
    }

    /**
     * Initializes a new <code>ProcessingLibraryException</code> with a message, inner exception, and status.
     *
     * @param message A string that provides information about the exception.
     * @param e An inner exception that is carried along with this exception.
     * @param status The {@link ProgressStatus} of the operation that was in progress when the exception occurred.
     */
    public ProcessingLibraryException(String message, Exception e, ProgressStatus status) {
        super(message, e);
        this.status = status;
    }

    /**
     * Get the status of the operation that was in progress when the exception occurred.
     *
     * @return A {@link ProgressStatus} object that provides information about when the exception occurred.
     */
    public ProgressStatus getStatus() {
        return status;
    }
}
| 5,897 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/exceptions/package-info.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
/**
 * Exceptions that are used in the AWS CloudTrail Processing Library.
*/
package com.amazonaws.services.cloudtrail.processinglibrary.exceptions;
| 5,898 |
0 | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary | Create_ds/aws-cloudtrail-processing-library/src/main/java/com/amazonaws/services/cloudtrail/processinglibrary/manager/BasicS3Manager.java | /*******************************************************************************
* Copyright 2010-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
******************************************************************************/
package com.amazonaws.services.cloudtrail.processinglibrary.manager;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.cloudtrail.processinglibrary.configuration.ProcessingConfiguration;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ExceptionHandler;
import com.amazonaws.services.cloudtrail.processinglibrary.interfaces.ProgressReporter;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog;
import com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.BasicProcessLogInfo;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressState;
import com.amazonaws.services.cloudtrail.processinglibrary.progress.ProgressStatus;
import com.amazonaws.services.cloudtrail.processinglibrary.utils.LibraryUtils;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
/**
* Manages Amazon S3 service-related operations.
*/
public class BasicS3Manager implements S3Manager {
    // Fixed copy-paste defect: the original logger was keyed to S3Manager.class
    // (the interface), which mislabeled every log line as coming from S3Manager
    // instead of this implementation class.
    private static final Log logger = LogFactory.getLog(BasicS3Manager.class);

    // All collaborators are injected once via the constructor and never
    // reassigned, so they are declared final (immutability by default).
    private final AmazonS3 s3Client;
    private final ProcessingConfiguration config;
    private final ExceptionHandler exceptionHandler;
    private final ProgressReporter progressReporter;

    /**
     * S3Manager constructor.
     *
     * @param s3Client A {@link AmazonS3}.
     * @param config A {@link ProcessingConfiguration}.
     * @param exceptionHandler An implementation of {@link ExceptionHandler} used to handle errors.
     * @param progressReporter An implementation of {@link ProgressReporter} used to report progress.
     * @throws IllegalArgumentException (or similar, per LibraryUtils.checkArgumentNotNull)
     *         if any argument is {@code null}.
     */
    public BasicS3Manager(AmazonS3 s3Client,
            ProcessingConfiguration config,
            ExceptionHandler exceptionHandler,
            ProgressReporter progressReporter) {
        this.config = config;
        this.exceptionHandler = exceptionHandler;
        this.progressReporter = progressReporter;
        this.s3Client = s3Client;
        validate();
    }

    /**
     * Downloads an AWS CloudTrail log from the specified source.
     * <p>
     * Progress is reported via the configured {@link ProgressReporter} before and
     * after the attempt; failures are routed to the configured
     * {@link ExceptionHandler}.
     *
     * @param ctLog The {@link CloudTrailLog} to download
     * @param source The {@link CloudTrailSource} to download the log from.
     * @return A byte array containing the log data, or {@code null} if the download
     *         failed and the exception handler did not rethrow (NOTE(review): whether
     *         {@code null} can actually propagate depends on the handler installed).
     */
    public byte[] downloadLog(CloudTrailLog ctLog, CloudTrailSource source) {
        boolean success = false;
        ProgressStatus downloadLogStatus = new ProgressStatus(ProgressState.downloadLog, new BasicProcessLogInfo(source, ctLog, success));
        final Object downloadSourceReportObject = progressReporter.reportStart(downloadLogStatus);

        byte[] s3ObjectBytes = null;
        // start to download CloudTrail log
        try {
            S3Object s3Object = this.getObject(ctLog.getS3Bucket(), ctLog.getS3ObjectKey());
            // try-with-resources guarantees the S3 stream is closed even if reading fails
            try (S3ObjectInputStream s3InputStream = s3Object.getObjectContent()) {
                s3ObjectBytes = LibraryUtils.toByteArray(s3InputStream);
            }
            ctLog.setLogFileSize(s3Object.getObjectMetadata().getContentLength());
            success = true;
            logger.info("Downloaded log file " + ctLog.getS3ObjectKey() + " from " + ctLog.getS3Bucket());
        } catch (AmazonServiceException | IOException e) {
            String exceptionMessage = String.format("Fail to download log file %s/%s.", ctLog.getS3Bucket(), ctLog.getS3ObjectKey());
            LibraryUtils.handleException(exceptionHandler, downloadLogStatus, e, exceptionMessage);
        } finally {
            // Always report completion, whether the download succeeded or not.
            LibraryUtils.endToProcess(progressReporter, success, downloadLogStatus, downloadSourceReportObject);
        }
        return s3ObjectBytes;
    }

    /**
     * Download an S3 object.
     *
     * @param bucketName The S3 bucket name from which to download the object.
     * @param objectKey The S3 key name of the object to download.
     * @return The downloaded
     *         <a href="http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/model/S3Object.html">S3Object</a>.
     * @throws AmazonServiceException if the request to Amazon S3 fails; the failure is
     *         logged before being rethrown to the caller.
     */
    public S3Object getObject(String bucketName, String objectKey) {
        try {
            return s3Client.getObject(bucketName, objectKey);
        } catch (AmazonServiceException e) {
            logger.error("Failed to get object " + objectKey + " from s3 bucket " + bucketName);
            throw e;
        }
    }

    /**
     * Validates input parameters, failing fast on any null collaborator.
     */
    private void validate() {
        LibraryUtils.checkArgumentNotNull(config, "configuration is null");
        LibraryUtils.checkArgumentNotNull(exceptionHandler, "exceptionHandler is null");
        LibraryUtils.checkArgumentNotNull(progressReporter, "progressReporter is null");
        LibraryUtils.checkArgumentNotNull(s3Client, "s3Client is null");
    }
}
| 5,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.