index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis/source/CamelSourceAWSKinesisITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kinesis.source;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.aws.v2.kinesis.common.TestKinesisConfiguration;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import software.amazon.awssdk.services.kinesis.KinesisClient;
import static org.apache.camel.kafkaconnector.aws.v2.kinesis.common.KinesisUtils.createStream;
import static org.apache.camel.kafkaconnector.aws.v2.kinesis.common.KinesisUtils.deleteStream;
import static org.apache.camel.kafkaconnector.aws.v2.kinesis.common.KinesisUtils.putRecords;
import static org.junit.jupiter.api.Assertions.assertEquals;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSourceAWSKinesisITCase extends CamelSourceTestSupport {

    @RegisterExtension
    public static AWSService awsService = AWSServiceFactoryWithTimeout.createKinesisService();

    private String streamName;
    private KinesisClient kinesisClient;
    private String topicName;

    // Number of records put on the Kinesis stream and expected on the Kafka topic.
    private final int expect = 10;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-kinesis-source-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        // Random suffix so repeated/concurrent runs do not collide on the stream name.
        streamName = AWSCommon.KINESIS_STREAM_BASE_NAME + "-" + TestUtils.randomWithRange(0, 100);
        kinesisClient = AWSSDKClientUtils.newKinesisClient();
        createStream(kinesisClient, streamName);
    }

    @AfterEach
    public void tearDown() {
        deleteStream(kinesisClient, streamName);
    }

    // Called by the test support to seed the stream with data.
    protected void produceTestData() {
        putRecords(kinesisClient, streamName, expect);
    }

    // Called by the test support after the connector has run.
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        // Fix: JUnit's assertEquals takes the expected value FIRST; the original call
        // had the arguments swapped, which produced a misleading failure message.
        assertEquals(expect, received, "Didn't process the expected amount of messages");
    }

    @Test
    @Timeout(120)
    public void testBasicSendReceive() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelAWSKinesisPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withAmazonConfig(awsService.getConnectionProperties())
                .withConfiguration(TestKinesisConfiguration.class.getName())
                .withStream(streamName);

        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,200 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis/sink/CamelAWSKinesisPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kinesis.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Creates the set of properties used by a Camel Kinesis Sink Connector.
 */
final class CamelAWSKinesisPropertyFactory extends SinkConnectorPropertyFactory<CamelAWSKinesisPropertyFactory> {
    // Maps the generic AWS configuration keys to the sink kamelet property names.
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-kinesis-sink.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-kinesis-sink.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-kinesis-sink.region");
    }

    private CamelAWSKinesisPropertyFactory() {
    }

    /** Applies the AWS credentials/region using the default (Spring-style) key mapping. */
    public CamelAWSKinesisPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // Fix: SPRING_STYLE is a static constant — access it directly, not via "this".
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /** Applies the AWS credentials/region using the given key mapping. */
    public CamelAWSKinesisPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    /** Sets the name of the target Kinesis stream. */
    public CamelAWSKinesisPropertyFactory withStream(String streamName) {
        return setProperty("camel.kamelet.aws-kinesis-sink.stream", streamName);
    }

    /** Points the component at a custom {@code Kinesis2Configuration} class. */
    public CamelAWSKinesisPropertyFactory withConfiguration(String configurationClass) {
        return setProperty("camel.component.aws2-kinesis.configuration",
                classRef(configurationClass));
    }

    /** Base configuration shared by all Kinesis sink connector tests. */
    public static CamelAWSKinesisPropertyFactory basic() {
        return new CamelAWSKinesisPropertyFactory()
                .withName("CamelAwsKinesisSinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.awskinesissink.CamelAwskinesissinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,201 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis/sink/CamelSinkAWSKinesisITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kinesis.sink;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.aws.v2.kinesis.common.KinesisUtils;
import org.apache.camel.kafkaconnector.aws.v2.kinesis.common.TestKinesisConfiguration;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.kinesis.KinesisClient;
import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest;
import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
import software.amazon.awssdk.services.kinesis.model.Record;
import static org.apache.camel.kafkaconnector.aws.v2.kinesis.common.KinesisUtils.createStream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSKinesisITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService awsService = AWSServiceFactoryWithTimeout.createKinesisService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSKinesisITCase.class);

    private String streamName;
    private KinesisClient kinesisClient;
    // Written by the consumer thread, read by the JUnit thread in verifyMessages.
    private volatile int received;
    // Number of Kafka records produced and expected to land on the Kinesis stream.
    private final int expect = 10;

    /**
     * Kafka producer that attaches a Camel header so each record carries its own
     * Kinesis partition key.
     */
    private static class CustomProducer extends StringMessageProducer {
        public CustomProducer(String bootstrapServer, String topicName, int count) {
            super(bootstrapServer, topicName, count);
        }

        @Override
        public Map<String, String> messageHeaders(String text, int current) {
            Map<String, String> headers = new HashMap<>();
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsKinesisPartitionKey",
                    "partition-" + current);
            return headers;
        }
    }

    /**
     * Polls the stream until the expected number of records appears in a single
     * getRecords response, then releases the latch.
     */
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            GetRecordsRequest getRecordsRequest = KinesisUtils.getGetRecordsRequest(kinesisClient, streamName);
            while (true) {
                // NOTE(review): the same shard iterator is reused for every poll and
                // "received" is overwritten (not accumulated) per response. This relies
                // on each getRecords call returning everything from TRIM_HORIZON —
                // confirm against the local Kinesis backend's behavior.
                GetRecordsResponse response = kinesisClient.getRecords(getRecordsRequest);
                List<Record> recordList = response.records();
                received = recordList.size();
                for (Record record : recordList) {
                    LOG.info("Received record: {}", record.data());
                    if (received >= expect) {
                        return;
                    }
                }
                // waitForData() comes from the test support class; presumably it sleeps
                // and returns false once the wait budget is exhausted — TODO confirm.
                if (!waitForData()) {
                    return;
                }
            }
        } catch (Exception e) {
            LOG.error("Error consuming records: {}", e.getMessage(), e);
        } finally {
            // Always release the latch so verifyMessages never blocks for the full timeout.
            latch.countDown();
        }
    }

    /**
     * Waits for the consumer to finish (110s, inside the test's 120s budget) and
     * asserts the expected record count was observed.
     */
    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(110, TimeUnit.SECONDS)) {
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
                    received, expect));
        }
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-kinesis-sink-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        // NOTE(review): unlike the source test, no @AfterEach deletes the stream here —
        // confirm whether cleanup is handled elsewhere or streams accumulate.
        streamName = AWSCommon.KINESIS_STREAM_BASE_NAME + "-" + TestUtils.randomWithRange(0, 100);
        kinesisClient = AWSSDKClientUtils.newKinesisClient();
        received = 0;
        createStream(kinesisClient, streamName);
    }

    @Test
    @Timeout(120)
    public void testBasicSendReceive() throws Exception {
        Properties amazonProperties = awsService.getConnectionProperties();
        String topicName = getTopicForTest(this);
        ConnectorPropertyFactory connectorPropertyFactory = CamelAWSKinesisPropertyFactory
                .basic()
                .withTopics(topicName)
                .withAmazonConfig(amazonProperties)
                .withConfiguration(TestKinesisConfiguration.class.getName())
                .withStream(streamName);
        runTest(connectorPropertyFactory, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,202 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis/common/KinesisUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kinesis.common;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.camel.test.infra.common.TestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.awscore.exception.AwsServiceException;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.kinesis.KinesisClient;
import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
import software.amazon.awssdk.services.kinesis.model.CreateStreamResponse;
import software.amazon.awssdk.services.kinesis.model.DeleteStreamRequest;
import software.amazon.awssdk.services.kinesis.model.DeleteStreamResponse;
import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse;
import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest;
import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse;
import software.amazon.awssdk.services.kinesis.model.KinesisException;
import software.amazon.awssdk.services.kinesis.model.PutRecordsRequest;
import software.amazon.awssdk.services.kinesis.model.PutRecordsRequestEntry;
import software.amazon.awssdk.services.kinesis.model.PutRecordsResponse;
import software.amazon.awssdk.services.kinesis.model.Record;
import software.amazon.awssdk.services.kinesis.model.ResourceInUseException;
import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
import software.amazon.awssdk.services.kinesis.model.Shard;
import static org.junit.jupiter.api.Assertions.fail;
public final class KinesisUtils {
private static final Logger LOG = LoggerFactory.getLogger(KinesisUtils.class);
private KinesisUtils() {
}
private static void doCreateStream(KinesisClient kinesisClient, String streamName) {
CreateStreamRequest request = CreateStreamRequest.builder()
.streamName(streamName)
.shardCount(1)
.build();
try {
CreateStreamResponse response = kinesisClient.createStream(request);
if (response.sdkHttpResponse().isSuccessful()) {
LOG.info("Stream created successfully");
} else {
fail("Failed to create the stream");
}
} catch (KinesisException e) {
LOG.error("Unable to create stream: {}", e.getMessage(), e);
fail("Unable to create stream");
}
}
public static void createStream(KinesisClient kinesisClient, String streamName) {
try {
LOG.info("Checking whether the stream exists already");
int status = getStreamStatus(kinesisClient, streamName);
LOG.info("Kinesis stream check result: {}", status);
} catch (KinesisException e) {
if (LOG.isTraceEnabled()) {
LOG.info("The stream does not exist, auto creating it: {}", e.getMessage(), e);
} else {
LOG.info("The stream does not exist, auto creating it: {}", e.getMessage());
}
doCreateStream(kinesisClient, streamName);
TestUtils.waitFor(() -> {
try {
GetRecordsRequest getRecordsRequest = KinesisUtils.getGetRecordsRequest(kinesisClient, streamName);
GetRecordsResponse response = kinesisClient.getRecords(getRecordsRequest);
List<Record> recordList = response.records();
LOG.debug("Checking for stream creation by reading {} records: SUCCESS!", recordList.size());
return true;
} catch (Exception exc) {
LOG.debug("Checking for stream creation by reading records: FAILURE, retrying..");
return false;
}
});
}
}
private static int getStreamStatus(KinesisClient kinesisClient, String streamName) {
DescribeStreamRequest request = DescribeStreamRequest.builder()
.streamName(streamName)
.build();
DescribeStreamResponse response = kinesisClient.describeStream(request);
return response.sdkHttpResponse().statusCode();
}
public static void doDeleteStream(KinesisClient kinesisClient, String streamName) {
DeleteStreamRequest request = DeleteStreamRequest.builder()
.streamName(streamName)
.build();
DeleteStreamResponse response = kinesisClient.deleteStream(request);
if (response.sdkHttpResponse().isSuccessful()) {
LOG.info("Stream deleted successfully");
} else {
fail("Failed to delete the stream");
}
}
public static void deleteStream(KinesisClient kinesisClient, String streamName) {
try {
LOG.info("Checking whether the stream exists already");
DescribeStreamRequest request = DescribeStreamRequest.builder()
.streamName(streamName)
.build();
DescribeStreamResponse response = kinesisClient.describeStream(request);
if (response.sdkHttpResponse().isSuccessful()) {
LOG.info("Kinesis stream check result");
doDeleteStream(kinesisClient, streamName);
}
} catch (ResourceNotFoundException e) {
LOG.info("The stream does not exist, skipping deletion");
} catch (ResourceInUseException e) {
LOG.info("The stream exist but cannot be deleted because it's in use");
doDeleteStream(kinesisClient, streamName);
}
}
public static void putRecords(KinesisClient kinesisClient, String streamName, int count) {
List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();
LOG.debug("Adding data to the Kinesis stream");
for (int i = 0; i < count; i++) {
String partition = String.format("partitionKey-%d", i);
PutRecordsRequestEntry putRecordsRequestEntry = PutRecordsRequestEntry.builder()
.data(SdkBytes.fromByteArray(String.valueOf(i).getBytes()))
.partitionKey(partition)
.build();
LOG.debug("Added data {} (as bytes) to partition {}", i, partition);
putRecordsRequestEntryList.add(putRecordsRequestEntry);
}
LOG.debug("Done creating the data records");
PutRecordsRequest putRecordsRequest = PutRecordsRequest
.builder()
.streamName(streamName)
.records(putRecordsRequestEntryList)
.build();
int retries = 5;
do {
try {
PutRecordsResponse response = kinesisClient.putRecords(putRecordsRequest);
if (response.sdkHttpResponse().isSuccessful()) {
LOG.debug("Done putting the data records into the stream");
} else {
fail("Unable to put all the records into the stream");
}
break;
} catch (AwsServiceException e) {
retries--;
/*
This works around the "... Cannot deserialize instance of `...AmazonKinesisException` out of NOT_AVAILABLE token
It may take some time for the local Kinesis backend to be fully up - even though the container is
reportedly up and running. Therefore, it tries a few more times
*/
LOG.trace("Failed to put the records: {}. Retrying in 2 seconds ...", e.getMessage());
if (retries == 0) {
LOG.error("Failed to put the records: {}", e.getMessage(), e);
throw e;
}
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(2));
} catch (InterruptedException ex) {
break;
}
}
} while (retries > 0);
}
private static boolean hasShards(KinesisClient kinesisClient, DescribeStreamRequest describeStreamRequest) {
DescribeStreamResponse streamRes = kinesisClient.describeStream(describeStreamRequest);
return !streamRes.streamDescription().shards().isEmpty();
}
private static List<Shard> getAllShards(KinesisClient kinesisClient, DescribeStreamRequest describeStreamRequest) {
List<Shard> shards = new ArrayList<>();
DescribeStreamResponse streamRes;
do {
streamRes = kinesisClient.describeStream(describeStreamRequest);
shards.addAll(streamRes.streamDescription().shards());
} while (streamRes.streamDescription().hasMoreShards());
return shards;
}
public static GetRecordsRequest getGetRecordsRequest(KinesisClient kinesisClient, String streamName) {
DescribeStreamRequest describeStreamRequest = DescribeStreamRequest.builder()
.streamName(streamName)
.build();
TestUtils.waitFor(() -> hasShards(kinesisClient, describeStreamRequest));
List<Shard> shards = getAllShards(kinesisClient, describeStreamRequest);
GetShardIteratorRequest iteratorRequest = GetShardIteratorRequest.builder()
.streamName(streamName)
.shardId(shards.get(0).shardId())
.shardIteratorType("TRIM_HORIZON")
.build();
GetShardIteratorResponse iteratorResponse = kinesisClient.getShardIterator(iteratorRequest);
return GetRecordsRequest
.builder()
.shardIterator(iteratorResponse.shardIterator())
.build();
}
}
| 9,203 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kinesis/common/TestKinesisConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kinesis.common;
import org.apache.camel.component.aws2.kinesis.Kinesis2Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.kinesis.KinesisClient;
/**
 * Kinesis component configuration that supplies a client wired to the test
 * infrastructure, created lazily on first request and reused afterwards.
 */
public class TestKinesisConfiguration extends Kinesis2Configuration {
    // Lazily-created client, shared by all subsequent calls.
    private KinesisClient kinesisClient;

    @Override
    public KinesisClient getAmazonKinesisClient() {
        KinesisClient client = kinesisClient;
        if (client == null) {
            client = AWSSDKClientUtils.newKinesisClient();
            kinesisClient = client;
        }
        return client;
    }
}
| 9,204 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/clients/AWSSNSClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.clients;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.sns.SnsClient;
import software.amazon.awssdk.services.sns.model.CreateTopicRequest;
import software.amazon.awssdk.services.sns.model.CreateTopicResponse;
import software.amazon.awssdk.services.sns.model.ListTopicsRequest;
import software.amazon.awssdk.services.sns.model.ListTopicsResponse;
import software.amazon.awssdk.services.sns.model.SubscribeRequest;
import software.amazon.awssdk.services.sns.model.SubscribeResponse;
import software.amazon.awssdk.services.sns.model.Topic;
/**
 * Thin test-support wrapper around the AWS SDK v2 {@link SnsClient} for
 * listing/creating topics and wiring SQS subscriptions.
 */
public class AWSSNSClient {
    private static final Logger LOG = LoggerFactory.getLogger(AWSSNSClient.class);

    private final SnsClient sns;
    // Fix: removed the unused "maxNumberOfMessages" field (copy-paste leftover
    // from the SQS client; SNS has no receive API that would use it).

    public AWSSNSClient(SnsClient sns) {
        this.sns = sns;
    }

    /** Returns the topics visible to the client (first page only). */
    public List<Topic> getTopics() {
        ListTopicsRequest request = ListTopicsRequest.builder()
                .build();
        ListTopicsResponse result = sns.listTopics(request);
        return result.topics();
    }

    /**
     * Creates the topic and returns its ARN, or null (with a warning) when the
     * service rejects the request.
     */
    public String createTopic(String topic) {
        CreateTopicRequest request = CreateTopicRequest.builder()
                .name(topic)
                .build();
        CreateTopicResponse response = sns.createTopic(request);
        if (response.sdkHttpResponse().isSuccessful()) {
            return response.topicArn();
        }
        LOG.warn("Unable to create the topic: {}", response.sdkHttpResponse().statusCode());
        return null;
    }

    /** Subscribes the given SQS queue (by ARN) to the given SNS topic. */
    public void subscribeSQS(String topicArn, String sqsArn) {
        SubscribeRequest request = SubscribeRequest.builder()
                .protocol("sqs")
                .endpoint(sqsArn)
                .returnSubscriptionArn(true)
                .topicArn(topicArn)
                .build();
        SubscribeResponse response = sns.subscribe(request);
        if (!response.sdkHttpResponse().isSuccessful()) {
            LOG.warn("Unable to create sqs subscription from sqs queue {} to sns topic: {}, status code: {}", sqsArn, topicArn, response.sdkHttpResponse().statusCode());
        }
    }
}
| 9,205 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/clients/AWSSQSClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.clients;
import java.util.List;
import java.util.function.Predicate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.sqs.SqsClient;
import software.amazon.awssdk.services.sqs.model.CreateQueueRequest;
import software.amazon.awssdk.services.sqs.model.CreateQueueResponse;
import software.amazon.awssdk.services.sqs.model.DeleteQueueRequest;
import software.amazon.awssdk.services.sqs.model.DeleteQueueResponse;
import software.amazon.awssdk.services.sqs.model.GetQueueUrlRequest;
import software.amazon.awssdk.services.sqs.model.GetQueueUrlResponse;
import software.amazon.awssdk.services.sqs.model.Message;
import software.amazon.awssdk.services.sqs.model.QueueDoesNotExistException;
import software.amazon.awssdk.services.sqs.model.ReceiveMessageRequest;
import software.amazon.awssdk.services.sqs.model.ReceiveMessageResponse;
import software.amazon.awssdk.services.sqs.model.SendMessageRequest;
/**
 * Thin test-support wrapper around the AWS SDK v2 {@link SqsClient} for
 * creating, deleting, and exchanging messages with SQS queues.
 */
public class AWSSQSClient {
    private static final Logger LOG = LoggerFactory.getLogger(AWSSQSClient.class);

    private final SqsClient sqs;
    // One message per receive call so the predicate inspects records individually.
    private final int maxNumberOfMessages = 1;

    public AWSSQSClient(SqsClient sqs) {
        this.sqs = sqs;
    }

    /** Resolves the URL of an existing queue; throws QueueDoesNotExistException otherwise. */
    public String getQueue(String queue) {
        GetQueueUrlRequest getQueueUrlRequest = GetQueueUrlRequest.builder()
                .queueName(queue)
                .build();
        GetQueueUrlResponse getQueueUrlResult = sqs.getQueueUrl(getQueueUrlRequest);
        return getQueueUrlResult.queueUrl();
    }

    /** Creates the queue and returns its URL, or null (with a warning) on failure. */
    public String createQueue(String queue) {
        final CreateQueueRequest createFifoQueueRequest = CreateQueueRequest.builder()
                .queueName(queue)
                .build();
        LOG.debug("Queue: {} QueueName: {} createFifoQueueRequest: {}", queue, createFifoQueueRequest.queueName(), createFifoQueueRequest);
        CreateQueueResponse response = sqs.createQueue(createFifoQueueRequest);
        if (response.sdkHttpResponse().isSuccessful()) {
            return response.queueUrl();
        }
        LOG.warn("Unable to create the queue: {}", response.sdkHttpResponse().statusCode());
        return null;
    }

    /**
     * Long-polls the queue (creating it if needed) and feeds each batch to the
     * predicate; consumption stops when the predicate returns false.
     */
    public void receive(String queue, Predicate<List<Message>> predicate) {
        // Fix: reuse getOrCreateQueue instead of duplicating its try/catch inline.
        String queueUrl = getOrCreateQueue(queue);
        LOG.debug("Consuming messages from {}", queueUrl);
        int maxWaitTime = 10;
        final ReceiveMessageRequest request = ReceiveMessageRequest.builder()
                .queueUrl(queueUrl)
                .waitTimeSeconds(maxWaitTime)
                .maxNumberOfMessages(maxNumberOfMessages)
                .build();
        while (true) {
            ReceiveMessageResponse response = sqs.receiveMessage(request);
            if (!response.sdkHttpResponse().isSuccessful()) {
                LOG.warn("Did not receive a success response from SQS: status code {}",
                        response.sdkHttpResponse().statusCode());
            }
            List<Message> messages = response.messages();
            if (!predicate.test(messages)) {
                return;
            }
        }
    }

    /** Sends a single message with the given body, creating the queue if needed. */
    public void send(String queue, String body) {
        // Fix: reuse getOrCreateQueue instead of duplicating its try/catch inline.
        String queueUrl = getOrCreateQueue(queue);
        LOG.debug("Sending messages to {}", queueUrl);
        SendMessageRequest request = SendMessageRequest.builder()
                .queueUrl(queueUrl)
                .messageBody(body)
                .build();
        sqs.sendMessage(request);
    }

    /**
     * Deletes the queue. Returns true on success or when the queue does not
     * exist; false (with a warning) when the service rejects the deletion.
     */
    public boolean deleteQueue(String queue) {
        String queueUrl;
        try {
            queueUrl = getQueue(queue);
            DeleteQueueRequest deleteQueueRequest = DeleteQueueRequest.builder()
                    .queueUrl(queueUrl)
                    .build();
            DeleteQueueResponse result = sqs.deleteQueue(deleteQueueRequest);
            if (!result.sdkHttpResponse().isSuccessful()) {
                LOG.warn("Unable to delete queue {}", queue);
                return false;
            }
            return true;
        } catch (QueueDoesNotExistException e) {
            return true;
        }
    }

    /** Returns the queue URL, creating the queue first when it does not exist. */
    public String getOrCreateQueue(String queue) {
        String queueUrl;
        try {
            queueUrl = getQueue(queue);
        } catch (QueueDoesNotExistException e) {
            queueUrl = createQueue(queue);
        }
        return queueUrl;
    }
}
| 9,206 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kms | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kms/sink/CamelAWSKMSPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kms.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Creates the set of properties used by a Camel AWS2 KMS sink connector.
 */
public class CamelAWSKMSPropertyFactory extends SinkConnectorPropertyFactory<CamelAWSKMSPropertyFactory> {
    // Maps common AWS settings to Spring-style (camelCase) connector property keys.
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();
    // Maps common AWS settings to Kafka-style (dashed) connector property keys.
    public static final Map<String, String> KAFKA_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.component.aws2-kms.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.component.aws2-kms.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.component.aws2-kms.region");

        KAFKA_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.component.aws2-kms.access-key");
        KAFKA_STYLE.put(AWSConfigs.SECRET_KEY, "camel.component.aws2-kms.secret-key");
        KAFKA_STYLE.put(AWSConfigs.REGION, "camel.component.aws2-kms.region");
    }

    /** Sets the label used as the sink endpoint path. */
    public CamelAWSKMSPropertyFactory withSinkPathLabel(String value) {
        return setProperty("camel.sink.path.label", value);
    }

    /** Sets the KMS operation (e.g. "createKey") performed by the sink endpoint. */
    public CamelAWSKMSPropertyFactory withSinkEndpointOperation(String value) {
        return setProperty("camel.sink.endpoint.operation", value);
    }

    /** Points the component at a custom KMS2Configuration class. */
    public CamelAWSKMSPropertyFactory withConfiguration(String value) {
        return setProperty("camel.component.aws2-kms.configuration", classRef(value));
    }

    /** Applies the common AWS settings using the default Spring-style keys. */
    public CamelAWSKMSPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // SPRING_STYLE is static: reference it directly, not through "this".
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /** Applies the common AWS settings using the given key-naming style. */
    public CamelAWSKMSPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    /** Creates a factory pre-configured with the basic sink connector settings. */
    public static CamelAWSKMSPropertyFactory basic() {
        return new CamelAWSKMSPropertyFactory()
                .withTasksMax(1)
                .withName("CamelAws2kmsSinkConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.aws2kms.CamelAws2kmsSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,207 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kms | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kms/sink/TestKMS2Configuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kms.sink;
import org.apache.camel.component.aws2.kms.KMS2Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.kms.KmsClient;
/**
 * KMS configuration used by the integration tests: overrides the client so
 * it is built by the test-infra utilities (pointing at the test AWS service).
 */
public class TestKMS2Configuration extends KMS2Configuration {
    // Lazily created on first access and reused afterwards.
    private KmsClient client;

    @Override
    public KmsClient getKmsClient() {
        if (client != null) {
            return client;
        }
        client = AWSSDKClientUtils.newKMSClient();
        return client;
    }
}
| 9,208 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kms | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/kms/sink/CamelSinkAWSKMSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.kms.sink;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.kms.KmsClient;
import software.amazon.awssdk.services.kms.model.DescribeKeyRequest;
import software.amazon.awssdk.services.kms.model.DescribeKeyResponse;
import software.amazon.awssdk.services.kms.model.KeyListEntry;
import software.amazon.awssdk.services.kms.model.ListKeysResponse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test for the Camel AWS2 KMS sink connector: produces Kafka
 * records carrying KMS headers and verifies that keys are created on the
 * (test) KMS service.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSKMSITCase extends CamelSinkTestSupport {
// Provides the containerized/remote KMS test service for the whole class.
@RegisterExtension
public static AWSService awsService = AWSServiceFactoryWithTimeout.createKMSService();
private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSKMSITCase.class);
// Sink endpoint path label, randomized per test run.
private String logicalName;
private KmsClient client;
// Keys observed so far; written by the polling thread, read by the verifier.
private volatile int received;
// Number of records produced, and hence keys expected to be created.
private final int expect = 10;
// Producer that adds the KMS key id/description headers the sink maps onto the exchange.
private static class CustomProducer extends StringMessageProducer {
public CustomProducer(String bootstrapServer, String topicName, int count) {
super(bootstrapServer, topicName, count);
}
@Override
public Map<String, String> messageHeaders(String text, int current) {
Map<String, String> headers = new HashMap<>();
headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsKMSKeyId",
String.valueOf(current));
headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsKMSDescription",
"test key " + current);
return headers;
}
}
// Polls KMS until at least the expected number of keys is listed (or the
// wait gives up), then releases the latch so verifyMessages can assert.
@Override
protected void consumeMessages(CountDownLatch latch) {
try {
while (true) {
ListKeysResponse response = client.listKeys();
List<KeyListEntry> keys = response.keys();
received = keys.size();
for (KeyListEntry entry : keys) {
DescribeKeyRequest describeKeyRequest = DescribeKeyRequest.builder().keyId(entry.keyId()).build();
DescribeKeyResponse describeKeyResponse = client.describeKey(describeKeyRequest);
LOG.info("Received key: {} / {}: {}", entry.keyId(), entry.keyArn(),
describeKeyResponse.keyMetadata().description());
}
if (received >= expect) {
return;
}
// waitForData() blocks between polls; false means we stop waiting.
if (!waitForData()) {
return;
}
}
} finally {
// Always release the latch so the verifier never blocks forever.
latch.countDown();
}
}
@Override
protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
if (latch.await(110, TimeUnit.SECONDS)) {
// There is 1 default key from localstack, so the check here is different.
assertTrue(received >= expect, "Should have processed at least : " + expect
+ " keys, but processed only " + received);
} else {
fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
received, expect));
}
}
@Override
protected String[] getConnectorsInTest() {
return new String[] {"camel-aws2-kms-kafka-connector"};
}
@BeforeEach
public void setUp() {
logicalName = "kms-" + TestUtils.randomWithRange(0, 100);
client = AWSSDKClientUtils.newKMSClient();
received = 0;
}
@Test
@Timeout(120)
public void testBasicSendReceive() throws Exception {
Properties amazonProperties = awsService.getConnectionProperties();
String topicName = getTopicForTest(this);
// Uses the "createKey" operation so every record triggers a key creation.
ConnectorPropertyFactory connectorPropertyFactory = CamelAWSKMSPropertyFactory
.basic()
.withTopics(topicName)
.withAmazonConfig(amazonProperties)
.withConfiguration(TestKMS2Configuration.class.getName())
.withSinkEndpointOperation("createKey")
.withSinkPathLabel(logicalName);
runTest(connectorPropertyFactory, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
}
}
| 9,209 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3/source/CamelSourceAWSS3LargeFilesITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.s3.source;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils;
import org.apache.camel.kafkaconnector.aws.v2.s3.common.TestS3Configuration;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactory;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.s3.S3Client;
import static org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils.createBucket;
import static org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils.deleteBucket;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/* To run this test create (large) files in a test directory
   (i.e.: dd if=/dev/random of=large.test bs=512 count=50000)
   Note: they must have the .test extension.
   Then run it with:
   mvn -DskipIntegrationTests=false -Daws-service.s3.test.directory=/path/to/manual-s3
        -Dit.test=CamelSourceAWSS3LargeFilesITCase verify
 */
/**
 * Manual integration test: uploads user-provided (large) files to a test S3
 * bucket and verifies the AWS S3 source connector publishes one Kafka record
 * per uploaded file.
 */
@EnabledIfSystemProperty(named = "aws-service.s3.test.directory", matches = ".*",
        disabledReason = "Manual test that requires the user to provide a directory with files")
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSourceAWSS3LargeFilesITCase extends CamelSourceTestSupport {
    @RegisterExtension
    public static AWSService service = AWSServiceFactory.createS3Service();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSourceAWSS3LargeFilesITCase.class);

    private S3Client awsS3Client;
    // Randomized per test to avoid collisions between runs.
    private String bucketName;
    private String topicName;
    // Number of messages expected: one per file found in the test directory.
    private int expect;
    private File[] files;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-s3-source-kafka-connector"};
    }

    @BeforeAll
    public void setupTestFiles() throws IOException {
        // The directory with the .test files is supplied via a system property.
        String filePath = System.getProperty("aws-service.s3.test.directory");
        File baseTestDir = new File(filePath);
        files = S3Utils.getFilesToSend(baseTestDir);
        expect = files.length;
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        awsS3Client = AWSSDKClientUtils.newS3Client();
        bucketName = AWSCommon.DEFAULT_S3_BUCKET + TestUtils.randomWithRange(0, 100);
        try {
            createBucket(awsS3Client, bucketName);
        } catch (Exception e) {
            LOG.error("Unable to create bucket: {}", e.getMessage(), e);
            fail("Unable to create bucket");
        }
    }

    @AfterEach
    public void tearDown() {
        try {
            deleteBucket(awsS3Client, bucketName);
        } catch (Exception e) {
            // Best-effort cleanup: a leftover bucket should not fail the test.
            LOG.warn("Unable to delete bucket: {}", e.getMessage(), e);
        }
    }

    @Override
    protected void produceTestData() {
        S3Utils.sendFilesFromPath(awsS3Client, bucketName, files);
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        assertEquals(expect, received, "Didn't process the expected amount of messages");
    }

    @Test
    @Timeout(value = 60, unit = TimeUnit.MINUTES)
    public void testBasicSendReceiveWithKafkaStyleLargeFile() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelAWSS3PropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withConfiguration(TestS3Configuration.class.getName())
                .withBucketNameOrArn(bucketName)
                .withAmazonConfig(service.getConnectionProperties());
        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,210 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3/source/CamelSourceAWSS3ITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.s3.source;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils;
import org.apache.camel.kafkaconnector.aws.v2.s3.common.TestS3Configuration;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.s3.S3Client;
import static org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils.createBucket;
import static org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils.deleteBucket;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test for the AWS S3 source connector: uploads the bundled test
 * files to a test S3 bucket and verifies one Kafka record arrives per file.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSourceAWSS3ITCase extends CamelSourceTestSupport {
    @RegisterExtension
    public static AWSService service = AWSServiceFactoryWithTimeout.createS3Service();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSourceAWSS3ITCase.class);

    private S3Client awsS3Client;
    // Randomized per test to avoid collisions between runs.
    private String bucketName;
    private String topicName;
    // Number of messages expected: one per file shipped with the test resources.
    private int expect;
    private File[] files;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-s3-source-kafka-connector"};
    }

    @BeforeAll
    public void setupTestFiles() throws IOException {
        // The files to upload live next to this class on the test classpath.
        final URL resourceDir = this.getClass().getResource(".");
        final File baseTestDir = new File(resourceDir.getFile());
        files = S3Utils.getFilesToSend(baseTestDir);
        expect = files.length;
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        awsS3Client = AWSSDKClientUtils.newS3Client();
        bucketName = AWSCommon.DEFAULT_S3_BUCKET + TestUtils.randomWithRange(0, 100);
        try {
            createBucket(awsS3Client, bucketName);
        } catch (Exception e) {
            LOG.error("Unable to create bucket: {}", e.getMessage(), e);
            fail("Unable to create bucket");
        }
    }

    @AfterEach
    public void tearDown() {
        try {
            deleteBucket(awsS3Client, bucketName);
        } catch (Exception e) {
            // Best-effort cleanup: a leftover bucket should not fail the test.
            LOG.warn("Unable to delete bucket: {}", e.getMessage(), e);
        }
    }

    @Override
    protected void produceTestData() {
        S3Utils.sendFilesFromPath(awsS3Client, bucketName, files);
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        assertEquals(expect, received, "Didn't process the expected amount of messages");
    }

    @Test
    @Timeout(180)
    public void testBasicSendReceive() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelAWSS3PropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withConfiguration(TestS3Configuration.class.getName())
                .withBucketNameOrArn(bucketName)
                .withAmazonConfig(service.getConnectionProperties());
        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(180)
    public void testBasicSendReceiveWithMaxMessagesPerPoll() throws ExecutionException, InterruptedException {
        // Same as the basic test, but limits the endpoint to 5 messages per poll.
        ConnectorPropertyFactory connectorPropertyFactory = CamelAWSS3PropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withConfiguration(TestS3Configuration.class.getName())
                .withMaxMessagesPerPoll(5)
                .withBucketNameOrArn(bucketName)
                .withAmazonConfig(service.getConnectionProperties());
        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,211 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3/source/CamelAWSS3PropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.s3.source;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Creates the set of properties used by a Camel AWS S3 source connector.
 */
final class CamelAWSS3PropertyFactory extends SourceConnectorPropertyFactory<CamelAWSS3PropertyFactory> {
    // Maps common AWS settings to Spring-style (camelCase) kamelet property keys.
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-s3-source.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-s3-source.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-s3-source.region");
    }

    private CamelAWSS3PropertyFactory() {
    }

    /** Applies the common AWS settings using the default Spring-style keys. */
    public CamelAWSS3PropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // SPRING_STYLE is static: reference it directly, not through "this".
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /** Applies the common AWS settings using the given key-naming style. */
    public CamelAWSS3PropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    /** Limits how many messages the source endpoint fetches per poll. */
    public CamelAWSS3PropertyFactory withMaxMessagesPerPoll(int value) {
        return setProperty("camel.source.endpoint.maxMessagesPerPoll", Integer.toString(value));
    }

    /** Sets the S3 bucket name or ARN the source reads from. */
    public CamelAWSS3PropertyFactory withBucketNameOrArn(String bucketNameOrArn) {
        return setProperty("camel.kamelet.aws-s3-source.bucketNameOrArn", bucketNameOrArn);
    }

    /** Points the component at a custom AWS2S3Configuration class. */
    public CamelAWSS3PropertyFactory withConfiguration(String configurationClass) {
        return setProperty("camel.component.aws2-s3.configuration", classRef(configurationClass));
    }

    /** Creates a factory pre-configured with the basic source connector settings. */
    public static CamelAWSS3PropertyFactory basic() {
        return new CamelAWSS3PropertyFactory()
                .withName("CamelAwss3SourceConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.awss3source.CamelAwss3sourceSourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,212 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3/sink/CamelSinkAWSS3ITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.s3.sink;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils;
import org.apache.camel.kafkaconnector.aws.v2.s3.common.TestS3Configuration;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.S3Object;
import static org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils.createBucket;
import static org.apache.camel.kafkaconnector.aws.v2.s3.common.S3Utils.deleteBucket;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test for the AWS S3 sink connector: produces Kafka records with
 * S3 key/bucket headers and verifies the objects appear in the test bucket.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSS3ITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService service = AWSServiceFactoryWithTimeout.createS3Service();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSS3ITCase.class);

    private S3Client awsS3Client;
    // Randomized per test to avoid collisions between runs.
    private String bucketName;
    // Objects observed so far; written by the polling thread, read by the verifier.
    private volatile int received;
    // Number of records produced, and hence objects expected in the bucket.
    private final int expect = 10;

    // Producer that adds the S3 key/bucket headers the sink maps onto the exchange.
    // Non-static on purpose: it reads the per-test bucketName from the outer instance.
    private class CustomProducer extends StringMessageProducer {
        public CustomProducer(String bootstrapServer, String topicName, int count) {
            super(bootstrapServer, topicName, count);
        }

        @Override
        public Map<String, String> messageHeaders(String text, int current) {
            Map<String, String> headers = new HashMap<>();
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsS3Key",
                    "file" + current + ".txt");
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsS3BucketName",
                    bucketName);
            return headers;
        }
    }

    // Polls the bucket until the expected number of objects is listed (or the
    // wait gives up), then releases the latch so verifyMessages can assert.
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            while (true) {
                List<S3Object> objectList = S3Utils.listObjects(awsS3Client, bucketName);
                for (S3Object object : objectList) {
                    LOG.info("Object key: {}", object.key());
                }
                received = objectList.size();
                if (received >= expect) {
                    return;
                }
                // waitForData() blocks between polls; false means we stop waiting.
                if (!waitForData()) {
                    return;
                }
            }
        } finally {
            // Always release the latch so the verifier never blocks forever.
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(110, TimeUnit.SECONDS)) {
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
                    received, expect));
        }
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-s3-sink-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        awsS3Client = AWSSDKClientUtils.newS3Client();
        received = 0;
        bucketName = AWSCommon.DEFAULT_S3_BUCKET + TestUtils.randomWithRange(0, 100);
        try {
            createBucket(awsS3Client, bucketName);
        } catch (Exception e) {
            LOG.error("Unable to create bucket: {}", e.getMessage(), e);
            fail("Unable to create bucket");
        }
    }

    @AfterEach
    public void tearDown() {
        try {
            deleteBucket(awsS3Client, bucketName);
        } catch (Exception e) {
            // Best-effort cleanup: a leftover bucket should not fail the test.
            LOG.warn("Unable to delete bucket: {}", e.getMessage(), e);
        }
    }

    @Test
    @Timeout(180)
    public void testBasicSendReceive() throws Exception {
        Properties amazonProperties = service.getConnectionProperties();
        String topicName = getTopicForTest(this);
        ConnectorPropertyFactory testProperties = CamelAWSS3PropertyFactory
                .basic()
                .withTopics(topicName)
                .withConfiguration(TestS3Configuration.class.getName())
                .withAmazonConfig(amazonProperties)
                .withBucketNameOrArn(bucketName)
                .withAutoCreateBucket(true);
        runTest(testProperties, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,213 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3/sink/CamelAWSS3PropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.s3.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Creates the set of properties used by a Camel AWS S3 sink connector.
 */
public final class CamelAWSS3PropertyFactory extends SinkConnectorPropertyFactory<CamelAWSS3PropertyFactory> {
    // Maps common AWS settings to Spring-style (camelCase) kamelet property keys.
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-s3-sink.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-s3-sink.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-s3-sink.region");
    }

    private CamelAWSS3PropertyFactory() {
    }

    /** Applies the common AWS settings using the default Spring-style keys. */
    public CamelAWSS3PropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // SPRING_STYLE is static: reference it directly, not through "this".
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /** Applies the common AWS settings using the given key-naming style. */
    public CamelAWSS3PropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    /** Sets the S3 bucket name or ARN the sink writes to. */
    public CamelAWSS3PropertyFactory withBucketNameOrArn(String bucketNameOrArn) {
        return setProperty("camel.kamelet.aws-s3-sink.bucketNameOrArn", bucketNameOrArn);
    }

    /** Points the component at a custom AWS2S3Configuration class. */
    public CamelAWSS3PropertyFactory withConfiguration(String configurationClass) {
        return setProperty("camel.component.aws2-s3.configuration", classRef(configurationClass));
    }

    /** Enables/disables automatic bucket creation on the sink. */
    public CamelAWSS3PropertyFactory withAutoCreateBucket(boolean value) {
        return setProperty("camel.kamelet.aws-s3-sink.autoCreateBucket", value);
    }

    /** Creates a factory pre-configured with the basic sink connector settings. */
    public static CamelAWSS3PropertyFactory basic() {
        return new CamelAWSS3PropertyFactory()
                .withTasksMax(1)
                .withName("CamelAws2s3SinkConnectorConfig")
                .withConnectorClass("org.apache.camel.kafkaconnector.awss3sink.CamelAwss3sinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,214 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3/common/TestS3Configuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.s3.common;
import org.apache.camel.component.aws2.s3.AWS2S3Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.s3.S3Client;
/**
 * S3 configuration used in tests: lazily provides an S3 client built by the
 * test-infra utilities instead of one configured from endpoint properties.
 */
public class TestS3Configuration extends AWS2S3Configuration {
    private S3Client s3Client;

    /** Returns the shared test client, creating it on first access. */
    @Override
    public S3Client getAmazonS3Client() {
        if (s3Client == null) {
            s3Client = AWSSDKClientUtils.newS3Client();
        }

        return s3Client;
    }
}
| 9,215 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/s3/common/S3Utils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.s3.common;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketRequest;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.S3Object;
/**
 * Helper routines for manipulating S3 buckets and objects in the tests.
 */
public final class S3Utils {
    private static final Logger LOG = LoggerFactory.getLogger(S3Utils.class);

    private S3Utils() {
    }

    /**
     * Lists every object currently stored in the given bucket.
     *
     * @param s3Client the client used to talk to S3
     * @param bucketName the bucket to inspect
     * @return the objects contained in the bucket
     */
    public static List<S3Object> listObjects(S3Client s3Client, String bucketName) {
        try {
            ListObjectsV2Response response = s3Client.listObjectsV2(
                    ListObjectsV2Request.builder()
                            .bucket(bucketName)
                            .build());

            return response.contents();
        } catch (Exception e) {
            LOG.debug("Error listing: {}", e.getMessage(), e);
            throw e;
        }
    }

    /**
     * Empties and then deletes an S3 bucket using the provided client. Based on the
     * AWS documentation: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html
     *
     * AWS SDK v1 doc for reference:
     * https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-or-empty-bucket.html#delete-bucket-sdk-java
     * @param s3Client the AmazonS3 client instance used to delete the bucket
     * @param bucketName a String containing the bucket name
     */
    public static void deleteBucket(S3Client s3Client, String bucketName) {
        // Remove every object first. This suffices for non-versioned buckets; a
        // versioned bucket would additionally require deleting all object versions,
        // because plain deletes there only insert delete markers.
        ListObjectsV2Request request = ListObjectsV2Request.builder()
                .bucket(bucketName)
                .build();

        ListObjectsV2Response listing;
        do {
            listing = s3Client.listObjectsV2(request);

            for (S3Object object : listing.contents()) {
                s3Client.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key(object.key()).build());
            }

            // Advance to the next page of the listing, if any
            request = ListObjectsV2Request.builder().bucket(bucketName)
                    .continuationToken(listing.nextContinuationToken())
                    .build();
        } while (listing.isTruncated());

        s3Client.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build());
    }

    /** Creates a bucket with the given name. */
    public static void createBucket(S3Client s3Client, String bucketName) {
        s3Client.createBucket(CreateBucketRequest.builder()
                .bucket(bucketName)
                .build());
    }

    /**
     * Collects the ".test" files to send from the given directory.
     *
     * @param dir the directory to scan
     * @return the matching files
     * @throws IOException if the path cannot be read, is not a directory, or holds no test files
     */
    public static File[] getFilesToSend(File dir) throws IOException {
        File[] candidates = dir.listFiles(f -> f.getName().endsWith(".test"));

        if (candidates == null) {
            throw new IOException("Either I/O error or the path used is not a directory");
        }

        if (candidates.length == 0) {
            throw new IOException("Not enough files to run the test");
        }

        return candidates;
    }

    /** Uploads each of the given files to the bucket, keyed by file name. */
    public static void sendFilesFromPath(S3Client s3Client, String bucketName, File[] files) {
        LOG.debug("Putting S3 objects");

        for (File file : files) {
            LOG.debug("Trying to read file {}", file.getName());

            s3Client.putObject(PutObjectRequest.builder()
                    .bucket(bucketName)
                    .key(file.getName())
                    .build(), file.toPath());
        }
    }
}
| 9,216 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sns | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sns/sink/CamelAWSSNSPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.sns.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
import software.amazon.awssdk.regions.Region;
/**
 * Creates the set of properties used by a Camel AWS SNS sink connector.
 */
final class CamelAWSSNSPropertyFactory extends SinkConnectorPropertyFactory<CamelAWSSNSPropertyFactory> {
    // Maps the generic AWS configuration keys to the Kamelet-style property
    // names consumed by the aws-sns-sink Kamelet
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-sns-sink.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-sns-sink.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-sns-sink.region");
    }

    private CamelAWSSNSPropertyFactory() {
    }

    /** Sets the destination SNS topic (by name or ARN). */
    public CamelAWSSNSPropertyFactory withTopicOrArn(String topicOrArn) {
        return setProperty("camel.kamelet.aws-sns-sink.topicNameOrArn", topicOrArn);
    }

    /** Applies the AWS connection settings using the default Kamelet property style. */
    public CamelAWSSNSPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // SPRING_STYLE is a static member: reference it via the class, not `this`
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /** Controls whether the connector creates the topic if it does not exist. */
    public CamelAWSSNSPropertyFactory withAutoCreateTopic(boolean value) {
        return setProperty("camel.kamelet.aws-sns-sink.autoCreateTopic", value);
    }

    /** Subscribes the given SQS queue to the SNS topic on the aws2-sns component. */
    public CamelAWSSNSPropertyFactory withSubscribeSNStoSQS(String queue) {
        return setProperty("camel.component.aws2-sns.subscribeSNStoSQS", "true")
                .setProperty("camel.component.aws2-sns.queueUrl", queue);
    }

    /**
     * Applies the AWS connection settings using the given key-name mapping style.
     * Missing access/secret keys default to empty strings; a missing region
     * defaults to us-east-1.
     *
     * @param amazonConfigs the AWS connection properties
     * @param style maps generic AWS keys to connector-specific property names
     * @return this factory instance
     */
    public CamelAWSSNSPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        String accessKeyKey = style.get(AWSConfigs.ACCESS_KEY);
        String secretKeyKey = style.get(AWSConfigs.SECRET_KEY);
        String regionKey = style.get(AWSConfigs.REGION);

        setProperty(accessKeyKey, amazonConfigs.getProperty(AWSConfigs.ACCESS_KEY, ""));
        setProperty(secretKeyKey, amazonConfigs.getProperty(AWSConfigs.SECRET_KEY, ""));
        return setProperty(regionKey, amazonConfigs.getProperty(AWSConfigs.REGION, Region.US_EAST_1.id()));
    }

    /** Points the underlying aws2-sns component at a custom configuration class. */
    public CamelAWSSNSPropertyFactory withConfiguration(String configurationClass) {
        return setProperty("camel.component.aws2-sns.configuration", classRef(configurationClass));
    }

    /** Creates a factory pre-populated with the basic sink connector settings. */
    public static CamelAWSSNSPropertyFactory basic() {
        return new CamelAWSSNSPropertyFactory().withName("CamelAWS2SNSSinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.awssnssink.CamelAwssnssinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,217 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sns | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sns/sink/TestSnsConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.sns.sink;
import org.apache.camel.component.aws2.sns.Sns2Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.sns.SnsClient;
/**
 * SNS configuration used in tests: always exposes a client created by the
 * test-infra utilities and ignores attempts to replace it.
 */
public class TestSnsConfiguration extends Sns2Configuration {
    private final SnsClient snsClient = AWSSDKClientUtils.newSNSClient();

    @Override
    public void setAmazonSNSClient(SnsClient amazonSNSClient) {
        // NO-OP: the test-infra client must not be overridden
    }

    @Override
    public SnsClient getAmazonSNSClient() {
        return snsClient;
    }
}
| 9,218 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sns | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sns/sink/CamelSinkAWSSNSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.sns.sink;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.aws.v2.clients.AWSSNSClient;
import org.apache.camel.kafkaconnector.aws.v2.clients.AWSSQSClient;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.sqs.model.Message;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
// Integration test for the AWS SNS sink connector: messages produced to a Kafka topic
// are published to an SNS topic, which is subscribed by an SQS queue used to verify delivery.
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSSNSITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService service = AWSServiceFactoryWithTimeout.createSNSService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSSNSITCase.class);

    private AWSSQSClient awsSqsClient;
    private AWSSNSClient awsSnsClient;
    private String queueName;
    private String sqsQueueUrl;
    private String snsTopicUrl;

    // Written by the consumer thread, read by the test thread once the latch releases
    private volatile int received;
    private final int expect = 10;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-sns-sink-kafka-connector"};
    }

    // Creates a fresh SQS queue and SNS topic per test and wires the queue to the topic
    @BeforeEach
    public void setUp() {
        awsSqsClient = new AWSSQSClient(AWSSDKClientUtils.newSQSClient());
        awsSnsClient = new AWSSNSClient(AWSSDKClientUtils.newSNSClient());

        // Random suffix avoids collisions between repeated or concurrent runs
        queueName = AWSCommon.DEFAULT_SQS_QUEUE_FOR_SNS + "-" + TestUtils.randomWithRange(0, 1000);

        sqsQueueUrl = awsSqsClient.createQueue(queueName);
        LOG.info("Created SQS queue {}", sqsQueueUrl);

        snsTopicUrl = awsSnsClient.createTopic(queueName);
        LOG.info("Created SNS topic {}", snsTopicUrl);

        // Route SNS messages into the SQS queue so the test can observe them
        awsSnsClient.subscribeSQS(snsTopicUrl, sqsQueueUrl);
        LOG.info("Created subscription between SQS queue {} and SNS topic {}", sqsQueueUrl, snsTopicUrl);

        received = 0;
    }

    // Waits for the consumer thread to finish, then checks the received count
    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(120, TimeUnit.SECONDS)) {
            assertEquals(expect, received,
                    "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    // Callback invoked with each batch of received messages.
    // NOTE(review): returning false appears to stop the polling loop once the expected
    // count is reached — inferred from usage; confirm against AWSSQSClient.receive
    private boolean checkMessages(List<Message> messages) {
        for (Message message : messages) {
            LOG.info("Received: {}", message.body());
            received++;
        }

        if (received == expect) {
            return false;
        }
        return true;
    }

    // Runs on a separate thread: drains the SQS queue and releases the latch when done
    protected void consumeMessages(CountDownLatch latch) {
        try {
            awsSqsClient.receive(queueName, this::checkMessages);
        } catch (Throwable t) {
            LOG.error("Failed to consume messages: {}", t.getMessage(), t);
            fail(t.getMessage());
        } finally {
            latch.countDown();
        }
    }

    @Test
    @Timeout(value = 90)
    public void testBasicSendReceive() throws Exception {
        Properties amazonProperties = service.getConnectionProperties();
        String topicName = getTopicForTest(this.getClass());

        ConnectorPropertyFactory connectorPropertyFactory = CamelAWSSNSPropertyFactory
                .basic()
                .withName("CamelAWSSNSSinkConnectorDefault")
                .withTopics(topicName)
                .withTopicOrArn(queueName)
                .withConfiguration(TestSnsConfiguration.class.getName())
                .withAutoCreateTopic(true)
                .withAmazonConfig(amazonProperties);

        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,219 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs/source/CamelAWSSQSPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.sqs.source;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Creates the set of properties used by a Camel AWS SQS source connector.
 */
final class CamelAWSSQSPropertyFactory extends SourceConnectorPropertyFactory<CamelAWSSQSPropertyFactory> {
    // Maps the generic AWS configuration keys to the Kamelet-style property
    // names consumed by the aws-sqs-source Kamelet
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-sqs-source.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-sqs-source.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-sqs-source.region");
        SPRING_STYLE.put(AWSConfigs.PROTOCOL, "camel.kamelet.aws-sqs-source.protocol");
        SPRING_STYLE.put(AWSConfigs.AMAZON_AWS_HOST, "camel.kamelet.aws-sqs-source.amazonAWSHost");
    }

    private CamelAWSSQSPropertyFactory() {
    }

    /** Applies the AWS connection settings using the default Kamelet property style. */
    public CamelAWSSQSPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // SPRING_STYLE is a static member: reference it via the class, not `this`
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /**
     * Applies the AWS connection settings using the given key-name mapping style.
     *
     * @param amazonConfigs the AWS connection properties
     * @param style maps generic AWS keys to connector-specific property names
     * @return this factory instance
     */
    public CamelAWSSQSPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    /** Sets the source SQS queue (by name or ARN). */
    public CamelAWSSQSPropertyFactory withQueueOrArn(String queueOrArn) {
        return setProperty("camel.kamelet.aws-sqs-source.queueNameOrArn", queueOrArn);
    }

    /** Creates a factory pre-populated with the basic source connector settings. */
    public static CamelAWSSQSPropertyFactory basic() {
        return new CamelAWSSQSPropertyFactory()
                .withName("CamelAws2sqsSourceConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.awssqssource.CamelAwssqssourceSourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,220 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs/source/CamelSourceAWSSQSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.sqs.source;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.aws.v2.clients.AWSSQSClient;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
// Integration test for the AWS SQS source connector: messages sent to an SQS queue
// must show up on the Kafka topic the connector feeds.
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSourceAWSSQSITCase extends CamelSourceTestSupport {
    @RegisterExtension
    public static AWSService service = AWSServiceFactoryWithTimeout.createSQSService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSourceAWSSQSITCase.class);

    private AWSSQSClient awssqsClient;
    private String queueName;
    private String topicName;

    private final int expect = 10;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-sqs-source-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        awssqsClient = new AWSSQSClient(AWSSDKClientUtils.newSQSClient());

        // Random suffix prevents clashes between repeated or concurrent runs
        queueName = AWSCommon.BASE_SQS_QUEUE_NAME + "-" + TestUtils.randomWithRange(0, 1000);

        // TODO: this is a work-around for CAMEL-15833
        awssqsClient.createQueue(queueName);
    }

    @AfterEach
    public void tearDown() {
        if (!awssqsClient.deleteQueue(queueName)) {
            fail("Failed to delete queue");
        }
    }

    /** Sends the test messages that the source connector is expected to pick up. */
    @Override
    protected void produceTestData() {
        LOG.debug("Sending SQS messages");
        for (int i = 0; i < expect; i++) {
            awssqsClient.send(queueName, "Source test message " + i);
        }
        LOG.debug("Done sending SQS messages");
    }

    /** Checks that the Kafka consumer received exactly the expected number of messages. */
    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();

        // JUnit's assertEquals takes the expected value first; the original call
        // had the arguments swapped, yielding a misleading failure message
        assertEquals(expect, received, "Didn't process the expected amount of messages");
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelAWSSQSPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withQueueOrArn(queueName)
                .withAmazonConfig(service.getConnectionProperties());

        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,221 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs/sink/CamelAWSSQSPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.sqs.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Creates the set of properties used by a Camel AWS SQS sink connector.
 */
final class CamelAWSSQSPropertyFactory extends SinkConnectorPropertyFactory<CamelAWSSQSPropertyFactory> {
    // Maps the generic AWS configuration keys to the Kamelet-style property
    // names consumed by the aws-sqs-sink Kamelet
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-sqs-sink.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-sqs-sink.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-sqs-sink.region");
        SPRING_STYLE.put(AWSConfigs.PROTOCOL, "camel.kamelet.aws-sqs-sink.protocol");
        SPRING_STYLE.put(AWSConfigs.AMAZON_AWS_HOST, "camel.kamelet.aws-sqs-sink.amazonAWSHost");
    }

    private CamelAWSSQSPropertyFactory() {
    }

    /** Applies the AWS connection settings using the default Kamelet property style. */
    public CamelAWSSQSPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // SPRING_STYLE is a static member: reference it via the class, not `this`
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /**
     * Applies the AWS connection settings using the given key-name mapping style.
     *
     * @param amazonConfigs the AWS connection properties
     * @param style maps generic AWS keys to connector-specific property names
     * @return this factory instance
     */
    public CamelAWSSQSPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    /** Sets the destination SQS queue (by name or ARN). */
    public CamelAWSSQSPropertyFactory withQueueNameOrArn(String queueNameOrArn) {
        return setProperty("camel.kamelet.aws-sqs-sink.queueNameOrArn", queueNameOrArn);
    }

    /** Controls whether the connector creates the queue if it does not exist. */
    public CamelAWSSQSPropertyFactory withAutoCreateQueue(boolean value) {
        return setProperty("camel.kamelet.aws-sqs-sink.autoCreateQueue", value);
    }

    /** Creates a factory pre-populated with the basic sink connector settings. */
    public static CamelAWSSQSPropertyFactory basic() {
        return new CamelAWSSQSPropertyFactory()
                .withName("CamelAws2sqsSinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.awssqssink.CamelAwssqssinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,222 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/sqs/sink/CamelSinkAWSSQSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.sqs.sink;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.aws.v2.clients.AWSSQSClient;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.test.infra.aws.common.AWSCommon;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.sqs.model.Message;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
// Integration test for the AWS SQS sink connector: messages produced to a Kafka
// topic must end up in the SQS queue created for the test.
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSSQSITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService awsService = AWSServiceFactoryWithTimeout.createSQSService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSSQSITCase.class);

    private AWSSQSClient awssqsClient;
    private String queueName;

    // Written by the consumer thread, read by the test thread once the latch releases
    private volatile int received;
    private final int expect = 10;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-sqs-sink-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        awssqsClient = new AWSSQSClient(AWSSDKClientUtils.newSQSClient());

        // Random suffix prevents clashes between repeated or concurrent runs
        queueName = AWSCommon.BASE_SQS_QUEUE_NAME + "-" + TestUtils.randomWithRange(0, 1000);

        String queueUrl = awssqsClient.getOrCreateQueue(queueName);
        LOG.debug("Using queue {} for the test", queueUrl);

        received = 0;
    }

    @AfterEach
    public void tearDown() {
        if (!awssqsClient.deleteQueue(queueName)) {
            fail("Failed to delete queue");
        }
    }

    // Waits for the consumer thread to finish, then checks the received count
    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(110, TimeUnit.SECONDS)) {
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
                    received, expect));
        }
    }

    // Callback invoked with each batch of received messages; returning false stops
    // the receive loop once the expected count has been reached
    private boolean checkMessages(List<Message> messages) {
        for (Message message : messages) {
            LOG.info("Received: {}", message.body());
            received++;
        }

        if (received == expect) {
            return false;
        }
        return true;
    }

    // Runs on a separate thread: drains the SQS queue and releases the latch when done
    protected void consumeMessages(CountDownLatch latch) {
        try {
            awssqsClient.receive(queueName, this::checkMessages);
        } catch (Throwable t) {
            LOG.error("Failed to consume messages: {}", t.getMessage(), t);
            // Surface the consumer failure instead of silently swallowing it,
            // consistent with the equivalent SNS sink test
            fail(t.getMessage());
        } finally {
            latch.countDown();
        }
    }

    @Test
    @Timeout(value = 120)
    public void testBasicSendReceive() {
        try {
            Properties amazonProperties = awsService.getConnectionProperties();
            String topicName = getTopicForTest(this);

            ConnectorPropertyFactory testProperties = CamelAWSSQSPropertyFactory
                    .basic()
                    .withName("CamelAwssqsSinkConnectorSpringBootStyle")
                    .withTopics(topicName)
                    .withAmazonConfig(amazonProperties)
                    .withAutoCreateQueue(true)
                    .withQueueNameOrArn(queueName);

            runTest(testProperties, topicName, expect);
        } catch (Exception e) {
            LOG.error("Amazon SQS test failed: {}", e.getMessage(), e);
            fail(e.getMessage());
        }
    }
}
| 9,223 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/common/AWSPropertiesUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.common;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.common.BasicConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
import software.amazon.awssdk.regions.Region;
public final class AWSPropertiesUtils {
    private AWSPropertiesUtils() {
    }

    /**
     * Copies the common AWS connection settings (access key, secret key, region
     * and, when configured, protocol and host) from the test-infra configuration
     * into the connector property factory. The {@code style} map translates the
     * generic AWS config keys into the connector-specific property names.
     *
     * @param amazonConfigs connection properties supplied by the test infrastructure
     * @param style maps generic AWS config keys to the connector's property names
     * @param propertyFactory factory receiving the translated properties
     */
    public static void setCommonProperties(Properties amazonConfigs, Map<String, String> style,
                                           BasicConnectorPropertyFactory<?> propertyFactory) {
        propertyFactory.setProperty(style.get(AWSConfigs.ACCESS_KEY),
                amazonConfigs.getProperty(AWSConfigs.ACCESS_KEY, ""));

        propertyFactory.setProperty(style.get(AWSConfigs.SECRET_KEY),
                amazonConfigs.getProperty(AWSConfigs.SECRET_KEY, ""));

        propertyFactory.setProperty(style.get(AWSConfigs.REGION),
                amazonConfigs.getProperty(AWSConfigs.REGION, Region.US_EAST_1.toString()));

        // Protocol and host are optional: copy them only when the style defines
        // a target property AND the configuration provides a non-empty value.
        setOptionalProperty(amazonConfigs, AWSConfigs.PROTOCOL, style.get(AWSConfigs.PROTOCOL),
                propertyFactory);
        setOptionalProperty(amazonConfigs, AWSConfigs.AMAZON_AWS_HOST, style.get(AWSConfigs.AMAZON_AWS_HOST),
                propertyFactory);
    }

    // Sets targetKey on the factory only when it is defined and the config provides a non-empty value.
    private static void setOptionalProperty(Properties amazonConfigs, String configKey, String targetKey,
                                            BasicConnectorPropertyFactory<?> propertyFactory) {
        if (targetKey == null || targetKey.isEmpty()) {
            return;
        }

        String value = amazonConfigs.getProperty(configKey, "");
        if (!value.isEmpty()) {
            propertyFactory.setProperty(targetKey, value);
        }
    }
}
| 9,224 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/iam | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/iam/sink/CamelSinkAWSIAMITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.iam.sink;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.aws.v2.cw.sink.TestCloudWatchConfiguration;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.iam.IamClient;
import software.amazon.awssdk.services.iam.model.ListUsersResponse;
import software.amazon.awssdk.services.iam.model.User;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSIAMITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService awsService = AWSServiceFactoryWithTimeout.createIAMService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSIAMITCase.class);

    private IamClient client;
    private String logicalName;
    // Written by the consumer thread and read by the verification thread
    private volatile int received;
    private final int expect = 10;

    /**
     * Producer that adds a distinct IAM user-name header to every record so the
     * sink creates one user per message.
     */
    private static class CustomProducer extends StringMessageProducer {
        public CustomProducer(String bootstrapServer, String topicName, int count) {
            super(bootstrapServer, topicName, count);
        }

        @Override
        public Map<String, String> messageHeaders(String text, int current) {
            Map<String, String> headers = new HashMap<>();

            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsIAMUsername",
                    "username-" + current);

            return headers;
        }
    }

    /**
     * Polls the IAM service until at least the expected number of users exists
     * or no more data arrives within the wait budget.
     */
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            while (true) {
                ListUsersResponse response = client.listUsers();

                List<User> users = response.users();
                received = users.size();
                for (User user : users) {
                    LOG.info("Received user: {}", user.userName());

                    if (received >= expect) {
                        return;
                    }
                }

                // waitForData() returns false when the wait budget is exhausted
                if (!waitForData()) {
                    return;
                }
            }
        } finally {
            // Always release the verification thread, even on early return
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(110, TimeUnit.SECONDS)) {
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
                    received, expect));
        }
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws2-iam-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        client = AWSSDKClientUtils.newIAMClient();
        logicalName = "iam-" + TestUtils.randomWithRange(1, 100);
        received = 0;
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws Exception {
        Properties amazonProperties = awsService.getConnectionProperties();
        String topicName = getTopicForTest(this);

        ConnectorPropertyFactory testProperties = CamelAWSIAMPropertyFactory
                .basic()
                .withTopics(topicName)
                .withAmazonConfig(amazonProperties)
                .withSinkPathLabel(logicalName)
                // Fixed: a stray withConfiguration(TestCloudWatchConfiguration...)
                // call (copy-paste leftover, immediately overwritten) was removed;
                // only the IAM test configuration is relevant here.
                .withConfiguration(TestIAMConfiguration.class.getName())
                .withSinkEndpointOperation("createUser");

        runTest(testProperties, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,225 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/iam | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/iam/sink/TestIAMConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.iam.sink;
import org.apache.camel.component.aws2.iam.IAM2Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.iam.IamClient;
/**
 * IAM configuration that hands Camel a client created by the test
 * infrastructure helper instead of one built from endpoint options.
 */
public class TestIAMConfiguration extends IAM2Configuration {
    // Lazily created test client; reused across calls.
    // NOTE(review): lazy init is not thread-safe — assumed single-threaded use; confirm.
    private IamClient client;

    @Override
    public IamClient getIamClient() {
        if (client != null) {
            return client;
        }

        client = AWSSDKClientUtils.newIAMClient();
        return client;
    }
}
| 9,226 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/iam | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/iam/sink/CamelAWSIAMPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.iam.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Builds the connector properties for the AWS IAM sink connector, translating
 * generic AWS settings into either Spring-style or Kafka-style property names.
 */
public class CamelAWSIAMPropertyFactory extends SinkConnectorPropertyFactory<CamelAWSIAMPropertyFactory> {
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();
    public static final Map<String, String> KAFKA_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.component.aws2-iam.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.component.aws2-iam.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.component.aws2-iam.region");

        KAFKA_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.component.aws2-iam.access-key");
        KAFKA_STYLE.put(AWSConfigs.SECRET_KEY, "camel.component.aws2-iam.secret-key");
        KAFKA_STYLE.put(AWSConfigs.REGION, "camel.component.aws2-iam.region");
    }

    public CamelAWSIAMPropertyFactory withSinkPathLabel(String value) {
        return setProperty("camel.sink.path.label", value);
    }

    public CamelAWSIAMPropertyFactory withSinkEndpointOperation(String value) {
        return setProperty("camel.sink.endpoint.operation", value);
    }

    public CamelAWSIAMPropertyFactory withConfiguration(String value) {
        return setProperty("camel.component.aws2-iam.configuration", classRef(value));
    }

    /** Applies the AWS connection settings using the default (Spring) property style. */
    public CamelAWSIAMPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // Fixed: static member was accessed via 'this.'
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    public CamelAWSIAMPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    public static CamelAWSIAMPropertyFactory basic() {
        return new CamelAWSIAMPropertyFactory()
                .withTasksMax(1)
                .withName("CamelAws2iamSinkConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.aws2iam.CamelAws2iamSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,227 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/lambda | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/lambda/sink/CamelAWSLambdaPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.lambda.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Builds the connector properties for the AWS Lambda sink kamelet connector.
 */
public class CamelAWSLambdaPropertyFactory extends SinkConnectorPropertyFactory<CamelAWSLambdaPropertyFactory> {
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-lambda-sink.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-lambda-sink.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-lambda-sink.region");
    }

    /** Applies the AWS connection settings using the default (Spring) property style. */
    public CamelAWSLambdaPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // Fixed: static member was accessed via 'this.'
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    public CamelAWSLambdaPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    public CamelAWSLambdaPropertyFactory withFunction(String value) {
        return setProperty("camel.kamelet.aws-lambda-sink.function", value);
    }

    public CamelAWSLambdaPropertyFactory withConfiguration(String value) {
        return setProperty("camel.component.aws2-lambda.configuration", classRef(value));
    }

    public static CamelAWSLambdaPropertyFactory basic() {
        return new CamelAWSLambdaPropertyFactory()
                .withTasksMax(1)
                .withName("CamelAws2lambdaSinkConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.awslambdasink.CamelAwslambdasinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,228 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/lambda | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/lambda/sink/CamelSinkLambdaITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.lambda.sink;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.ByteProducerPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.ConsumerPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.DefaultConsumerPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.common.clients.kafka.ProducerPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.AbstractTestMessageProducer;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.FunctionConfiguration;
import software.amazon.awssdk.services.lambda.model.ListFunctionsResponse;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkLambdaITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService awsService = AWSServiceFactoryWithTimeout.createLambdaService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkLambdaITCase.class);

    private LambdaClient client;
    private String function;
    // Set to 1 by the consumer thread when the function is found
    private volatile int received;
    private final int expect = 1;

    /**
     * Producer that sends a zipped payload along with the headers required by
     * the Lambda createFunction operation.
     */
    private static class CustomProducer extends AbstractTestMessageProducer<Bytes> {
        public CustomProducer(String bootstrapServer, String topicName, int count) {
            super(bootstrapServer, topicName, count);
        }

        @Override
        protected KafkaClient<String, Bytes> createKafkaClient(String bootstrapServer) {
            ConsumerPropertyFactory consumerPropertyFactory = new DefaultConsumerPropertyFactory(bootstrapServer);
            ProducerPropertyFactory producerPropertyFactory = new ByteProducerPropertyFactory(bootstrapServer);

            return new KafkaClient<>(consumerPropertyFactory, producerPropertyFactory);
        }

        /**
         * Builds a minimal zip archive to serve as the function code payload.
         */
        @Override
        public Bytes testMessageContent(int current) {
            // Fixed: the ZipOutputStream is now part of try-with-resources so it
            // is always closed, even when an entry fails to write.
            try (ByteArrayOutputStream out = new ByteArrayOutputStream();
                 ZipOutputStream zip = new ZipOutputStream(out)) {
                zip.putNextEntry(new ZipEntry("test"));
                zip.write("hello test".getBytes());
                zip.closeEntry();
                // finish() flushes the remaining zip structures into the buffer
                zip.finish();

                return Bytes.wrap(out.toByteArray());
            } catch (IOException e) {
                LOG.error("I/O error writing zip entry: {}", e.getMessage(), e);
                fail("I/O error writing zip entry");
            }

            return null;
        }

        @Override
        public Map<String, String> messageHeaders(Bytes text, int current) {
            Map<String, String> headers = new HashMap<>();

            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsLambdaOperation",
                    "createFunction");
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsLambdaRole",
                    "admin");
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsLambdaRuntime",
                    "java8");
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsLambdaHandler",
                    "org.apache.camel.kafkaconnector.SomeHandler");

            return headers;
        }
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-lambda-sink-kafka-connector"};
    }

    /**
     * Creates a Lambda client and a random function name; resets the counter
     * (added for consistency with the sibling sink tests).
     */
    @BeforeEach
    public void setUp() {
        client = AWSSDKClientUtils.newLambdaClient();
        function = "function-" + TestUtils.randomWithRange(0, 100);
        received = 0;

        LOG.debug("Using function {} for the test", function);
    }

    /**
     * Polls the Lambda service until the test function shows up or no more
     * data arrives within the wait budget.
     */
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            while (true) {
                ListFunctionsResponse response = client.listFunctions();

                for (FunctionConfiguration functionConfiguration : response.functions()) {
                    LOG.info("Retrieved function {}", functionConfiguration.functionName());

                    if (functionConfiguration.functionName().equals(function)) {
                        received = 1;
                        return;
                    }
                }

                // waitForData() returns false when the wait budget is exhausted
                if (!waitForData()) {
                    break;
                }
            }
        } finally {
            // Always release the verification thread, even on early return
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(110, TimeUnit.SECONDS)) {
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
                    received, expect));
        }
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws Exception {
        Properties amazonProperties = awsService.getConnectionProperties();
        String topicName = getTopicForTest(this);

        ConnectorPropertyFactory testProperties = CamelAWSLambdaPropertyFactory
                .basic()
                .withTopics(topicName)
                .withConfiguration(TestLambda2Configuration.class.getName())
                .withAmazonConfig(amazonProperties)
                .withFunction(function);

        runTest(testProperties, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,229 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/lambda | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/lambda/sink/TestLambda2Configuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.lambda.sink;
import org.apache.camel.component.aws2.lambda.Lambda2Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.lambda.LambdaClient;
/**
 * Lambda configuration that hands Camel a client created by the test
 * infrastructure helper instead of one built from endpoint options.
 */
public class TestLambda2Configuration extends Lambda2Configuration {
    // Lazily created test client; reused across calls.
    // NOTE(review): lazy init is not thread-safe — assumed single-threaded use; confirm.
    private LambdaClient client;

    @Override
    public LambdaClient getAwsLambdaClient() {
        if (client != null) {
            return client;
        }

        client = AWSSDKClientUtils.newLambdaClient();
        return client;
    }
}
| 9,230 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/cw | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/cw/sink/CamelAWSCWPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.cw.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Builds the connector properties for the AWS CloudWatch sink kamelet connector.
 */
public class CamelAWSCWPropertyFactory extends SinkConnectorPropertyFactory<CamelAWSCWPropertyFactory> {
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-cloudwatch-sink.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-cloudwatch-sink.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-cloudwatch-sink.region");
    }

    public CamelAWSCWPropertyFactory withNamespace(String value) {
        return setProperty("camel.kamelet.aws-cloudwatch-sink.cwNamespace", value);
    }

    public CamelAWSCWPropertyFactory withConfiguration(String value) {
        return setProperty("camel.component.aws2-cw.configuration", classRef(value));
    }

    /** Applies the AWS connection settings using the default (Spring) property style. */
    public CamelAWSCWPropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // Fixed: static member was accessed via 'this.'
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    public CamelAWSCWPropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);
        return this;
    }

    public static CamelAWSCWPropertyFactory basic() {
        return new CamelAWSCWPropertyFactory()
                .withTasksMax(1)
                .withName("CamelAWSCWConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.awscloudwatchsink.CamelAwscloudwatchsinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.sink.contentLogLevel", "INFO")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,231 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/cw | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/cw/sink/TestCloudWatchConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.cw.sink;
import org.apache.camel.component.aws2.cw.Cw2Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
/**
 * CloudWatch configuration that hands Camel a client created by the test
 * infrastructure helper instead of one built from endpoint options.
 */
public class TestCloudWatchConfiguration extends Cw2Configuration {
    // Lazily created test client; reused across calls.
    // NOTE(review): lazy init is not thread-safe — assumed single-threaded use; confirm.
    private CloudWatchClient client;

    @Override
    public CloudWatchClient getAmazonCwClient() {
        if (client == null) {
            client = AWSSDKClientUtils.newCloudWatchClient();
        }
        return client;
    }
}
| 9,232 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/cw | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/cw/sink/CamelSinkAWSCWITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.cw.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
import software.amazon.awssdk.services.cloudwatch.model.Dimension;
import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse;
import software.amazon.awssdk.services.cloudwatch.model.Metric;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test for the camel-aws-cloudwatch-sink connector: publishes Kafka
 * records carrying CloudWatch metric headers and then polls the CloudWatch API
 * until the expected metric dimensions become visible.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSCWITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService awsService = AWSServiceFactoryWithTimeout
            .createCloudWatchService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSCWITCase.class);

    // Metric name shared by producer and consumer; it is never reassigned, so declare it final.
    private static final String METRIC_NAME = "test-metric";

    private CloudWatchClient client;
    private String namespace;
    // Incremented only by the consumer thread; volatile so the test thread sees updates.
    private volatile int received;
    private final int expect = 10;

    /**
     * Producer that attaches the CloudWatch metric name and a per-record
     * dimension name/value pair as Camel headers.
     */
    private static class CustomProducer extends StringMessageProducer {
        public CustomProducer(String bootstrapServer, String topicName, int count) {
            super(bootstrapServer, topicName, count);
        }

        @Override
        public Map<String, String> messageHeaders(String text, int current) {
            Map<String, String> headers = new HashMap<>();

            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "metric-name", METRIC_NAME);
            //TODO: once this https://github.com/apache/camel-kamelets/pull/522 is published the following headers
            // must be changed to metric-dimension-name and metric-dimension-value
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsCwMetricDimensionName",
                    "test-dimension-" + current);
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsCwMetricDimensionValue", String.valueOf(current));

            return headers;
        }
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws-cloudwatch-sink-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        client = AWSSDKClientUtils.newCloudWatchClient();
        // A random namespace isolates this run from leftovers of previous runs.
        namespace = "cw-" + TestUtils.randomWithRange(0, 1000);
        LOG.debug("Using namespace {} for the test", namespace);
        received = 0;
    }

    /**
     * Polls CloudWatch for metrics in the test namespace, counting every
     * dimension seen, until the expected count is reached or polling gives up.
     */
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            ListMetricsRequest request = ListMetricsRequest.builder()
                    .namespace(namespace)
                    .metricName(METRIC_NAME)
                    .build();

            while (true) {
                ListMetricsResponse response = client.listMetrics(request);

                for (Metric metric : response.metrics()) {
                    LOG.info("Retrieved metric {}, dimensions {}", metric.metricName(), metric.dimensions());
                    for (Dimension dimension : metric.dimensions()) {
                        LOG.info("Dimension {} value: {}", dimension.name(), dimension.value());

                        received++;
                        // Fixed: use >= instead of == so an unexpected surplus of
                        // dimensions cannot make the loop spin until the timeout
                        // (matches the sibling EC2 sink test).
                        if (received >= expect) {
                            return;
                        }
                    }
                }

                if (!waitForData()) {
                    break;
                }
            }
        } finally {
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(110, TimeUnit.SECONDS)) {
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
                    received, expect));
        }
    }

    @Test
    @Timeout(value = 120)
    public void testBasicSendReceive() throws Exception {
        Properties amazonProperties = awsService.getConnectionProperties();
        String topicName = getTopicForTest(this);

        ConnectorPropertyFactory testProperties = CamelAWSCWPropertyFactory
                .basic()
                .withTopics(topicName)
                .withConfiguration(TestCloudWatchConfiguration.class.getName())
                .withAmazonConfig(amazonProperties)
                .withNamespace(namespace);

        runTest(testProperties, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,233 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/ec2 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/ec2/sink/CamelSinkAWSEC2ITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.ec2.sink;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.aws.v2.cw.sink.TestCloudWatchConfiguration;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.aws.common.services.AWSService;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import org.apache.camel.test.infra.aws2.services.AWSServiceFactoryWithTimeout;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeInstanceStatusRequest;
import software.amazon.awssdk.services.ec2.model.InstanceStatus;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test for the AWS EC2 sink connector: sends records whose Camel
 * headers describe an EC2 "run instances" request and then polls the instance
 * status API until the expected number of instances is observed.
 */
@Disabled("Until this https://github.com/apache/camel-kamelets/issues/516 is implemented and published.")
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
public class CamelSinkAWSEC2ITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static AWSService awsService = AWSServiceFactoryWithTimeout.createEC2Service();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkAWSEC2ITCase.class);

    private Ec2Client client;
    // Kept for the commented-out withSinkPathLabel call below.
    private String logicalName;
    // Incremented only by the consumer thread; volatile so the test thread sees updates.
    private volatile int received;
    private final int expect = 10;

    /**
     * Producer that attaches the EC2 run-instances parameters (image id,
     * instance type, min/max count, security group) as Camel headers.
     */
    private static class CustomProducer extends StringMessageProducer {
        public CustomProducer(String bootstrapServer, String topicName, int count) {
            super(bootstrapServer, topicName, count);
        }

        @Override
        public Map<String, String> messageHeaders(String text, int current) {
            Map<String, String> headers = new HashMap<>();

            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsEC2ImageId",
                    "image-id-" + current);
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsEC2InstanceType", "T1_MICRO");
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsEC2InstanceMinCount", "1");
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsEC2InstanceMaxCount", "1");
            headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CamelAwsEC2InstanceSecurityGroups", "default");

            return headers;
        }
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-aws2-ec2-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        client = AWSSDKClientUtils.newEC2Client();
        logicalName = "ec2-" + TestUtils.randomWithRange(1, 100);
        received = 0;
    }

    /**
     * Polls the EC2 instance status API, counting every reported instance,
     * until the expected count is reached or polling gives up.
     */
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            while (true) {
                DescribeInstanceStatusRequest request = DescribeInstanceStatusRequest.builder()
                        .includeAllInstances(true)
                        .build();

                List<InstanceStatus> statusList = client.describeInstanceStatus(request).instanceStatuses();
                for (InstanceStatus status : statusList) {
                    LOG.info("Instance {} has status: {}", status.instanceId(), status);
                    received++;

                    if (received >= expect) {
                        return;
                    }
                }

                if (!waitForData()) {
                    break;
                }
            }
        } finally {
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(110, TimeUnit.SECONDS)) {
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail(String.format("Failed to receive the messages within the specified time: received %d of %d",
                    received, expect));
        }
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws Exception {
        Properties amazonProperties = awsService.getConnectionProperties();
        String topicName = getTopicForTest(this);

        // Fixed: the factory previously also set TestCloudWatchConfiguration (a
        // copy-paste leftover from the CloudWatch sink test) which was immediately
        // overridden by TestEC2Configuration; only the EC2 configuration is set now.
        ConnectorPropertyFactory testProperties = CamelAWSEC2PropertyFactory
                .basic()
                .withTopics(topicName)
                .withAmazonConfig(amazonProperties)
                // .withSinkPathLabel(logicalName)
                .withConfiguration(TestEC2Configuration.class.getName())
                .withSinkEndpointOperation("createAndRunInstances");

        runTest(testProperties, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,234 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/ec2 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/ec2/sink/TestEC2Configuration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.ec2.sink;
import org.apache.camel.component.aws2.ec2.AWS2EC2Configuration;
import org.apache.camel.test.infra.aws2.clients.AWSSDKClientUtils;
import software.amazon.awssdk.services.ec2.Ec2Client;
/**
 * EC2 component configuration for the tests: overrides the client getter so the
 * component talks to the test infrastructure instead of real AWS.
 */
public class TestEC2Configuration extends AWS2EC2Configuration {

    // Created on first use and then reused for the lifetime of this configuration.
    private Ec2Client ec2Client;

    @Override
    public Ec2Client getAmazonEc2Client() {
        if (ec2Client == null) {
            ec2Client = AWSSDKClientUtils.newEC2Client();
        }

        return ec2Client;
    }
}
| 9,235 |
0 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/ec2 | Create_ds/camel-kafka-connector/tests/itests-aws-v2/src/test/java/org/apache/camel/kafkaconnector/aws/v2/ec2/sink/CamelAWSEC2PropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.aws.v2.ec2.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.kafkaconnector.aws.v2.common.AWSPropertiesUtils;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.test.infra.aws.common.AWSConfigs;
/**
 * Builds the connector configuration for the AWS EC2 sink connector tests.
 */
public class CamelAWSEC2PropertyFactory extends SinkConnectorPropertyFactory<CamelAWSEC2PropertyFactory> {
    /**
     * Maps generic AWS configuration keys to the kamelet-style property names
     * used by the EC2 sink. Populated once in the static initializer; treat as
     * read-only after that.
     */
    public static final Map<String, String> SPRING_STYLE = new HashMap<>();

    static {
        SPRING_STYLE.put(AWSConfigs.ACCESS_KEY, "camel.kamelet.aws-ec2-sink.accessKey");
        SPRING_STYLE.put(AWSConfigs.SECRET_KEY, "camel.kamelet.aws-ec2-sink.secretKey");
        SPRING_STYLE.put(AWSConfigs.REGION, "camel.kamelet.aws-ec2-sink.region");
    }

    /** Sets the EC2 operation (e.g. {@code createAndRunInstances}) for the sink endpoint. */
    public CamelAWSEC2PropertyFactory withSinkEndpointOperation(String value) {
        return setProperty("camel.component.aws2-ec2.operation", value);
    }

    /** Points the component at a custom {@code AWS2EC2Configuration} class. */
    public CamelAWSEC2PropertyFactory withConfiguration(String value) {
        return setProperty("camel.component.aws2-ec2.configuration", classRef(value));
    }

    public CamelAWSEC2PropertyFactory withAmazonConfig(Properties amazonConfigs) {
        // Fixed: was "this.SPRING_STYLE" — a static member accessed via an instance reference.
        return withAmazonConfig(amazonConfigs, SPRING_STYLE);
    }

    /** Applies the common AWS properties using the given key-name mapping style. */
    public CamelAWSEC2PropertyFactory withAmazonConfig(Properties amazonConfigs, Map<String, String> style) {
        AWSPropertiesUtils.setCommonProperties(amazonConfigs, style, this);

        return this;
    }

    /** @return a factory pre-populated with the connector class and converters. */
    public static CamelAWSEC2PropertyFactory basic() {
        return new CamelAWSEC2PropertyFactory()
                .withTasksMax(1)
                .withName("CamelAws2ec2SinkConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.aws2ec2.CamelAws2ec2SinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,236 |
0 | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch/clients/ElasticSearchClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.elasticsearch.clients;
import java.io.IOException;
import java.util.List;
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch._types.query_dsl.QueryBuilders;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import co.elastic.clients.elasticsearch.core.search.Hit;
import co.elastic.clients.elasticsearch.indices.ExistsRequest;
import co.elastic.clients.json.jackson.JacksonJsonpMapper;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.rest_client.RestClientTransport;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.camel.kafkaconnector.elasticsearch.common.ElasticSearchCommon;
import org.apache.camel.test.infra.common.TestUtils;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Thin test helper around the Elasticsearch Java API client: checks index
 * existence, fetches all documents of a single index and offers blocking
 * wait helpers for both.
 */
public class ElasticSearchClient {
    private static final Logger LOG = LoggerFactory.getLogger(ElasticSearchClient.class);

    private final ElasticsearchClient client;
    private final String index;

    /**
     * Creates a client for the given host/port using the default test
     * credentials over plain HTTP.
     *
     * @param host  ElasticSearch host
     * @param port  ElasticSearch HTTP port
     * @param index the index all queries of this instance operate on
     */
    public ElasticSearchClient(String host, int port, String index) {
        final CredentialsProvider credentialsProvider =
                new BasicCredentialsProvider();
        credentialsProvider.setCredentials(AuthScope.ANY,
                new UsernamePasswordCredentials(ElasticSearchCommon.USERNAME, ElasticSearchCommon.PASSWORD));

        RestClientBuilder builder = RestClient.builder(
                new HttpHost(host, port, "http"))
                .setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                    @Override
                    public HttpAsyncClientBuilder customizeHttpClient(
                            HttpAsyncClientBuilder httpClientBuilder) {
                        return httpClientBuilder
                                .setDefaultCredentialsProvider(credentialsProvider);
                    }
                });

        RestClient httpClient = builder.build();

        ElasticsearchTransport transport = new RestClientTransport(
                httpClient,
                new JacksonJsonpMapper()
        );

        client = new ElasticsearchClient(transport);
        this.index = index;
    }

    /**
     * @return true if the configured index exists; false if it does not or if
     *         the query failed with an I/O error (logged), so callers can retry
     */
    public boolean indexExists() {
        try {
            ExistsRequest indexRequest = new ExistsRequest.Builder().index(index).build();

            return client.indices().exists(indexRequest).value();
        } catch (IOException e) {
            /*
             It may return if failed to parse the response, on timeout or no response from the ES instance.
             Assuming it is more likely to timeout or provide no reply either the during the start up or
             on overloaded CI environments, we log the I/O error and try again
             */
            LOG.error("I/O error trying to query for index existence: {}", e.getMessage(), e);
        }

        return false;
    }

    /**
     * Fetches all documents of the configured index with a match-all query.
     *
     * @return the search hits, or {@code null} if the query failed (errors are
     *         logged so callers can poll again)
     */
    public List<Hit<ObjectNode>> getData() {
        try {
            SearchResponse<ObjectNode> response = client.search(s ->
                            s.index(index)
                                    .query(QueryBuilders.matchAll().build()._toQuery()),
                    ObjectNode.class);

            return response.hits().hits();
        } catch (IOException e) {
            /*
             It may return if failed to parse the response, on timeout or no response from the ES instance.
             Assuming it is more likely to timeout or provide no reply either during the start up or
             on overloaded CI environments, we log the I/O error and try again
             */
            // Fixed: log message previously said "index existence" — copy-paste from indexExists()
            LOG.error("I/O error trying to query for data: {}", e.getMessage(), e);
        } catch (Throwable e) {
            LOG.error("Unhandled error trying to query for data: {}", e.getMessage(), e);
        }

        return null;
    }

    // True only when the index currently holds exactly the expected number of documents.
    private boolean hasData(int expect) {
        List<Hit<ObjectNode>> searchHits = getData();

        if (searchHits == null) {
            LOG.debug("There are no search hits to return");

            return false;
        }

        int count = searchHits.size();

        if (count != expect) {
            LOG.debug("Not enough records: {} available, but {} expected", count, expect);

            return false;
        }

        return true;
    }

    /** Blocks until the configured index exists. */
    public void waitForIndex() {
        TestUtils.waitFor(this::indexExists);
    }

    /** Blocks until the index holds exactly {@code expect} documents. */
    public void waitForData(int expect) {
        TestUtils.waitFor(this::hasData, expect);
    }
}
| 9,237 |
0 | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch/sink/CamelElasticSearchPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.elasticsearch.sink;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.elasticsearch.common.ElasticSearchCommon;
/**
 * Builds the connector configuration for the ElasticSearch index sink tests.
 * Instances are obtained exclusively through {@link #basic()}.
 */
final class CamelElasticSearchPropertyFactory extends SinkConnectorPropertyFactory<CamelElasticSearchPropertyFactory> {

    private CamelElasticSearchPropertyFactory() {
        // created only via basic()
    }

    /** Sets the target ElasticSearch cluster name on the sink kamelet. */
    public CamelElasticSearchPropertyFactory withClusterName(String clusterName) {
        return setProperty("camel.kamelet.elasticsearch-index-sink.clusterName", clusterName);
    }

    /** Sets the host address(es) the sink kamelet connects to. */
    public CamelElasticSearchPropertyFactory withHostAddress(String hostAddress) {
        return setProperty("camel.kamelet.elasticsearch-index-sink.hostAddresses", hostAddress);
    }

    /** Sets the index the sink kamelet writes documents into. */
    public CamelElasticSearchPropertyFactory withIndexName(String indexName) {
        return setProperty("camel.kamelet.elasticsearch-index-sink.indexName", indexName);
    }

    /** @return a factory pre-populated with the connector class, converters and credentials. */
    public static CamelElasticSearchPropertyFactory basic() {
        CamelElasticSearchPropertyFactory factory = new CamelElasticSearchPropertyFactory();

        factory.withName("CamelElasticSearchSinkConnector");
        factory.withTasksMax(1);
        factory.withConnectorClass("org.apache.camel.kafkaconnector.elasticsearchindexsink.CamelElasticsearchindexsinkSinkConnector");
        factory.withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter");
        factory.withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
        factory.setProperty("camel.kamelet.elasticsearch-index-sink.user", ElasticSearchCommon.USERNAME);
        factory.setProperty("camel.kamelet.elasticsearch-index-sink.password", ElasticSearchCommon.PASSWORD);
        factory.setProperty("camel.kamelet.elasticsearch-index-sink.enableSSL", "false");
        factory.setProperty("camel.component.kamelet.location", "kamelets");
        factory.setProperty("camel.component.properties.environment-variable-mode", "1");

        return factory;
    }
}
| 9,238 |
0 | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch/sink/CamelSinkElasticSearchITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.elasticsearch.sink;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import co.elastic.clients.elasticsearch.core.search.Hit;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.elasticsearch.clients.ElasticSearchClient;
import org.apache.camel.kafkaconnector.elasticsearch.common.ElasticSearchCommon;
import org.apache.camel.kafkaconnector.elasticsearch.common.ElasticSearchIndexMessageProducer;
import org.apache.camel.kafkaconnector.elasticsearch.common.ElasticSearchLocalContainerServiceHack;
import org.apache.camel.test.infra.elasticsearch.services.ElasticSearchService;
import org.apache.camel.test.infra.elasticsearch.services.RemoteElasticSearchService;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import static org.apache.camel.test.infra.elasticsearch.services.ElasticSearchServiceFactory.builder;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test for the ElasticSearch index sink connector: produces JSON
 * documents to a Kafka topic and verifies they are indexed, checking that the
 * {@code counter} field of each hit matches the production order.
 */
public class CamelSinkElasticSearchITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static ElasticSearchService elasticSearch = builder()
            .addLocalMapping(() -> {
                ElasticsearchContainer container =
                        new ElasticsearchContainer("docker.elastic.co/elasticsearch/elasticsearch:8.5.2");
                container.addEnv("xpack.security.enabled", "true");
                //XXX: revert back to the normal ElasticSearchLocalContainerService when https://issues.apache.org/jira/browse/CAMEL-19834 is fixed
                return new ElasticSearchLocalContainerServiceHack(container);
            })
            .addRemoteMapping(RemoteElasticSearchService::new).build();

    // Fixed: the logger was previously created for CamelElasticSearchPropertyFactory.class.
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkElasticSearchITCase.class);

    private ElasticSearchClient client;
    private String topicName;

    private final int expect = 10;
    // Incremented per verified hit on the test thread only.
    private int received;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-elasticsearch-index-sink-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        client = new ElasticSearchClient(elasticSearch.getElasticSearchHost(), elasticSearch.getPort(),
                ElasticSearchCommon.DEFAULT_ELASTICSEARCH_INDEX);
        received = 0;
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            client.waitForIndex();

            LOG.debug("Waiting for data");
            client.waitForData(expect);
        } finally {
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(30, TimeUnit.SECONDS)) {
            List<Hit<ObjectNode>> hits = client.getData();
            assertNotNull(hits);

            hits.forEach(this::verifyHit);
            assertEquals(expect, received,
                    "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    // Checks one search hit: non-empty source whose "counter" field matches the
    // number of hits verified so far (documents are expected in production order).
    private void verifyHit(Hit<ObjectNode> searchHit) {
        ObjectNode source = searchHit.source();

        LOG.debug("Search hit: {} ", source);

        assertNotNull(source);
        assertFalse(source.isEmpty());
        assertEquals(String.valueOf(received), source.at("/counter").asText());

        received++;
    }

    @Test
    @Timeout(90)
    public void testIndexOperation() throws Exception {
        ConnectorPropertyFactory propertyFactory = CamelElasticSearchPropertyFactory
                .basic()
                .withTopics(topicName)
                .withClusterName(ElasticSearchCommon.DEFAULT_ELASTICSEARCH_CLUSTER)
                .withHostAddress(elasticSearch.getHttpHostAddress())
                .withIndexName(ElasticSearchCommon.DEFAULT_ELASTICSEARCH_INDEX);

        runTest(propertyFactory, new ElasticSearchIndexMessageProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,239 |
0 | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch/common/ElasticSearchIndexMessageProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.elasticsearch.common;
import java.util.Collections;
import java.util.Map;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.common.test.AbstractTestMessageProducer;
/**
 * Test message producer for the ElasticSearch index sink: each record carries a
 * small JSON document whose {@code counter} field is the record's sequence
 * number, plus an {@code indexId} Camel header with the same number.
 */
public class ElasticSearchIndexMessageProducer extends AbstractTestMessageProducer<String> {

    public ElasticSearchIndexMessageProducer(String bootStrapServer, String topicName, int count) {
        super(bootStrapServer, topicName, count);
    }

    public ElasticSearchIndexMessageProducer(KafkaClient<String, String> kafkaClient, String topicName, int count) {
        super(kafkaClient, topicName, count);
    }

    /**
     * @return a single {@code indexId} header set to the record's sequence number
     */
    @Override
    public Map<String, String> messageHeaders(String text, int current) {
        return Collections.singletonMap(CamelSinkTask.HEADER_CAMEL_PREFIX + "indexId", String.valueOf(current));
    }

    /**
     * @return a fixed JSON document with the given sequence number as the
     *         {@code counter} field; the sink test later asserts this value
     */
    @Override
    public String testMessageContent(int current) {
        return "{\n"
                + "  \"tags\": [\n"
                + "    \"opster\",\n"
                + "    \"elasticsearch\"\n"
                + "  ],\n"
                + "  \"date\": \"01-01-2020\",\n"
                + "  \"counter\": \"" + current + "\"\n"
                + "}";
    }
}
0 | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch/common/ElasticSearchLocalContainerServiceHack.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.elasticsearch.common;
import java.io.IOException;
import java.lang.reflect.Field;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import org.apache.camel.test.infra.elasticsearch.services.ElasticSearchLocalContainerService;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
/**
 * Temporary workaround for CAMEL-19834: overrides {@code registerProperties()}
 * so that, when the container exposes a CA certificate, the private
 * {@code certPath} and {@code sslContext} fields of the superclass are filled
 * in via reflection. Remove once the upstream fix is released.
 */
public class ElasticSearchLocalContainerServiceHack extends ElasticSearchLocalContainerService {

    public ElasticSearchLocalContainerServiceHack() {
        super();
    }

    public ElasticSearchLocalContainerServiceHack(String imageName) {
        super(imageName);
    }

    public ElasticSearchLocalContainerServiceHack(ElasticsearchContainer container) {
        super(container);
    }

    /**
     * Publishes the host/port as system properties and, if the container has a
     * CA certificate, writes it to a temp file and installs an SSL context —
     * both via reflective writes into private superclass fields, since the
     * superclass offers no setters for them.
     */
    @Override
    public void registerProperties() {
        System.setProperty("elasticsearch.host", this.getElasticSearchHost());
        System.setProperty("elasticsearch.port", String.valueOf(this.getPort()));
        this.getContainer().caCertAsBytes().ifPresent(content -> {
            try {
                // Write the CA cert to a temp file and record its path in the
                // superclass's private certPath field.
                Field certPath = getClass().getSuperclass().getDeclaredField("certPath");
                certPath.setAccessible(true); // enables access to private variables
                certPath.set(this, Files.createTempFile("http_ca", ".crt"));
                Files.write((Path) certPath.get(this), content, new OpenOption[0]);
                // Build an SSL context from the container's CA and store it in
                // the superclass's private sslContext field.
                Field sslContext = getClass().getSuperclass().getDeclaredField("sslContext");
                sslContext.setAccessible(true); // enables access to private variables
                sslContext.set(this, this.getContainer().createSslContextFromCa());
            } catch (IOException | NoSuchFieldException | IllegalAccessException var3) {
                throw new RuntimeException(var3);
            }
        });
    }
}
| 9,241 |
0 | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch | Create_ds/camel-kafka-connector/tests/itests-elasticsearch/src/test/java/org/apache/camel/kafkaconnector/elasticsearch/common/ElasticSearchCommon.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.elasticsearch.common;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
/**
 * Shared ElasticSearch connection constants used across the integration tests.
 */
public final class ElasticSearchCommon {
    /**
     * The default ElasticSearch cluster name for usage during the tests
     */
    public static final String DEFAULT_ELASTICSEARCH_CLUSTER = "docker-cluster";
    /**
     * The default ElasticSearch index for usage during the tests
     */
    public static final String DEFAULT_ELASTICSEARCH_INDEX = "ckc-index";
    /**
     * The default ElasticSearch container username
     */
    public static final String USERNAME = "elastic";
    /**
     * The default ElasticSearch container password (reuses the Testcontainers
     * default so the tests match the container's out-of-the-box credentials)
     */
    public static final String PASSWORD = ElasticsearchContainer.ELASTICSEARCH_DEFAULT_PASSWORD;
    // Utility holder: constants only, never instantiated.
    private ElasticSearchCommon() {
    }
}
| 9,242 |
0 | Create_ds/camel-kafka-connector/tests/itests-timer/src/test/java/org/apache/camel/kafkaconnector/timer | Create_ds/camel-kafka-connector/tests/itests-timer/src/test/java/org/apache/camel/kafkaconnector/timer/source/CamelSourceTimerITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.timer.source;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* A simple test case that checks whether the timer produces the expected number of
* messages
*/
/**
 * A simple test case that checks whether the timer produces the expected number of
 * messages
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSourceTimerITCase extends CamelSourceTestSupport {
    /** Number of messages the timer source is expected to emit. */
    private static final int EXPECTED_COUNT = 1;

    private String topicName;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-timer-source-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
    }

    @Override
    protected void produceTestData() {
        // Nothing to produce: the timer connector generates its own messages.
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        assertEquals(EXPECTED_COUNT, received, "Did not receive as many messages as expected");
    }

    @Test
    @Timeout(30)
    public void testLaunchConnector() throws ExecutionException, InterruptedException {
        // A period of Integer.MAX_VALUE makes the timer fire exactly once
        // within the test window.
        CamelTimerPropertyFactory factory = CamelTimerPropertyFactory.basic();
        factory = factory
                .withKafkaTopic(topicName)
                .withPeriod(Integer.MAX_VALUE)
                .withMessage("hello world!");
        runTest(factory, topicName, EXPECTED_COUNT);
    }
}
| 9,243 |
0 | Create_ds/camel-kafka-connector/tests/itests-timer/src/test/java/org/apache/camel/kafkaconnector/timer | Create_ds/camel-kafka-connector/tests/itests-timer/src/test/java/org/apache/camel/kafkaconnector/timer/source/CamelTimerPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.timer.source;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
/**
 * Property factory that assembles the Kafka Connect configuration for the
 * timer-source kamelet connector used by the timer integration tests.
 */
final class CamelTimerPropertyFactory extends SourceConnectorPropertyFactory<CamelTimerPropertyFactory> {
    private CamelTimerPropertyFactory() {
        // instances are created through basic()
    }

    /** Sets the interval, in milliseconds, between timer firings. */
    public CamelTimerPropertyFactory withPeriod(int period) {
        return setProperty("camel.kamelet.timer-source.period", period);
    }

    /** Sets the payload the timer source emits on each firing. */
    public CamelTimerPropertyFactory withMessage(String message) {
        return setProperty("camel.kamelet.timer-source.message", message);
    }

    /** Creates a factory pre-populated with the baseline connector settings. */
    public static CamelTimerPropertyFactory basic() {
        CamelTimerPropertyFactory factory = new CamelTimerPropertyFactory();
        return factory
                .withName("CamelTimerSourceConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.timersource.CamelTimersourceSourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,244 |
0 | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink/CamelSinkFileAppendITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.file.sink;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.file.sink.util.CustomProducer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.junit.jupiter.Testcontainers;
import static org.apache.camel.kafkaconnector.file.sink.util.FileTestUtil.checkFileContents;
import static org.apache.camel.kafkaconnector.file.sink.util.FileTestUtil.waitForFile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Verifies that the file sink connector appends all consumed Kafka messages to
 * a single existing file when fileExist is set to "Append".
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Testcontainers
public class CamelSinkFileAppendITCase extends CamelSinkTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkFileAppendITCase.class);
    private static final String SINK_DIR = CamelSinkFileAppendITCase.class.getResource(".").getPath();
    private static final String FILENAME = "test-append.txt";

    private String topicName;
    private final int numMessages = 10;
    private final int expectedLines = 1;
    private CustomProducer producer;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-file-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        cleanup();
    }

    @AfterEach
    public void tearDown() {
        cleanup();
    }

    // Removes the sink file and its ".done" marker so each run starts clean.
    private void cleanup() {
        deleteIfPresent(new File(SINK_DIR, FILENAME + ".done"));
        deleteIfPresent(new File(SINK_DIR, FILENAME));
    }

    // Deletes the given file when it exists; a missing file is not an error.
    private static void deleteIfPresent(File file) {
        if (file.exists()) {
            file.delete();
        }
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        File sinkFile = new File(SINK_DIR, FILENAME);
        File doneFile = new File(SINK_DIR, FILENAME + ".done");
        try {
            waitForFile(sinkFile, doneFile);
            // We need to give some time for all the messages to be read and appended
            Thread.sleep(3000);
        } catch (InterruptedException | IOException e) {
            fail(e.getMessage());
        } finally {
            latch.countDown();
        }
    }

    // Expected file contents: all ten messages appended onto a single line.
    private String verifier(int i) {
        return i == 0 ? "test0test1test2test3test4test5test6test7test8test9" : "NO MATCH";
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (!latch.await(30, TimeUnit.SECONDS)) {
            fail("Failed to receive the messages within the specified time");
        }
        File sinkFile = new File(SINK_DIR, FILENAME);
        assertTrue(sinkFile.exists(), String.format("The file %s does not exist", sinkFile.getPath()));
        try {
            int lines = checkFileContents(sinkFile, this::verifier);
            assertEquals(expectedLines, lines, "Did not receive the same amount of messages that were sent");
        } catch (IOException e) {
            fail(e.getMessage());
        }
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelFilePropertyFactory.basic()
                .withTopics(topicName)
                .withDirectoryName(SINK_DIR)
                .withFileName(FILENAME)
                .withFileExist("Append")
                .withDoneFileName(FILENAME + ".done");
        producer = new CustomProducer(getKafkaService().getBootstrapServers(), topicName, numMessages);
        runTest(connectorPropertyFactory, producer);
    }
}
| 9,245 |
0 | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink/CamelFilePropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.file.sink;
import org.apache.camel.kafkaconnector.common.EndpointUrlBuilder;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
/**
 * Property factory that assembles the Kafka Connect configuration for the
 * file sink connector used by the file integration tests.
 */
final class CamelFilePropertyFactory extends SinkConnectorPropertyFactory<CamelFilePropertyFactory> {
    private CamelFilePropertyFactory() {
        // instances are created through basic()
    }

    /** Sets the name of the file the sink writes to. */
    public CamelFilePropertyFactory withFileName(String fileName) {
        return setProperty("camel.sink.endpoint.fileName", fileName);
    }

    /** Sets the marker file written once the sink finishes writing. */
    public CamelFilePropertyFactory withDoneFileName(String doneFileName) {
        return setProperty("camel.sink.endpoint.doneFileName", doneFileName);
    }

    /** Sets the directory the sink writes into. */
    public CamelFilePropertyFactory withDirectoryName(String directoryName) {
        return setProperty("camel.sink.path.directoryName", directoryName);
    }

    /** Sets the characters appended after each message in append mode. */
    public CamelFilePropertyFactory withAppendChars(String value) {
        return setProperty("camel.sink.endpoint.appendChars", value);
    }

    /** Sets the behavior when the target file already exists (e.g. "Append"). */
    public CamelFilePropertyFactory withFileExist(String value) {
        return setProperty("camel.sink.endpoint.fileExist", value);
    }

    /** Starts a raw endpoint-URL configuration rooted at the given file or directory. */
    public EndpointUrlBuilder<CamelFilePropertyFactory> withUrl(String fileOrDirName) {
        String fileUrl = String.format("file://%s", fileOrDirName);
        return new EndpointUrlBuilder<>(this::withSinkUrl, fileUrl);
    }

    /** Creates a factory pre-populated with the baseline connector settings. */
    public static CamelFilePropertyFactory basic() {
        CamelFilePropertyFactory factory = new CamelFilePropertyFactory();
        return factory
                .withTasksMax(1)
                .withName("CamelFileSinkConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.file.CamelFileSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,246 |
0 | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink/CamelSinkFileAppendCharsITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.file.sink;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.file.sink.util.CustomProducer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.junit.jupiter.Testcontainers;
import static org.apache.camel.kafkaconnector.file.sink.util.FileTestUtil.checkFileContents;
import static org.apache.camel.kafkaconnector.file.sink.util.FileTestUtil.waitForFile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Verifies the file sink connector's appendChars behavior: regular characters
 * are appended inline after every message (one long line), while the
 * URL-encoded newline ("%0A") splits the messages onto separate lines.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Testcontainers
public class CamelSinkFileAppendCharsITCase extends CamelSinkTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkFileAppendCharsITCase.class);
    private static final String SINK_DIR = CamelSinkFileAppendCharsITCase.class.getResource(".").getPath();
    private static final String FILENAME = "test-append-with-chars.txt";

    // Maps each appendChars value under test to the verifier for the resulting
    // file layout.
    private Map<String, Function<Integer, String>> verifierTable;
    private String topicName;
    private final int numMessages = 10;
    private int expectedLines;
    private String currentChar;
    private CustomProducer producer;

    // NOTE: the original carried an empty "static {}" initializer here; it was
    // dead code and has been removed.

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-file-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        cleanup();
        verifierTable = new HashMap<>();
        verifierTable.put("ddd", this::verifierRegularChar);
        verifierTable.put("n", this::verifierRegularChar);
        verifierTable.put("%0A", this::verifierNewLine);
    }

    @AfterEach
    public void tearDown() {
        cleanup();
    }

    // Removes the sink file and its ".done" marker so each run starts clean.
    private void cleanup() {
        File doneFile = new File(SINK_DIR, FILENAME + ".done");
        if (doneFile.exists()) {
            doneFile.delete();
        }
        File testFile = new File(SINK_DIR, FILENAME);
        if (testFile.exists()) {
            testFile.delete();
        }
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            File sinkFile = new File(SINK_DIR, FILENAME);
            File doneFile = new File(SINK_DIR, FILENAME + ".done");
            waitForFile(sinkFile, doneFile);
            // We need to give some time for all the messages to be read and appended
            Thread.sleep(3000);
        } catch (InterruptedException | IOException e) {
            fail(e.getMessage());
        } finally {
            latch.countDown();
        }
    }

    // Expected contents when a regular character sequence is appended: every
    // message followed by the appended chars, concatenated on a single line.
    private String verifierRegularChar(int currentLine) {
        // StringBuilder avoids the O(n^2) cost of += string concatenation in a loop
        StringBuilder expected = new StringBuilder();
        for (int i = 0; i < numMessages; i++) {
            expected.append(producer.testMessageContent(i)).append(currentChar);
        }
        return expected.toString();
    }

    // Expected contents when a newline is appended: one message per line.
    private String verifierNewLine(int currentLine) {
        return producer.testMessageContent(currentLine);
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(30, TimeUnit.SECONDS)) {
            File sinkFile = new File(SINK_DIR, FILENAME);
            assertTrue(sinkFile.exists(), String.format("The file %s does not exist", sinkFile.getPath()));
            try {
                Function<Integer, String> verifier = verifierTable.get(currentChar);
                int lines = checkFileContents(sinkFile, verifier);
                assertEquals(expectedLines, lines, "Did not receive the same amount of messages that were sent");
            } catch (IOException e) {
                fail(e.getMessage());
            }
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    @ParameterizedTest
    @ValueSource(strings = {"ddd", "n"})
    @Timeout(90)
    public void testBasicSendReceiveWithAppendChar(String appendedChars) throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelFilePropertyFactory.basic()
                .withTopics(topicName)
                .withDirectoryName(SINK_DIR)
                .withFileName(FILENAME)
                .withAppendChars(appendedChars)
                .withFileExist("Append")
                .withDoneFileName(FILENAME + ".done");
        producer = new CustomProducer(getKafkaService().getBootstrapServers(), topicName, numMessages);
        expectedLines = 1;
        currentChar = appendedChars;
        runTest(connectorPropertyFactory, producer);
    }

    @ParameterizedTest
    @ValueSource(strings = {"%0A"})
    @Timeout(90)
    public void testBasicSendReceiveWithAppendSpecialChars(String appendedChars) throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelFilePropertyFactory.basic()
                .withTopics(topicName)
                .withDirectoryName(SINK_DIR)
                .withFileName(FILENAME)
                .withAppendChars(appendedChars)
                .withFileExist("Append")
                .withDoneFileName(FILENAME + ".done");
        producer = new CustomProducer(getKafkaService().getBootstrapServers(), topicName, numMessages);
        expectedLines = numMessages;
        currentChar = appendedChars;
        runTest(connectorPropertyFactory, producer);
    }
}
| 9,247 |
0 | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink/CamelSinkFileITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.file.sink;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.file.sink.util.CustomProducer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.junit.jupiter.Testcontainers;
import static org.apache.camel.kafkaconnector.file.sink.util.FileTestUtil.checkFileContents;
import static org.apache.camel.kafkaconnector.file.sink.util.FileTestUtil.waitForFile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Verifies the file sink connector writes Kafka messages to a file, configured
 * both via individual properties and via a raw endpoint URL.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Testcontainers
public class CamelSinkFileITCase extends CamelSinkTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkFileITCase.class);
    private static final String SINK_DIR = CamelSinkFileITCase.class.getResource(".").getPath();
    private static final String FILENAME = "test.txt";

    private String topicName;
    private final int expect = 1;
    private CustomProducer producer;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-file-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        cleanup();
    }

    @AfterEach
    public void tearDown() {
        cleanup();
    }

    // Removes the sink file and its ".done" marker so each run starts clean.
    // The original deleted only the done file; deleting the sink file too keeps
    // this consistent with the append test cases and avoids stale content from
    // a previously failed run.
    private void cleanup() {
        File doneFile = new File(SINK_DIR, FILENAME + ".done");
        if (doneFile.exists()) {
            doneFile.delete();
        }
        File testFile = new File(SINK_DIR, FILENAME);
        if (testFile.exists()) {
            testFile.delete();
        }
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            File sinkFile = new File(SINK_DIR, FILENAME);
            File doneFile = new File(SINK_DIR, FILENAME + ".done");
            waitForFile(sinkFile, doneFile);
        } catch (InterruptedException | IOException e) {
            fail(e.getMessage());
        } finally {
            latch.countDown();
        }
    }

    // Expected file contents: one producer message per line.
    private String verifier(int i) {
        return producer.testMessageContent(i);
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(30, TimeUnit.SECONDS)) {
            File sinkFile = new File(SINK_DIR, FILENAME);
            assertTrue(sinkFile.exists(), String.format("The file %s does not exist", sinkFile.getPath()));
            try {
                int lines = checkFileContents(sinkFile, this::verifier);
                assertEquals(expect, lines, "Did not receive the same amount of messages that were sent");
            } catch (IOException e) {
                fail(e.getMessage());
            }
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelFilePropertyFactory.basic()
                .withTopics(topicName)
                .withDirectoryName(SINK_DIR)
                .withFileName(FILENAME)
                .withDoneFileName(FILENAME + ".done");
        producer = new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect);
        runTest(connectorPropertyFactory, producer);
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceiveUsingUrl() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelFilePropertyFactory.basic()
                .withTopics(topicName)
                .withUrl(SINK_DIR)
                .append("fileName", FILENAME)
                .append("doneFileName", FILENAME + ".done")
                .buildUrl();
        producer = new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect);
        runTest(connectorPropertyFactory, producer);
    }
}
| 9,248 |
0 | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink/util/CustomProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.file.sink.util;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
/**
 * Test message producer emitting deterministic payloads of the form
 * "test0", "test1", ... so file contents can be verified exactly.
 */
public class CustomProducer extends StringMessageProducer {
    // Common payload prefix; every message is this prefix plus its index.
    private final String messageContent = "test";

    public CustomProducer(String bootstrapServer, String topicName, int count) {
        super(bootstrapServer, topicName, count);
    }

    /** Returns the payload for the given message index. */
    @Override
    public String testMessageContent(int current) {
        return messageContent + current;
    }

    /** Returns the common payload prefix. */
    public String getMessageContent() {
        return messageContent;
    }
}
| 9,249 |
0 | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink | Create_ds/camel-kafka-connector/tests/itests-file/src/test/java/org/apache/camel/kafkaconnector/file/sink/util/FileTestUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.file.sink.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * File-based helpers shared by the file sink integration tests: content
 * verification and a bounded wait for the connector's "done" marker file.
 */
public final class FileTestUtil {
    private static final Logger LOG = LoggerFactory.getLogger(FileTestUtil.class);

    // Utility class: static helpers only.
    private FileTestUtil() {
    }

    /**
     * Reads the sink file line by line, asserting that each line equals the
     * value the verifier produces for that (0-based) line number.
     *
     * @param sinkFile the file to verify
     * @param consumer maps a 0-based line number to its expected contents
     * @return the number of lines read
     * @throws IOException if the file cannot be read
     */
    public static int checkFileContents(File sinkFile, Function<Integer, String> consumer) throws IOException {
        // try-with-resources: the original leaked the reader, notably when an
        // assertion threw mid-read
        try (BufferedReader reader = new BufferedReader(new FileReader(sinkFile))) {
            int currentLine = 0;
            String line;
            while ((line = reader.readLine()) != null) {
                assertEquals(consumer.apply(currentLine), line, String.format("Unexpected data: %s", line));
                currentLine++;
            }
            return currentLine;
        }
    }

    /**
     * Blocks until the done file appears next to the sink file, polling the
     * directory watch service in 1-second slices for up to ~30 seconds.
     * Returns immediately if the done file already exists.
     *
     * @param sinkFile the sink file whose parent directory is watched
     * @param doneFile the marker file whose creation ends the wait
     * @throws IOException if the watch service cannot be set up
     * @throws InterruptedException if interrupted while polling
     */
    public static void waitForFile(File sinkFile, File doneFile) throws IOException, InterruptedException {
        // Check before creating the watch service (the original created it
        // first and leaked it on this early-return path).
        if (doneFile.exists()) {
            return;
        }
        // try-with-resources: the watch service was never closed in the original
        try (WatchService watchService = FileSystems.getDefault().newWatchService()) {
            Path path = sinkFile.getParentFile().toPath();
            // We watch for both the file creation and truncation
            path.register(watchService, StandardWatchEventKinds.ENTRY_CREATE, StandardWatchEventKinds.ENTRY_MODIFY);
            int retries = 30;
            do {
                WatchKey watchKey = watchService.poll(1, TimeUnit.SECONDS);
                // Decrement before the null check: the original's "continue"
                // skipped the decrement, so an idle directory could defeat the
                // retry bound and wait indefinitely.
                retries--;
                if (watchKey == null) {
                    continue;
                }
                for (WatchEvent<?> event : watchKey.pollEvents()) {
                    /*
                     It should return a Path object for ENTRY_CREATE and ENTRY_MODIFY events
                     */
                    Object context = event.context();
                    if (!(context instanceof Path)) {
                        LOG.warn("Received an unexpected event of kind {} for context {}", event.kind(), event.context());
                        continue;
                    }
                    Path contextPath = (Path) context;
                    if (contextPath.toString().equals(doneFile.getName())) {
                        LOG.info("Sink file at the build path {} had a matching event of type: {}", sinkFile.getPath(),
                                event.kind());
                        return;
                    } else {
                        LOG.debug("Ignoring a watch event at build path {} of type {} for file: {}", sinkFile.getPath(),
                                event.kind(), contextPath.getFileName());
                    }
                }
                watchKey.reset();
            } while (!doneFile.exists() && retries > 0);
        }
    }
}
| 9,250 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/clients/JMSClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.clients;
import java.util.function.Function;
import java.util.function.Predicate;
import jakarta.jms.Connection;
import jakarta.jms.ConnectionFactory;
import jakarta.jms.DeliveryMode;
import jakarta.jms.Destination;
import jakarta.jms.JMSException;
import jakarta.jms.Message;
import jakarta.jms.MessageConsumer;
import jakarta.jms.MessageProducer;
import jakarta.jms.Session;
import org.junit.jupiter.api.Assertions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.fail;
/**
* A basic multi-protocol JMS client
*/
public class JMSClient {
private static final Logger LOG = LoggerFactory.getLogger(JMSClient.class);
private Connection connection;
private Session session;
private ConnectionFactory factory;
    /**
     * Builds the client from a factory function mapping a broker URL to a
     * concrete {@link ConnectionFactory}.
     *
     * @param connectionFactory maps the URL to the ConnectionFactory to use
     * @param url the broker URL handed to the factory function
     */
    public JMSClient(Function<String, ? extends ConnectionFactory> connectionFactory,
                     String url) {
        factory = connectionFactory.apply(url);
    }
public JMSClient(String className, String url) {
Class<? extends ConnectionFactory> clazz;
try {
clazz = (Class<? extends ConnectionFactory>) Class.forName(className);
factory = clazz.getConstructor(String.class).newInstance(url);
} catch (Exception e) {
LOG.error("Unable to create the JMS client classL {}", e.getMessage(), e);
Assertions.fail(e);
}
}
@SuppressWarnings("UnusedReturnValue")
public static Throwable capturingClose(MessageProducer closeable) {
LOG.debug("Closing the producer ");
if (closeable != null) {
try {
closeable.close();
} catch (Throwable t) {
LOG.warn("Error closing the producer: {}", t.getMessage(), t);
return t;
}
}
return null;
}
private static void capturingClose(Session closeable) {
LOG.debug("Closing the session ");
if (closeable != null) {
try {
closeable.close();
} catch (Throwable t) {
LOG.warn("Error closing the session: {}", t.getMessage(), t);
}
}
}
private static void capturingClose(MessageConsumer closeable) {
LOG.debug("Closing the consumer");
if (closeable != null) {
try {
closeable.close();
} catch (Throwable t) {
LOG.warn("Error closing the consumer: {}", t.getMessage(), t);
}
}
}
private static void capturingClose(Connection closeable) {
LOG.debug("Closing the connection");
if (closeable != null) {
try {
closeable.close();
} catch (Throwable t) {
LOG.warn("Error closing the connection: {}", t.getMessage(), t);
}
}
}
public void start() throws Exception {
LOG.debug("Starting the JMS client");
try {
LOG.debug("Creating the connection");
connection = factory.createConnection();
LOG.debug("Connection created successfully");
LOG.debug("Creating the JMS session");
this.session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
LOG.debug("JMS session created successfully");
} catch (Throwable t) {
LOG.trace("Something wrong happened while initializing the JMS client: {}", t.getMessage(), t);
capturingClose(connection);
throw t;
}
connection.start();
}
public void stop() {
try {
LOG.debug("Stopping the JMS session");
capturingClose(session);
LOG.debug("Stopping the JMS connection");
capturingClose(connection);
} finally {
session = null;
connection = null;
}
}
private Destination createDestination(final String destinationName) {
try {
return session.createQueue(destinationName);
} catch (JMSException e) {
Assertions.fail(e.getMessage());
// unreachable
return null;
}
}
/**
* Receives data from a JMS queue or topic
*
* @param predicate the predicate used to test each received message
* @throws JMSException
*/
public void receive(MessageConsumer consumer, Predicate<Message> predicate, long timeout) throws JMSException {
while (true) {
final Message message = consumer.receive(timeout);
if (!predicate.test(message)) {
return;
}
}
}
/**
* Receives data from a JMS queue or topic
*
* @param predicate the predicate used to test each received message
* @throws JMSException
*/
public void receive(MessageConsumer consumer, Predicate<Message> predicate) throws JMSException {
receive(consumer, predicate, 3000);
}
public MessageConsumer createConsumer(String queue) throws JMSException {
return session.createConsumer(createDestination(queue));
}
/**
* Receives data from a JMS queue or topic
*
* @param queue the queue or topic to receive data from
* @param predicate the predicate used to test each received message
* @throws JMSException
*/
public void receive(final String queue, Predicate<Message> predicate) throws JMSException {
MessageConsumer consumer = null;
try {
consumer = createConsumer(queue);
receive(consumer, predicate);
} finally {
capturingClose(consumer);
}
}
/**
* Sends data to a JMS queue or topic
*
* @param queue the queue or topic to send data to
* @param data the (string) data to send
* @throws JMSException
*/
public void send(final String queue, final String data) throws JMSException {
MessageProducer producer = null;
try {
producer = session.createProducer(createDestination(queue));
producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT);
producer.setTimeToLive(0);
Message message = session.createTextMessage(data);
producer.send(message);
} finally {
capturingClose(producer);
}
}
/**
* Sends data to a JMS queue or topic
*
* @param queue the queue or topic to send data to
* @param data the (string) data to send
* @throws JMSException
*/
public void send(final String queue, int data) throws JMSException {
MessageProducer producer = null;
try {
producer = session.createProducer(createDestination(queue));
producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT);
producer.setTimeToLive(0);
Message message = session.createObjectMessage(data);
producer.send(message);
} finally {
capturingClose(producer);
}
}
public static void produceMessages(JMSClient jmsProducer, String queue, int count, Function<Integer, String> supplier) {
try {
jmsProducer.start();
for (int i = 0; i < count; i++) {
jmsProducer.send(queue, supplier.apply(i));
}
} catch (JMSException e) {
LOG.error("JMS exception trying to send messages to the queue: {}", e.getMessage(), e);
fail(e.getMessage());
} catch (Exception e) {
LOG.error("Failed to send messages to the queue: {}", e.getMessage(), e);
fail(e.getMessage());
} finally {
jmsProducer.stop();
}
}
public static void produceMessages(JMSClient jmsProducer, String queue, int count, String baseText) {
try {
jmsProducer.start();
for (int i = 0; i < count; i++) {
jmsProducer.send(queue, baseText + " " + i);
}
} catch (JMSException e) {
LOG.error("JMS exception trying to send messages to the queue: {}", e.getMessage(), e);
fail(e.getMessage());
} catch (Exception e) {
LOG.error("Failed to send messages to the queue: {}", e.getMessage(), e);
fail(e.getMessage());
} finally {
jmsProducer.stop();
}
}
public static void produceMessages(JMSClient jmsProducer, String queue, int count) {
try {
jmsProducer.start();
for (int i = 0; i < count; i++) {
jmsProducer.send(queue, i);
}
} catch (JMSException e) {
LOG.error("JMS exception trying to send messages to the queue: {}", e.getMessage(), e);
fail(e.getMessage());
} catch (Exception e) {
LOG.error("Failed to send messages to the queue: {}", e.getMessage(), e);
fail(e.getMessage());
} finally {
jmsProducer.stop();
}
}
private static JMSClient newLocalClient(String endpoint) {
String jmsClientType = System.getProperty("jms-service.transport.protocol");
if (jmsClientType == null || jmsClientType.isEmpty() || jmsClientType.equals("qpid")) {
return new JMSClient(org.apache.qpid.jms.JmsConnectionFactory::new, endpoint);
}
throw new UnsupportedOperationException("Invalid JMS transport protocol");
}
private static JMSClient newRemoteClient(String endpoint) {
String tmpConnectionFactory = System.getProperty("camel.component.sjms2.connection-factory");
if (tmpConnectionFactory == null) {
throw new UnsupportedOperationException("JMS connection factory class must be provided");
}
String connectionFactory = tmpConnectionFactory.replace("#class:", "");
String jmsClientType = System.getProperty("jms-service.transport.protocol");
if (jmsClientType == null || jmsClientType.isEmpty() || jmsClientType.equals("qpid")) {
return new JMSClient(connectionFactory, endpoint);
}
if (jmsClientType.equals("openwire")) {
return new JMSClient(connectionFactory, endpoint);
}
throw new UnsupportedOperationException("Invalid JMS transport protocol");
}
public static JMSClient newClient(String endpoint) {
String jmsInstanceType = System.getProperty("jms-service.instance.type");
if (jmsInstanceType == null || !jmsInstanceType.equals("remote")) {
return newLocalClient(endpoint);
}
return newRemoteClient(endpoint);
}
}
| 9,251 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/source/CamelSourceJMSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.source;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.IntegerMessageConsumer;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.kafkaconnector.sjms2.clients.JMSClient;
import org.apache.camel.kafkaconnector.sjms2.common.SJMS2Common;
import org.apache.camel.test.infra.messaging.services.MessagingService;
import org.apache.camel.test.infra.messaging.services.MessagingServiceFactory;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * A simple test case that checks whether the JMS source connector produces the
 * expected number of messages into Kafka.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSourceJMSITCase extends CamelSourceTestSupport {
    @RegisterExtension
    public static MessagingService jmsService = MessagingServiceFactory
            .builder()
            .addLocalMapping(SJMS2Common::createLocalService)
            .build();

    private String topicName;
    // Number of messages each test expects to flow from JMS into Kafka
    private final int expect = 10;
    private JMSClient jmsClient;

    // Connection properties pointing the SJMS2 component at the test broker
    private Properties connectionProperties() {
        Properties properties = new Properties();
        properties.put("camel.component.sjms2.connection-factory", "#class:org.apache.qpid.jms.JmsConnectionFactory");
        properties.put("camel.component.sjms2.connection-factory.remoteURI", jmsService.defaultEndpoint());
        return properties;
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-sjms2-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
    }

    @BeforeAll
    public void setupClient() {
        jmsClient = JMSClient.newClient(jmsService.defaultEndpoint());
    }

    @Override
    protected void produceTestData() {
        JMSClient.produceMessages(jmsClient, SJMS2Common.DEFAULT_JMS_QUEUE, expect, "Test string message");
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        // JUnit's assertEquals takes the expected value first, then the actual one
        assertEquals(expect, received, "Didn't process the expected amount of messages");
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withDestinationName(SJMS2Common.DEFAULT_JMS_QUEUE)
                .withConnectionProperties(connectionProperties());

        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceiveUsingUrl() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                .basic()
                .withConnectionProperties(connectionProperties())
                .withKafkaTopic(topicName)
                .withUrl(SJMS2Common.DEFAULT_JMS_QUEUE)
                .buildUrl();

        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(90)
    public void testIntSendReceive() throws ExecutionException, InterruptedException {
        final String jmsQueueName = "testIntSendReceive";

        ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withDestinationName(jmsQueueName)
                .withConnectionProperties(connectionProperties());

        KafkaClient<String, Integer> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());
        IntegerMessageConsumer consumer = new IntegerMessageConsumer(kafkaClient, topicName, expect);

        runTest(connectorPropertyFactory, consumer, () -> JMSClient.produceMessages(jmsClient, jmsQueueName, expect));
    }
}
| 9,252 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/source/CamelSourceJMSWithAggregation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.source;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageConsumer;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.kafkaconnector.sjms2.clients.JMSClient;
import org.apache.camel.kafkaconnector.sjms2.common.SJMS2Common;
import org.apache.camel.test.infra.common.TestUtils;
import org.apache.camel.test.infra.messaging.services.MessagingService;
import org.apache.camel.test.infra.messaging.services.MessagingServiceFactory;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Checks that multiple JMS messages are aggregated into a single Kafka record
 * by the string aggregator configured on the source connector.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSourceJMSWithAggregation extends CamelSourceTestSupport {
    @RegisterExtension
    public static MessagingService jmsService = MessagingServiceFactory
            .builder()
            .addLocalMapping(SJMS2Common::createLocalService)
            .build();

    // Number of individual JMS messages sent
    private final int sentSize = 10;
    // Number of (aggregated) Kafka records expected
    private final int expect = 1;
    private JMSClient jmsClient;
    private String expectedMessage = "";
    private String queueName;
    private String topicName;

    /**
     * A consumer that keeps polling Kafka until at least one aggregated record
     * arrives (or the retry budget is exhausted), since aggregation delays the
     * record past a single poll.
     */
    class GreedyConsumer extends StringMessageConsumer {
        public GreedyConsumer(KafkaClient<String, String> kafkaClient, String topicName, int count) {
            super(kafkaClient, topicName, count);
        }

        @Override
        public void consumeMessages() {
            // Bounded retry loop: sleep between empty polls to avoid busy-waiting
            int retries = 10;
            do {
                kafkaClient.consumeAvailable(super.topicName, super::checkRecord);
                if (consumedMessages().isEmpty()) {
                    retries--;
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        // restore the interrupt flag before giving up
                        Thread.currentThread().interrupt();
                        break;
                    }
                }
            } while (consumedMessages().isEmpty() && retries > 0);
        }
    }

    // Connection properties pointing the SJMS2 component at the test broker
    private Properties connectionProperties() {
        Properties properties = new Properties();
        properties.put("camel.component.sjms2.connection-factory", "#class:org.apache.qpid.jms.JmsConnectionFactory");
        properties.put("camel.component.sjms2.connection-factory.remoteURI", jmsService.defaultEndpoint());
        return properties;
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-sjms2-kafka-connector"};
    }

    @BeforeAll
    public void setupClient() {
        jmsClient = JMSClient.newClient(jmsService.defaultEndpoint());

        // The aggregator joins the bodies with newlines: n-1 messages followed
        // by a newline, plus a final one without
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < sentSize - 1; i++) {
            sb.append("hello;\n");
        }
        sb.append("hello;");
        expectedMessage = sb.toString();
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        // Randomize the queue name so reruns do not consume stale messages
        queueName = SJMS2Common.DEFAULT_JMS_QUEUE + "." + TestUtils.randomWithRange(1, 100);
    }

    @Override
    protected void produceTestData() {
        JMSClient.produceMessages(jmsClient, queueName, sentSize,
                CamelSourceJMSWithAggregation::textToSend);
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        Object receivedObject = consumer.consumedMessages().get(0).value();
        if (!(receivedObject instanceof String)) {
            fail("Unexpected message type");
        }
        String receivedMessage = (String) receivedObject;

        assertEquals(expect, received, "Didn't process the expected amount of messages");
        assertEquals(expectedMessage, receivedMessage, "The messages don't match");
    }

    // Every JMS message carries the same body; the index is ignored
    private static String textToSend(Integer i) {
        return "hello;";
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withDestinationName(queueName)
                .withConnectionProperties(connectionProperties())
                .withAggregate("org.apache.camel.kafkaconnector.aggregator.StringAggregator", sentSize,
                        1000);

        KafkaClient<String, String> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());
        GreedyConsumer greedyConsumer = new GreedyConsumer(kafkaClient, topicName, expect);

        runTestBlocking(connectorPropertyFactory, greedyConsumer);
    }
}
| 9,253 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/source/CamelJMSPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.source;
import java.util.Properties;
import org.apache.camel.kafkaconnector.common.EndpointUrlBuilder;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
/**
 * Builds the set of properties used to configure a Camel SJMS2 source connector.
 */
final class CamelJMSPropertyFactory extends SourceConnectorPropertyFactory<CamelJMSPropertyFactory> {
    private CamelJMSPropertyFactory() {
    }

    /**
     * Creates a factory pre-populated with the defaults for the SJMS2 source
     * connector (name, single task, connector class and String converters).
     */
    public static CamelJMSPropertyFactory basic() {
        CamelJMSPropertyFactory factory = new CamelJMSPropertyFactory();

        return factory
                .withName("CamelJMSSourceConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.sjms2.CamelSjms2SourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }

    /** Sets the JMS destination (queue or topic) the source consumes from. */
    public CamelJMSPropertyFactory withDestinationName(String destinationName) {
        return setProperty("camel.source.path.destinationName", destinationName);
    }

    /** Starts an endpoint-URL based configuration for the given destination. */
    public EndpointUrlBuilder<CamelJMSPropertyFactory> withUrl(String destinationName) {
        return new EndpointUrlBuilder<>(this::withSourceUrl, "sjms2://" + destinationName);
    }

    /** Merges the given JMS connection properties into the connector configuration. */
    public CamelJMSPropertyFactory withConnectionProperties(Properties connectionProperties) {
        return merge(connectionProperties);
    }
}
| 9,254 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/sink/CamelSinkJMSStartupITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.sink;
import java.time.Duration;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.common.AbstractKafkaTest;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.sjms2.common.SJMS2Common;
import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * A simple test to make sure we are not losing or hiding exception data on errors:
 * the connector is given an invalid broker URI and is expected to end up in a
 * failed state with a usable stack trace.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSinkJMSStartupITCase extends AbstractKafkaTest {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkJMSStartupITCase.class);

    // Updated by connectorStateCheck: true while every task reports RUNNING
    private boolean running;
    // Stack trace captured from the first non-running task, logged for diagnosis
    private String trace;
    private String topicName;

    // Deliberately broken connection properties (invalid broker URI) used to
    // force the connector into a failed state
    private Properties connectionProperties() {
        Properties properties = new Properties();
        properties.put("camel.component.sjms2.connection-factory", "#class:org.apache.qpid.jms.JmsConnectionFactory");
        properties.put("camel.component.sjms2.connection-factory.remoteURI", "amqp://invalid");
        return properties;
    }

    @BeforeEach
    void setUp() {
        topicName = getTopicForTest(this);
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-sjms2-kafka-connector"};
    }

    // Records whether all tasks of the connector are currently RUNNING
    private void connectorStateCheck(ConnectorStateInfo connectorStateInfo) {
        LOG.debug("Checking state for {}", connectorStateInfo.name());

        running = connectorStateInfo.tasks().stream().allMatch(this::isRunning);
    }

    // Returns true when the task is RUNNING; otherwise captures its trace
    private boolean isRunning(ConnectorStateInfo.TaskState t) {
        boolean isRunningState = t.state().equals("RUNNING");

        if (!isRunningState) {
            trace = t.trace();
        }

        return isRunningState;
    }

    // Initializes the (misconfigured) connector and pushes one record at it
    private void runTest(ConnectorPropertyFactory connectorPropertyFactory) throws ExecutionException, InterruptedException {
        connectorPropertyFactory.log();
        getKafkaConnectService().initializeConnector(connectorPropertyFactory);

        KafkaClient<String, String> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());

        kafkaClient.produce(topicName, "Sink test message ");
    }

    // Polls the connector state for up to ~25 seconds and asserts it failed
    private void checkThatFailed() throws InterruptedException {
        int i = 25;
        do {
            // use the accessor consistently (runTest does the same)
            getKafkaConnectService().connectorStateCheck(this::connectorStateCheck);
            i--;

            if (i > 0 && running) {
                Thread.sleep(Duration.ofSeconds(1).toMillis());
            }
        } while (i > 0 && running);

        assertFalse(running, "The connector should be in a failed state");
        LOG.trace(trace);
    }

    @Test
    @Timeout(30)
    public void testStartup() {
        try {
            Properties brokenProp = connectionProperties();

            ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                    .basic()
                    .withTopics(topicName)
                    .withConnectionProperties(brokenProp)
                    .withDestinationName(SJMS2Common.DEFAULT_JMS_QUEUE)
                    .withDeadLetterQueueTopicName("dlq-sink-topic");

            // Inject an invalid configuration and check that fails
            runTest(connectorPropertyFactory);
            checkThatFailed();
        } catch (Exception e) {
            LOG.error("JMS test failed: {}", e.getMessage(), e);
            fail(e.getMessage());
        }
    }
}
| 9,255 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/sink/CamelSinkIdempotentJMSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.sink;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import jakarta.jms.JMSException;
import jakarta.jms.Message;
import jakarta.jms.MessageConsumer;
import jakarta.jms.TextMessage;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.sjms2.clients.JMSClient;
import org.apache.camel.kafkaconnector.sjms2.common.SJMS2Common;
import org.apache.camel.test.infra.common.TestUtils;
import org.apache.camel.test.infra.messaging.services.MessagingService;
import org.apache.camel.test.infra.messaging.services.MessagingServiceFactory;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration tests for the JMS sink using idempotent features: each record is
 * produced twice to Kafka and only {@code expect} messages must reach JMS.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSinkIdempotentJMSITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static MessagingService jmsService = MessagingServiceFactory
            .builder()
            .addLocalMapping(SJMS2Common::createLocalService)
            .build();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkIdempotentJMSITCase.class);

    private String topic;
    private String destinationName;
    // Count of JMS messages actually received; reset before each test
    private int received;
    // Number of unique messages expected after idempotent de-duplication
    private final int expect = 10;

    // Connection properties pointing the SJMS2 component at the test broker
    private Properties connectionProperties() {
        Properties properties = new Properties();
        properties.put("camel.component.sjms2.connection-factory", "#class:org.apache.qpid.jms.JmsConnectionFactory");
        properties.put("camel.component.sjms2.connection-factory.remoteURI", jmsService.defaultEndpoint());
        return properties;
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-sjms2-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        LOG.info("JMS service running at {}", jmsService.defaultEndpoint());
        received = 0;
        topic = getTopicForTest(this);
        // Randomize the destination so reruns do not consume stale messages
        destinationName = SJMS2Common.DEFAULT_JMS_QUEUE + "-" + TestUtils.randomWithRange(0, 100);
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        JMSClient jmsClient = null;
        try {
            jmsClient = JMSClient.newClient(jmsService.defaultEndpoint());
            jmsClient.start();
            try (MessageConsumer consumer = jmsClient.createConsumer(destinationName)) {
                // number of retries until stale
                int retries = 10;
                while (retries > 0) {
                    LOG.debug("Waiting for JMS messages (received {} of {} / retry {})", received, expect, retries);
                    jmsClient.receive(consumer, this::checkRecord, 1000);

                    // Once the expected count has been reached (or exceeded, if
                    // de-duplication failed), drain for 'retries' more rounds to
                    // catch any stray duplicates before giving up
                    if (received >= expect) {
                        retries--;
                    } else {
                        retries = 10;
                    }
                }
            }
        } catch (InterruptedException e) {
            LOG.warn("Interrupted, stopping ...");
            Thread.currentThread().interrupt();
        } catch (Exception e) {
            LOG.error("JMS test failed: {}", e.getMessage(), e);
            fail(e.getMessage());
        } finally {
            latch.countDown();

            if (jmsClient != null) {
                jmsClient.stop();
            }
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(25, TimeUnit.SECONDS)) {
            // JUnit's assertEquals takes the expected value first
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received
                    + " != " + expect);
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    // Counts text messages; returns false on anything else (including a null
    // timed-out receive) so that the receive loop stops
    private boolean checkRecord(Message jmsMessage) {
        if (jmsMessage instanceof TextMessage) {
            try {
                LOG.debug("Received: {}", ((TextMessage) jmsMessage).getText());

                received++;

                return true;
            } catch (JMSException e) {
                LOG.error("Failed to read message: {}", e.getMessage(), e);
                fail("Failed to read message: " + e.getMessage());
            }
        }

        return false;
    }

    // Produces every record twice with identical bodies so that body-based
    // idempotency filters out the duplicates
    private void produceMessagesNoProperties() {
        try {
            KafkaClient<String, String> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());

            for (int i = 0; i < expect; i++) {
                LOG.debug("Sending message 1/2");
                kafkaClient.produce(topic, "Sink test message " + i);
                LOG.debug("Sending message 2/2");
                kafkaClient.produce(topic, "Sink test message " + i);
            }
        } catch (Exception e) {
            fail(e.getMessage());
        }
    }

    // Produces record pairs that share the same MessageNumber header but have
    // different bodies, so only header-based idempotency de-duplicates them
    private void produceMessagesWithProperties() {
        try {
            KafkaClient<String, String> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());
            for (int i = 0; i < expect; i++) {
                Map<String, String> headers = new HashMap<>();

                int randomNumber = TestUtils.randomWithRange(1, 1000);
                headers.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "MessageNumber", String.valueOf(i));

                kafkaClient.produce(topic, "Sink test message " + randomNumber, headers);
                // parenthesized so the body differs numerically instead of by
                // string concatenation (".. + randomNumber + 1" appended "1")
                kafkaClient.produce(topic, "Sink test message " + (randomNumber + 1), headers);
            }
        } catch (Exception e) {
            fail(e.getMessage());
        }
    }

    @Test
    @Timeout(90)
    public void testIdempotentBodySendReceive() {
        try {
            ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                    .basic()
                    .withTopics(topic)
                    .withConnectionProperties(connectionProperties())
                    .withDestinationName(destinationName)
                    .withIdempotency()
                        .withRepositoryType("memory")
                        .withExpressionType("body")
                        .end();

            runTest(connectorPropertyFactory, this::produceMessagesNoProperties);
        } catch (Exception e) {
            LOG.error("JMS test failed: {}", e.getMessage(), e);
            fail(e.getMessage());
        }
    }

    @Test
    @Timeout(90)
    public void testIdempotentHeaderSendReceive() {
        try {
            ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                    .basic()
                    .withTopics(topic)
                    .withConnectionProperties(connectionProperties())
                    .withDestinationName(destinationName)
                    .withIdempotency()
                        .withRepositoryType("memory")
                        .withExpressionType("header")
                        .withExpressionHeader("MessageNumber")
                        .end();

            runTest(connectorPropertyFactory, this::produceMessagesWithProperties);
        } catch (Exception e) {
            LOG.error("JMS test failed: {}", e.getMessage(), e);
            fail(e.getMessage());
        }
    }
}
| 9,256 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/sink/CamelSinkWithDLQJMSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.sink;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.common.AbstractKafkaTest;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.sjms2.common.SJMS2Common;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
* Integration tests for the JMS sink with a DLQ configuration. This test forces a failure in the sink connector to
* ensure that the failed records are added to the DLQ configured in Kafka.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSinkWithDLQJMSITCase extends AbstractKafkaTest {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkWithDLQJMSITCase.class);

    // Number of records produced to the sink topic
    private final int expect = 10;
    // Number of records seen on the DLQ so far (reset per test)
    private int errors;
    // Number of DLQ records after which consumption stops
    private final int expectedErrors = 1;
    private String topicName;

    /**
     * Builds connection properties with an intentionally invalid remote URI so
     * that every record fails in the sink connector and is routed to the DLQ.
     */
    private Properties connectionProperties() {
        Properties properties = new Properties();
        properties.put("camel.component.sjms2.connection-factory", "#class:org.apache.qpid.jms.JmsConnectionFactory");
        properties.put("camel.component.sjms2.connection-factory.remoteURI", "invalid");
        return properties;
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-sjms2-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        errors = 0;
        topicName = getTopicForTest(this);
    }

    /**
     * Kafka consumer callback for the DLQ topic: counts received records and
     * signals the consumer to stop once the expected number has been seen.
     *
     * @return true to keep consuming, false to stop
     */
    private <T> boolean checkDlqRecord(ConsumerRecord<String, T> record) {
        LOG.debug("Received: {}", record.value());
        errors++;

        return errors < expectedErrors;
    }

    /**
     * Initializes the (deliberately broken) sink connector and produces the
     * test records that are expected to fail delivery.
     */
    private void runTest(ConnectorPropertyFactory connectorPropertyFactory) throws ExecutionException, InterruptedException {
        connectorPropertyFactory.log();
        getKafkaConnectService().initializeConnector(connectorPropertyFactory);

        LOG.debug("Creating the producer ...");
        KafkaClient<String, String> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());

        for (int i = 0; i < expect; i++) {
            kafkaClient.produce(topicName, "Sink test message " + i);
        }

        LOG.debug("Created the producer and sent the test messages");
    }

    @Test
    @Timeout(10)
    public void testSendReceiveWithError() {
        try {
            // connectionProperties() already points the connector at an invalid
            // broker URI, so every record is expected to end up on the DLQ
            ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                    .basic()
                    .withTopics(topicName)
                    .withConnectionProperties(connectionProperties())
                    .withDestinationName(SJMS2Common.DEFAULT_JMS_QUEUE)
                    .withDeadLetterQueueTopicName("dlq-sink-topic");

            runTest(connectorPropertyFactory);

            KafkaClient<String, Integer> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());
            kafkaClient.consume("dlq-sink-topic", this::checkDlqRecord);

            assertEquals(expectedErrors, errors, "Didn't process the expected amount of messages");
        } catch (Exception e) {
            LOG.error("JMS test failed: {}", e.getMessage(), e);
            fail(e.getMessage());
        }
    }
}
| 9,257 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/sink/CamelSinkJMSITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.sink;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import jakarta.jms.JMSException;
import jakarta.jms.Message;
import jakarta.jms.MessageConsumer;
import jakarta.jms.TextMessage;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.sjms2.clients.JMSClient;
import org.apache.camel.kafkaconnector.sjms2.common.SJMS2Common;
import org.apache.camel.test.infra.messaging.services.MessagingService;
import org.apache.camel.test.infra.messaging.services.MessagingServiceFactory;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
/**
* Integration tests for the JMS sink
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSinkJMSITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static MessagingService jmsService = MessagingServiceFactory
            .builder()
            .addLocalMapping(SJMS2Common::createLocalService)
            .build();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkJMSITCase.class);

    private String topicName;
    // Number of JMS messages received so far (reset per test)
    private int received;
    // Number of messages each test sends and expects to receive
    private final int expect = 10;

    /**
     * Connection properties pointing the sjms2 component at the test broker.
     */
    private Properties connectionProperties() {
        Properties properties = new Properties();
        properties.put("camel.component.sjms2.connection-factory", "#class:org.apache.qpid.jms.JmsConnectionFactory");
        properties.put("camel.component.sjms2.connection-factory.remoteURI", jmsService.defaultEndpoint());
        return properties;
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-sjms2-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        LOG.info("JMS service running at {}", jmsService.defaultEndpoint());
        received = 0;
        topicName = getTopicForTest(this);
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(35, TimeUnit.SECONDS)) {
            // JUnit 5 assertEquals takes the expected value first, then the actual one
            assertEquals(expect, received, "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    /**
     * JMS consumer callback: counts text messages and stops consumption once
     * the expected number has been received.
     *
     * @return true to keep consuming, false to stop
     */
    private boolean checkRecord(Message jmsMessage) {
        if (jmsMessage instanceof TextMessage) {
            try {
                LOG.debug("Received: {}", ((TextMessage) jmsMessage).getText());

                received++;

                if (received == expect) {
                    LOG.debug("All messages were received");
                    return false;
                }

                return true;
            } catch (JMSException e) {
                LOG.error("Failed to read message: {}", e.getMessage(), e);
                fail("Failed to read message: " + e.getMessage());
            }
        }
        // Non-text messages are unexpected: stop consuming
        return false;
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        JMSClient jmsClient = null;

        try {
            jmsClient = JMSClient.newClient(jmsService.defaultEndpoint());
            jmsClient.start();

            try (MessageConsumer consumer = jmsClient.createConsumer(SJMS2Common.DEFAULT_JMS_QUEUE)) {
                jmsClient.receive(consumer, this::checkRecord);
            }
        } catch (Exception e) {
            LOG.error("JMS test failed: {}", e.getMessage(), e);
            fail(e.getMessage());
        } finally {
            // Always release the latch so verifyMessages() doesn't block for its full timeout
            latch.countDown();

            if (jmsClient != null) {
                jmsClient.stop();
            }
        }
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                .basic()
                .withTopics(topicName)
                .withConnectionProperties(connectionProperties())
                .withDestinationName(SJMS2Common.DEFAULT_JMS_QUEUE);

        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceiveUsingUrl() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelJMSPropertyFactory
                .basic()
                .withTopics(topicName)
                .withConnectionProperties(connectionProperties())
                .withUrl(SJMS2Common.DEFAULT_JMS_QUEUE)
                .buildUrl();

        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,258 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/sink/CamelJMSPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.sink;
import java.util.Properties;
import org.apache.camel.kafkaconnector.common.EndpointUrlBuilder;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
/**
* Creates the set of properties used by a Camel JMS Sink Connector
*/
/**
 * Builds the property set consumed by a Camel JMS (sjms2) sink connector.
 */
final class CamelJMSPropertyFactory extends SinkConnectorPropertyFactory<CamelJMSPropertyFactory> {
    private CamelJMSPropertyFactory() {
    }

    /** Sets the JMS destination (queue) the sink writes to. */
    public CamelJMSPropertyFactory withDestinationName(String destinationName) {
        return setProperty("camel.sink.path.destinationName", destinationName);
    }

    /** Merges broker connection settings into the connector configuration. */
    public CamelJMSPropertyFactory withConnectionProperties(Properties connectionProperties) {
        return merge(connectionProperties);
    }

    /** Starts building a raw sjms2 endpoint URL for the given destination. */
    public EndpointUrlBuilder<CamelJMSPropertyFactory> withUrl(String destinationName) {
        String endpointUrl = "sjms2:" + destinationName;
        return new EndpointUrlBuilder<>(this::withSinkUrl, endpointUrl);
    }

    /** Creates a factory pre-populated with the connector's basic settings. */
    public static CamelJMSPropertyFactory basic() {
        CamelJMSPropertyFactory factory = new CamelJMSPropertyFactory();

        return factory
                .withName("CamelJmsSinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.sjms2.CamelSjms2SinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,259 |
0 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2 | Create_ds/camel-kafka-connector/tests/itests-sjms2/src/test/java/org/apache/camel/kafkaconnector/sjms2/common/SJMS2Common.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.sjms2.common;
import org.apache.camel.test.infra.dispatch.router.services.DispatchRouterContainer;
import org.apache.camel.test.infra.messaging.services.MessagingLocalContainerService;
public final class SJMS2Common {
    /**
     * The default JMS queue name used during the tests
     */
    public static final String DEFAULT_JMS_QUEUE = "ckc.queue";

    private SJMS2Common() {
        // utility class: no instances
    }

    /** Spins up a local dispatch-router container as the messaging service. */
    public static MessagingLocalContainerService<DispatchRouterContainer> createLocalService() {
        DispatchRouterContainer router = new DispatchRouterContainer();

        return new MessagingLocalContainerService<>(router, svc -> router.defaultEndpoint());
    }
}
| 9,260 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty/source/CamelSourceNettyITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.netty.source;
import java.io.PrintWriter;
import java.net.Socket;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.kafkaconnector.common.utils.NetworkUtils;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
public class CamelSourceNettyITCase extends CamelSourceTestSupport {
    private final String host = NetworkUtils.getHostname();
    private final int port = NetworkUtils.getFreePort();
    // Number of messages the test sends and expects to find on the Kafka topic
    private final int expect = 1;
    private String topicName;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-netty-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
    }

    @Override
    protected void produceTestData() {
        // Wait until the connector's Netty consumer is actually listening
        TestUtils.waitFor(() -> NetworkUtils.portIsOpen(host, port));
        sendMessage();
    }

    /**
     * Connects to the connector's TCP endpoint and writes a single test message.
     */
    void sendMessage() {
        // Connect to the same host the connector was configured with instead of
        // resolving the hostname a second time
        try (Socket s = new Socket(host, port);
             PrintWriter out = new PrintWriter(s.getOutputStream())) {
            out.print("Hello CKC!");
            out.flush();
        } catch (Exception e) {
            fail(e.getMessage(), e);
        }
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        Object receivedObject = consumer.consumedMessages().get(0).value();

        assertEquals(expect, received, "Did not receive as many messages as expected");
        assertEquals("Hello CKC!", receivedObject, "Received message content differed");
    }

    @Test
    @Timeout(30)
    public void testLaunchConnector() throws ExecutionException, InterruptedException {
        CamelNettyPropertyFactory connectorPropertyFactory = CamelNettyPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withProtocol("tcp")
                .withHost(host)
                .withPort(port)
                // one-way as test client doesn't receive response
                .withSync(false);

        runTestBlocking(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(30)
    public void testLaunchConnectorUsingUrl() throws ExecutionException, InterruptedException {
        CamelNettyPropertyFactory connectorPropertyFactory = CamelNettyPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withUrl("tcp", host, port)
                // one-way as test client doesn't receive response
                .append("sync", "false")
                .buildUrl();

        runTestBlocking(connectorPropertyFactory, topicName, expect);
    }
}
| 9,261 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty/source/CamelNettyPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.netty.source;
import org.apache.camel.kafkaconnector.common.EndpointUrlBuilder;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
/**
 * Builds the property set consumed by a Camel Netty source connector.
 */
final class CamelNettyPropertyFactory extends SourceConnectorPropertyFactory<CamelNettyPropertyFactory> {
    private CamelNettyPropertyFactory() {
    }

    /** Sets the transport protocol (e.g. tcp or udp). */
    public CamelNettyPropertyFactory withProtocol(String value) {
        return setProperty("camel.source.path.protocol", value);
    }

    /** Sets the host the Netty consumer binds to. */
    public CamelNettyPropertyFactory withHost(String value) {
        return setProperty("camel.source.path.host", value);
    }

    /** Sets the port the Netty consumer listens on. */
    public CamelNettyPropertyFactory withPort(int value) {
        return setProperty("camel.source.path.port", value);
    }

    /** Enables or disables synchronous (request/reply) mode. */
    public CamelNettyPropertyFactory withSync(boolean value) {
        return setProperty("camel.source.endpoint.sync", value);
    }

    /** Starts building a raw netty endpoint URL. */
    public EndpointUrlBuilder<CamelNettyPropertyFactory> withUrl(String protocol, String host, int port) {
        String endpointUrl = "netty:" + protocol + "://" + host + ":" + port;

        return new EndpointUrlBuilder<>(this::withSourceUrl, endpointUrl);
    }

    /** Creates a factory pre-populated with the connector's basic settings. */
    public static CamelNettyPropertyFactory basic() {
        CamelNettyPropertyFactory factory = new CamelNettyPropertyFactory();

        return factory
                .withName("CamelNettySourceConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.netty.CamelNettySourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,262 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty/sink/CamelNettyPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.netty.sink;
import org.apache.camel.kafkaconnector.common.EndpointUrlBuilder;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
/**
 * Builds the property set consumed by a Camel Netty sink connector.
 */
final class CamelNettyPropertyFactory extends SinkConnectorPropertyFactory<CamelNettyPropertyFactory> {
    private CamelNettyPropertyFactory() {
    }

    /** Sets the transport protocol (e.g. tcp or udp). */
    public CamelNettyPropertyFactory withProtocol(String value) {
        return setProperty("camel.sink.path.protocol", value);
    }

    /** Sets the host the Netty producer connects to. */
    public CamelNettyPropertyFactory withHost(String value) {
        return setProperty("camel.sink.path.host", value);
    }

    /** Sets the port the Netty producer connects to. */
    public CamelNettyPropertyFactory withPort(int value) {
        return setProperty("camel.sink.path.port", value);
    }

    /** Whether the producer should disconnect right after each send. */
    public CamelNettyPropertyFactory withDisconnect(boolean value) {
        return setProperty("camel.sink.endpoint.disconnect", value);
    }

    /** Enables or disables synchronous (request/reply) mode. */
    public CamelNettyPropertyFactory withSync(boolean value) {
        return setProperty("camel.sink.endpoint.sync", value);
    }

    /** Starts building a raw netty endpoint URL. */
    public EndpointUrlBuilder<CamelNettyPropertyFactory> withUrl(String protocol, String host, int port) {
        return new EndpointUrlBuilder<>(this::withSinkUrl, String.format("netty:%s://%s:%d", protocol, host, port));
    }

    /** Creates a factory pre-populated with the connector's basic settings. */
    public static CamelNettyPropertyFactory basic() {
        CamelNettyPropertyFactory factory = new CamelNettyPropertyFactory();

        return factory
                .withName("CamelNettySinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.netty.CamelNettySinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,263 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty | Create_ds/camel-kafka-connector/tests/itests-netty/src/test/java/org/apache/camel/kafkaconnector/netty/sink/CamelSinkNettyITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.netty.sink;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.utils.NetworkUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
public class CamelSinkNettyITCase extends CamelSinkTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkNettyITCase.class);

    private final int port = NetworkUtils.getFreePort();
    private String topicName;
    // Number of records produced to the Kafka topic
    private final int expect = 1;
    // Line read by the mock TCP server; written on the consumer thread
    private volatile String received;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-netty-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        received = null;
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        // Minimal mock TCP server: accept a single connection, read one line, quit
        try (ServerSocket acceptor = new ServerSocket(port);
                Socket connection = acceptor.accept();
                InputStream stream = connection.getInputStream();
                BufferedReader lineReader = new BufferedReader(new InputStreamReader(stream))) {
            received = lineReader.readLine();
            LOG.debug("Received: {}", received);
        } catch (IOException e) {
            LOG.error("Unable to receive messages: {}", e.getMessage(), e);
        } finally {
            // Release the latch even on failure so verification doesn't hang
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (!latch.await(30, TimeUnit.SECONDS)) {
            fail("Failed to receive the messages within the specified time");
        }

        assertEquals("Sink test message 0", received, "Received message content differed");
    }

    @Test
    @Timeout(30)
    public void testBasicSendReceive() throws Exception {
        ConnectorPropertyFactory propertyFactory = CamelNettyPropertyFactory.basic()
                .withTopics(topicName)
                .withProtocol("tcp")
                .withHost(NetworkUtils.getHostname())
                .withPort(port)
                // disconnect so that it won't keep mock server socket forever
                .withDisconnect(true)
                // one-way as mock server doesn't send replies
                .withSync(false);

        runTest(propertyFactory, topicName, expect);
    }

    @Test
    @Timeout(30)
    public void testBasicSendReceiveUsingUrl() throws Exception {
        ConnectorPropertyFactory propertyFactory = CamelNettyPropertyFactory.basic()
                .withTopics(topicName)
                .withUrl("tcp", NetworkUtils.getHostname(), port)
                // disconnect so that it won't keep mock server socket forever
                .append("disconnect", "true")
                // one-way as mock server doesn't send replies
                .append("sync", "false")
                .buildUrl();

        runTest(propertyFactory, topicName, expect);
    }
}
| 9,264 |
0 | Create_ds/camel-kafka-connector/tests/itests-cassandra/src/test/java/org/apache/camel/kafkaconnector/cassandra | Create_ds/camel-kafka-connector/tests/itests-cassandra/src/test/java/org/apache/camel/kafkaconnector/cassandra/clients/CassandraClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.cassandra.clients;
import java.net.InetSocketAddress;
import com.datastax.oss.driver.api.core.CqlSession;
import org.apache.camel.kafkaconnector.cassandra.clients.dao.TestDataDao;
/**
* A simple client for Cassandra for testing purposes
*/
public class CassandraClient {
    // Created once in the constructor and shared by every DAO this client hands out
    private final CqlSession session;

    /**
     * Connects to the given Cassandra contact point.
     *
     * @param host contact point host
     * @param port contact point (CQL) port
     */
    public CassandraClient(String host, int port) {
        InetSocketAddress socketAddress = new InetSocketAddress(host, port);

        session = CqlSession.builder()
                .addContactPoint(socketAddress)
                .withLocalDatacenter("datacenter1")
                .build();
    }

    /**
     * Creates a new DAO bound to this client's session.
     */
    public TestDataDao newTestDataDao() {
        return new TestDataDao(this.session);
    }
}
| 9,265 |
0 | Create_ds/camel-kafka-connector/tests/itests-cassandra/src/test/java/org/apache/camel/kafkaconnector/cassandra/clients | Create_ds/camel-kafka-connector/tests/itests-cassandra/src/test/java/org/apache/camel/kafkaconnector/cassandra/clients/dao/TestResultSetConversionStrategy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.cassandra.clients.dao;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import org.apache.camel.component.cassandra.ResultSetConversionStrategy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestResultSetConversionStrategy implements ResultSetConversionStrategy {
    private static final Logger LOG = LoggerFactory.getLogger(TestResultSetConversionStrategy.class);

    /**
     * Converts a result set into a list holding the "text" column of every row.
     */
    @Override
    public Object getBody(ResultSet resultSet) {
        List<String> values = new ArrayList<>();

        for (Iterator<Row> rows = resultSet.iterator(); rows.hasNext();) {
            String data = rows.next().getString("text");

            values.add(data);
            LOG.info("Retrieved data: {}", data);
        }

        return values;
    }
}
| 9,266 |
0 | Create_ds/camel-kafka-connector/tests/itests-cassandra/src/test/java/org/apache/camel/kafkaconnector/cassandra/clients | Create_ds/camel-kafka-connector/tests/itests-cassandra/src/test/java/org/apache/camel/kafkaconnector/cassandra/clients/dao/TestDataDao.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.cassandra.clients.dao;
import java.time.Duration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestDataDao {
    public static final String KEY_SPACE = "ckc_ks";
    public static final String TABLE_NAME = "test_data";

    private static final Logger LOG = LoggerFactory.getLogger(TestDataDao.class);

    private final CqlSession session;

    public TestDataDao(CqlSession session) {
        this.session = session;
    }

    /**
     * Creates the test keyspace if it does not already exist.
     */
    public void createKeySpace() {
        Map<String, Object> replication = new HashMap<>();
        replication.put("class", "SimpleStrategy");
        replication.put("replication_factor", 3);

        String statement = SchemaBuilder.createKeyspace(KEY_SPACE)
                .ifNotExists()
                .withReplicationOptions(replication)
                .asCql();

        LOG.info("Executing {}", statement);
        ResultSet rs = session.execute(statement);
        if (!rs.wasApplied()) {
            LOG.warn("The create key space statement did not execute");
        }
    }

    public void useKeySpace() {
        // Use String.format because "Bind variables cannot be used for keyspace names"
        String statement = String.format("USE %s", KEY_SPACE);
        session.execute(statement);
    }

    /**
     * Creates the test table with a time-based UUID key and a text column.
     */
    public void createTable() {
        SimpleStatement statement = SchemaBuilder.createTable(TABLE_NAME)
                .withPartitionKey("id", DataTypes.TIMEUUID)
                .withClusteringColumn("text", DataTypes.TEXT)
                .builder()
                .setTimeout(Duration.ofSeconds(10)).build();

        LOG.info("Executing create table {}", statement);
        ResultSet rs = session.execute(statement);
        if (!rs.wasApplied()) {
            LOG.warn("The create table statement did not execute");
        }
    }

    public void dropTable() {
        String statement = SchemaBuilder.dropTable(TABLE_NAME)
                .asCql();

        LOG.info("Executing drop table {}", statement);
        ResultSet rs = session.execute(statement);
        if (!rs.wasApplied()) {
            LOG.warn("The drop table statement did not execute");
        }
    }

    /**
     * Checks whether the table holds exactly the expected number of rows.
     */
    public boolean hasEnoughData(long expected) {
        // Build the CQL from TABLE_NAME so a table rename stays consistent everywhere
        ResultSet rs = session.execute(String.format("select count(*) from %s", TABLE_NAME));
        if (rs == null) {
            return false;
        }

        List<Row> all = rs.all();
        if (all == null || all.isEmpty()) {
            return false;
        }

        long count = all.get(0).getLong("count");

        return count == expected;
    }

    public String getInsertStatement() {
        return String.format("insert into %s(id, text) values (now(), ?)", TABLE_NAME);
    }

    public String getSelectStatement() {
        return String.format("select text from %s", TABLE_NAME);
    }

    /**
     * Feeds the "text" column of every row in the table to the given consumer.
     */
    public void getData(Consumer<String> consumer) {
        ResultSet rs = session.execute(String.format("select * from %s", TABLE_NAME));
        if (rs == null) {
            LOG.warn("No records were returned");
            return;
        }

        for (Iterator<Row> rows = rs.iterator(); rows.hasNext();) {
            String data = rows.next().getString("text");

            LOG.info("Retrieved data: {}", data);
            consumer.accept(data);
        }
    }

    public void insert(String text) {
        session.execute(getInsertStatement(), text);
    }
}
| 9,267 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.cassandra.source;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
/**
 * Assembles the connector configuration used by the Cassandra source integration tests.
 * Most options map onto properties of the {@code cassandra-source} kamelet.
 */
final class CamelCassandraPropertyFactory extends SourceConnectorPropertyFactory<CamelCassandraPropertyFactory> {

    private static final String KAMELET_PREFIX = "camel.kamelet.cassandra-source.";

    private CamelCassandraPropertyFactory() {
    }

    /** Sets the CQL query executed by the source. */
    public CamelCassandraPropertyFactory withQuery(String query) {
        return setProperty(KAMELET_PREFIX + "query", query);
    }

    /** Sets the contact point host(s) of the Cassandra cluster. */
    public CamelCassandraPropertyFactory withHosts(String hosts) {
        return setProperty(KAMELET_PREFIX + "connectionHost", hosts);
    }

    /** Convenience overload accepting the CQL3 port as an int. */
    public CamelCassandraPropertyFactory withPort(int port) {
        return withPort(String.valueOf(port));
    }

    /** Sets the CQL3 port of the Cassandra cluster. */
    public CamelCassandraPropertyFactory withPort(String port) {
        return setProperty(KAMELET_PREFIX + "connectionPort", port);
    }

    /** Sets the key space queried by the source. */
    public CamelCassandraPropertyFactory withKeySpace(String keySpace) {
        return setProperty(KAMELET_PREFIX + "keyspace", keySpace);
    }

    /** Sets the strategy used to convert the CQL result set (endpoint-level option). */
    public CamelCassandraPropertyFactory withResultSetConversionStrategy(String strategy) {
        return setProperty("camel.endpoint.cql.resultSetConversionStrategy", strategy);
    }

    /** Creates a factory pre-populated with the settings shared by all source tests. */
    public static CamelCassandraPropertyFactory basic() {
        return new CamelCassandraPropertyFactory()
                .withName("CamelCassandraSourceConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.cassandrasource.CamelCassandrasourceSourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty(KAMELET_PREFIX + "consistencyLevel", "ONE")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,268 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.cassandra.source;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.cassandra.clients.CassandraClient;
import org.apache.camel.kafkaconnector.cassandra.clients.dao.TestDataDao;
import org.apache.camel.kafkaconnector.cassandra.clients.dao.TestResultSetConversionStrategy;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.test.infra.cassandra.services.CassandraService;
import org.apache.camel.test.infra.cassandra.services.CassandraServiceFactory;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.camel.kafkaconnector.common.BasicConnectorPropertyFactory.classRef;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * Integration test for the Cassandra source connector: rows inserted into a Cassandra
 * table must show up as records on the configured Kafka topic.
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSourceCassandraITCase extends CamelSourceTestSupport {
    @RegisterExtension
    public static CassandraService cassandraService = CassandraServiceFactory.createService();

    private static final Logger LOG = LoggerFactory.getLogger(CamelSourceCassandraITCase.class);

    private CassandraClient cassandraClient;
    private TestDataDao testDataDao;
    private String topicName;

    // Number of rows inserted into Cassandra and expected on the Kafka topic
    private final int expect = 1;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-cassandra-source-kafka-connector"};
    }

    /** Creates the key space/table and seeds the test rows once for the whole class. */
    @BeforeAll
    public void setUpTestData() {
        cassandraClient = new CassandraClient(cassandraService.getCassandraHost(), cassandraService.getCQL3Port());

        testDataDao = cassandraClient.newTestDataDao();
        testDataDao.createKeySpace();
        testDataDao.useKeySpace();
        testDataDao.createTable();

        for (int i = 0; i < expect; i++) {
            testDataDao.insert("Test data " + i);
        }
    }

    @BeforeEach
    public void setUpTest() {
        topicName = getTopicForTest(this);
    }

    @AfterAll
    public void tearDown() {
        if (testDataDao != null) {
            try {
                testDataDao.dropTable();
            } catch (Exception e) {
                LOG.warn("Unable to drop the table: {}", e.getMessage(), e);
            }
        }
    }

    @Override
    protected void produceTestData() {
        // NO-OP (done at the testSetup)
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();

        // JUnit 5 takes the expected value first; the original call had the arguments
        // swapped, which would produce a misleading failure message.
        assertEquals(expect, received, "Didn't process the expected amount of messages");
    }

    @Timeout(90)
    @Test
    public void testRetrieveFromCassandra() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelCassandraPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withHosts(cassandraService.getCassandraHost())
                .withPort(cassandraService.getCQL3Port())
                .withKeySpace(TestDataDao.KEY_SPACE)
                .withResultSetConversionStrategy("ONE")
                .withQuery(testDataDao.getSelectStatement());

        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Timeout(90)
    @Test
    public void testRetrieveFromCassandraWithCustomStrategy() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelCassandraPropertyFactory
                .basic()
                .withKafkaTopic(topicName)
                .withHosts(cassandraService.getCassandraHost())
                .withPort(cassandraService.getCQL3Port())
                .withKeySpace(TestDataDao.KEY_SPACE)
                .withResultSetConversionStrategy(classRef(TestResultSetConversionStrategy.class.getName()))
                .withQuery(testDataDao.getSelectStatement());

        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,269 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.cassandra.sink;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.kafkaconnector.cassandra.clients.CassandraClient;
import org.apache.camel.kafkaconnector.cassandra.clients.dao.TestDataDao;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.cassandra.services.CassandraService;
import org.apache.camel.test.infra.cassandra.services.CassandraServiceFactory;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
// Integration test for the Cassandra sink connector: messages produced to a Kafka topic
// must end up as rows in a Cassandra table via the connector.
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSinkCassandraITCase extends CamelSinkTestSupport {
    @RegisterExtension
    public static CassandraService cassandraService = CassandraServiceFactory.createService();
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkCassandraITCase.class);
    private CassandraClient cassandraClient;
    private TestDataDao testDataDao;
    private String topicName;
    // Number of messages produced to Kafka and expected to land in Cassandra
    private final int expect = 10;
    // Running count of non-null rows read back from Cassandra during verification
    private int received;
    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-cassandra-sink-kafka-connector"};
    }
    // Creates the key space once for the whole class (PER_CLASS lifecycle)
    @BeforeAll
    public void setUpTestData() {
        cassandraClient = new CassandraClient(cassandraService.getCassandraHost(), cassandraService.getCQL3Port());
        testDataDao = cassandraClient.newTestDataDao();
        testDataDao.createKeySpace();
        testDataDao.useKeySpace();
    }
    // Recreates the table before each test so every test starts from an empty table
    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        received = 0;
        testDataDao.createTable();
    }
    @AfterEach
    public void tearDown() {
        if (testDataDao != null) {
            try {
                testDataDao.dropTable();
            } catch (Exception e) {
                LOG.warn("Unable to drop the table: {}", e.getMessage(), e);
            }
        }
    }
    // Waits until the table contains the expected number of rows, then reads them back,
    // counting them via checkRetrievedData
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            if (!TestUtils.waitFor(testDataDao::hasEnoughData, (long) expect)) {
                fail("Did not receive enough data");
            }
            testDataDao.getData(this::checkRetrievedData);
        } finally {
            // Always release the latch so verifyMessages does not block forever
            latch.countDown();
        }
    }
    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(30, TimeUnit.SECONDS)) {
            assertEquals(expect, received,
                    "Didn't process the expected amount of messages: " + received + " != " + expect);
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }
    // Callback used while reading rows back: counts every non-null value
    private void checkRetrievedData(String data) {
        if (data != null) {
            received++;
        }
    }
    @Timeout(90)
    @Test
    public void testFetchFromCassandra() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelCassandraPropertyFactory
                .basic()
                .withTopics(topicName)
                .withHosts(cassandraService.getCassandraHost())
                .withPort(cassandraService.getCQL3Port())
                .withKeySpace(TestDataDao.KEY_SPACE)
                .withQuery(testDataDao.getInsertStatement());
        runTest(connectorPropertyFactory, new CassandraStringMessageProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
    // Producer emitting JSON-array-shaped payloads; presumably matched to the bind
    // parameter of the insert statement — confirm against the cassandra-sink kamelet
    private class CassandraStringMessageProducer extends StringMessageProducer {
        public CassandraStringMessageProducer(String bootStrapServer, String topicName, int count) {
            super(bootStrapServer, topicName, count);
        }
        @Override
        public String testMessageContent(int current) {
            return "[{ \"message\": " + current + " }]";
        }
    }
}
| 9,270 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.cassandra.sink;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
/**
 * Assembles the connector configuration used by the Cassandra sink integration tests.
 * All test-specific options map onto properties of the {@code cassandra-sink} kamelet.
 */
final class CamelCassandraPropertyFactory extends SinkConnectorPropertyFactory<CamelCassandraPropertyFactory> {

    private static final String KAMELET_PREFIX = "camel.kamelet.cassandra-sink.";

    private CamelCassandraPropertyFactory() {
    }

    /** Sets the key space the sink writes into. */
    public CamelCassandraPropertyFactory withKeySpace(String keySpace) {
        return setProperty(KAMELET_PREFIX + "keyspace", keySpace);
    }

    /** Sets the CQL statement executed for each incoming record. */
    public CamelCassandraPropertyFactory withQuery(String query) {
        return setProperty(KAMELET_PREFIX + "query", query);
    }

    /** Sets the contact point host(s) of the Cassandra cluster. */
    public CamelCassandraPropertyFactory withHosts(String hosts) {
        return setProperty(KAMELET_PREFIX + "connectionHost", hosts);
    }

    /** Convenience overload accepting the CQL3 port as an int. */
    public CamelCassandraPropertyFactory withPort(int port) {
        return withPort(String.valueOf(port));
    }

    /** Sets the CQL3 port of the Cassandra cluster. */
    public CamelCassandraPropertyFactory withPort(String port) {
        return setProperty(KAMELET_PREFIX + "connectionPort", port);
    }

    /** Creates a factory pre-populated with the settings shared by all sink tests. */
    public static CamelCassandraPropertyFactory basic() {
        return new CamelCassandraPropertyFactory()
                .withName("CamelCassandraSinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.cassandrasink.CamelCassandrasinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty(KAMELET_PREFIX + "prepareStatements", "false")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,271 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.https.sink;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.RecordedRequest;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.services.mockweb.MockWebService;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.hc.client5.http.ssl.NoopHostnameVerifier;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test for the HTTPS sink connector: messages published to Kafka must be
 * delivered as HTTPS requests to a TLS-enabled mock web server.
 */
public class CamelSinkHTTPSITCase extends CamelSinkTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkHTTPSITCase.class);

    @RegisterExtension
    public final MockWebService mockWebService = MockWebService.builder()
            .useHttps()
            .withKeystore("/server-keystore.jks", "secret")
            .build();

    private MockWebServer mockServer;
    private String topicName;

    // Number of messages sent to Kafka and expected as HTTPS requests
    private final int expect = 10;
    private List<RecordedRequest> received;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-https-kafka-connector"};
    }

    @BeforeEach
    public void setUp() throws Exception {
        topicName = getTopicForTest(this);
        mockServer = mockWebService.getServer();
        received = Collections.emptyList();
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            // Collect up to 'expect' requests; a null entry means the request did not
            // arrive within the per-request timeout
            received = IntStream.range(0, expect).mapToObj(i -> {
                try {
                    return mockServer.takeRequest(10, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    LOG.error("Unable to receive messages: {}", e.getMessage(), e);
                    // Restore the interrupt flag so callers can observe the interruption
                    Thread.currentThread().interrupt();
                    return null;
                }
            }).collect(Collectors.toList());
        } finally {
            // Always release the latch so verifyMessages does not block forever
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        String expected = "Sink test message ";

        if (latch.await(30, TimeUnit.SECONDS)) {
            assertEquals(expect, received.size(), "Did not receive the same amount of messages that were sent");

            for (RecordedRequest request : received) {
                // Fail with a clear message instead of an NPE when takeRequest timed out
                if (request == null) {
                    fail("At least one of the expected requests was not received in time");
                }

                String actual = request.getBody().readUtf8();
                LOG.debug("Received: {} ", actual);

                assertEquals("/ckc", request.getRequestUrl().encodedPath(), "Received path differed");
                assertTrue(actual.startsWith(expected), "Received message content differed");
            }
            // NOTE: the original code repeated the size assertion here; the duplicate was removed.
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    @Test
    @Timeout(60)
    public void testBasicSendReceive() throws Exception {
        mockWebService.enqueueResponses(expect);

        LOG.info("Trusted store path: {}", toPath("client-truststore.jks"));

        String uri = mockServer.getHostName() + ":" + mockServer.getPort() + "/ckc";
        ConnectorPropertyFactory connectorPropertyFactory = CamelHTTPSPropertyFactory.basic()
                .withTopics(topicName)
                .withHttpUri(uri)
                .withSslContextParameters("scp", "file:" + toPath("client-truststore.jks"), "secret")
                // let's skip host verification as hostname may vary depending on test env
                .withX509HostnameVerifier("x509HostnameVerifier", NoopHostnameVerifier.class);

        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(60)
    public void testBasicSendReceiveHttpUriWithQueryString() throws Exception {
        mockWebService.enqueueResponses(expect);

        String uri = mockServer.getHostName() + ":" + mockServer.getPort() + "/ckc?aaa=xxx&bbb=yyy&ccc=zzz";
        ConnectorPropertyFactory connectorPropertyFactory = CamelHTTPSPropertyFactory.basic()
                .withTopics(topicName)
                .withHttpUri(uri)
                .withHttpMethod("POST")
                .withSslContextParameters("scp", "file:" + toPath("client-truststore.jks"), "secret")
                // let's skip host verification as hostname may vary depending on test env
                .withX509HostnameVerifier("x509HostnameVerifier", NoopHostnameVerifier.class);

        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(60)
    public void testBasicSendReceiveUsingUrl() throws Exception {
        mockWebService.enqueueResponses(expect);

        ConnectorPropertyFactory connectorPropertyFactory = CamelHTTPSPropertyFactory.basic()
                .withTopics(topicName)
                .withSslContextParameters("scp", "file:" + toPath("client-truststore.jks"), "secret")
                // let's skip host verification as hostname may vary depending on test env
                .withX509HostnameVerifier("x509HostnameVerifier", NoopHostnameVerifier.class)
                .withUrl(mockServer.getHostName(), mockServer.getPort(), "ckc")
                .append("sslContextParameters", "#bean:scp")
                .append("x509HostnameVerifier", "#bean:x509HostnameVerifier")
                .buildUrl();

        runTest(connectorPropertyFactory, topicName, expect);
    }

    /** Resolves a classpath resource to a filesystem path, failing fast when absent. */
    private String toPath(String resource) {
        URL url = Objects.requireNonNull(getClass().getClassLoader().getResource(resource));

        return url.getPath();
    }
}
| 9,272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.https.sink;
import org.apache.camel.LoggingLevel;
import org.apache.camel.kafkaconnector.common.EndpointUrlBuilder;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
import org.apache.camel.support.jsse.KeyStoreParameters;
import org.apache.camel.support.jsse.SSLContextParameters;
import org.apache.camel.support.jsse.TrustManagersParameters;
/**
 * Builds the configuration for the HTTPS sink connector tests, including the
 * SSL-context and hostname-verifier beans that the endpoint references.
 */
final class CamelHTTPSPropertyFactory extends SinkConnectorPropertyFactory<CamelHTTPSPropertyFactory> {
    private CamelHTTPSPropertyFactory() {
    }

    /** Sets the target URI (host:port/path) of the HTTPS sink endpoint. */
    public CamelHTTPSPropertyFactory withHttpUri(String uri) {
        return setProperty("camel.sink.path.httpUri", uri);
    }

    /** Sets the HTTP method used for the outgoing requests. */
    public CamelHTTPSPropertyFactory withHttpMethod(String method) {
        return setProperty("camel.sink.endpoint.httpMethod", method);
    }

    /**
     * Declares the bean graph for an {@code SSLContextParameters} backed by the given
     * trust store and wires it into the endpoint under the supplied bean name.
     */
    public CamelHTTPSPropertyFactory withSslContextParameters(String bean, String keyStore, String password) {
        // Key store holding the trusted certificates
        withBeans("ksp", classRef(KeyStoreParameters.class));
        withBeans("ksp.resource", keyStore);
        withBeans("ksp.password", password);

        // Trust managers backed by the key store above
        withBeans("tmp", classRef(TrustManagersParameters.class));
        withBeans("tmp.keyStore", "#bean:ksp");

        // SSL context referencing the trust managers
        withBeans(bean, classRef(SSLContextParameters.class));
        withBeans(bean + ".trustManagers", "#bean:tmp");

        return setProperty("camel.sink.endpoint.sslContextParameters", "#bean:" + bean);
    }

    /** Registers a hostname verifier bean and wires it into the endpoint. */
    public CamelHTTPSPropertyFactory withX509HostnameVerifier(String bean, Class<?> verifierClass) {
        withBeans(bean, classRef(verifierClass));

        return setProperty("camel.sink.endpoint.x509HostnameVerifier", "#bean:" + bean);
    }

    /** Starts an endpoint-URL builder for a {@code https://host:port/path} sink URL. */
    public EndpointUrlBuilder<CamelHTTPSPropertyFactory> withUrl(String host, int port, String path) {
        final String url = String.format("https://%s:%s/%s", host, port, path);

        return new EndpointUrlBuilder<>(this::withSinkUrl, url);
    }

    /** Creates a factory pre-populated with the settings shared by all HTTPS sink tests. */
    public static CamelHTTPSPropertyFactory basic() {
        return new CamelHTTPSPropertyFactory()
                .withName("CamelHttpsSinkConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.https.CamelHttpsSinkConnector")
                .withTasksMax(1)
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withSinkContentLogginglevel(LoggingLevel.DEBUG);
    }
}
| 9,273 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.couchbase.sink;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
/**
 * Assembles the connector configuration used by the Couchbase sink integration tests.
 * All test-specific options map onto properties of the {@code couchbase-sink} kamelet.
 */
public class CamelCouchbasePropertyFactory extends SinkConnectorPropertyFactory<CamelCouchbasePropertyFactory> {

    private static final String KAMELET_PREFIX = "camel.kamelet.couchbase-sink.";

    /** Sets the protocol used to reach Couchbase. */
    public CamelCouchbasePropertyFactory withProtocol(String protocol) {
        return setProperty(KAMELET_PREFIX + "protocol", protocol);
    }

    /** Sets the Couchbase host name. */
    public CamelCouchbasePropertyFactory withHostname(String hostname) {
        return setProperty(KAMELET_PREFIX + "couchbaseHostname", hostname);
    }

    /** Sets the Couchbase port. */
    public CamelCouchbasePropertyFactory withPort(int port) {
        return setProperty(KAMELET_PREFIX + "couchbasePort", port);
    }

    /** Sets the destination bucket. */
    public CamelCouchbasePropertyFactory withBucket(String bucket) {
        return setProperty(KAMELET_PREFIX + "bucket", bucket);
    }

    /** Sets the user name used to authenticate against the cluster. */
    public CamelCouchbasePropertyFactory withUsername(String username) {
        return setProperty(KAMELET_PREFIX + "username", username);
    }

    /** Sets the password used to authenticate against the cluster. */
    public CamelCouchbasePropertyFactory withPassword(String password) {
        return setProperty(KAMELET_PREFIX + "password", password);
    }

    /** Creates a factory pre-populated with the settings shared by all Couchbase sink tests. */
    public static CamelCouchbasePropertyFactory basic() {
        return new CamelCouchbasePropertyFactory()
                .withTasksMax(1)
                .withName("CamelCouchbasesinkSinkConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.couchbasesink.CamelCouchbasesinkSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .setProperty("camel.component.kamelet.location", "kamelets");
    }
}
| 9,274 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.couchbase.sink;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.couchbase.client.core.diagnostics.EndpointPingReport;
import com.couchbase.client.core.diagnostics.PingResult;
import com.couchbase.client.core.diagnostics.PingState;
import com.couchbase.client.core.service.ServiceType;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.json.JsonObject;
import com.couchbase.client.java.manager.bucket.BucketSettings;
import com.couchbase.client.java.query.QueryResult;
import org.apache.camel.kafkaconnector.CamelSinkTask;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.test.infra.common.TestUtils;
import org.apache.camel.test.infra.couchbase.services.CouchbaseService;
import org.apache.camel.test.infra.couchbase.services.CouchbaseServiceFactory;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.fail;
/*
This test is slow and potentially flaky. It might fail on systems with limited resources and slow I/O.
Most probably due to this bug in the couchbase test container:
- https://github.com/testcontainers/testcontainers-java/issues/2993
Therefore, it is marked as slow test and must be explicitly enabled to be run.
*/
@EnabledIfSystemProperty(named = "enable.slow.tests", matches = "true")
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSinkCouchbaseITCase extends CamelSinkTestSupport {
@RegisterExtension
public static CouchbaseService service = CouchbaseServiceFactory.createService();
private static final Logger LOG = LoggerFactory.getLogger(CamelSinkCouchbaseITCase.class);
private String bucketName;
private String topic;
private Cluster cluster;
private final int expect = 10;
// Kafka producer for the Couchbase sink test: publishes small JSON payloads together
// with a Camel-prefixed "CCB_ID" header carrying the per-message index.
private static class CustomProducer extends StringMessageProducer {
    public CustomProducer(String bootstrapServer, String topicName, int count) {
        super(bootstrapServer, topicName, count);
    }
    @Override
    public String testMessageContent(int current) {
        // Body is a JSON document whose "data" field encodes the message index
        JsonObject jsonObject = JsonObject.create().put("data", String.format("test-%d", current));
        return jsonObject.toString();
    }
    @Override
    public Map<String, String> messageHeaders(String text, int current) {
        Map<String, String> parameters = new HashMap<>();
        // The Camel header prefix makes the connector map it onto a Camel exchange header
        parameters.put(CamelSinkTask.HEADER_CAMEL_PREFIX + "CCB_ID", String.valueOf(current));
        return parameters;
    }
}
// Connector under test; resolved by the base class when setting up the runtime
@Override
protected String[] getConnectorsInTest() {
    return new String[] {"camel-couchbase-sink-kafka-connector"};
}
// Per-test setup: connects to the cluster, creates a randomly named bucket and waits
// for the query service before the test runs.
@BeforeEach
public void setUp() {
    // Random suffix so repeated runs do not clash on the bucket name
    bucketName = "testBucket" + TestUtils.randomWithRange(0, 100);
    cluster = Cluster.connect(service.getConnectionString(), service.getUsername(), service.getPassword());
    cluster.ping().endpoints().entrySet().forEach(this::checkEndpoints);
    LOG.debug("Creating a new bucket named {}", bucketName);
    cluster.buckets().createBucket(BucketSettings.create(bucketName));
    PingResult pingResult = cluster.bucket(bucketName).ping();
    pingResult.endpoints().entrySet().forEach(this::checkEndpoints);
    LOG.debug("Bucket created");
    topic = getTopicForTest(this);
    // The query service can lag behind bucket creation; wait until it responds
    if (!TestUtils.waitFor(this::isQueryServiceUp)) {
        fail("Query Service failed to become ready in 30 seconds.");
    }
}
@AfterEach
public void tearDown() {
LOG.debug("Dropping the test bucket named {}", bucketName);
cluster.buckets().dropBucket(bucketName);
LOG.debug("Bucket dropped");
cluster.disconnect();
}
@Override
protected void consumeMessages(CountDownLatch latch) {
try {
TestUtils.waitFor(this::waitForMinimumRecordCount);
} finally {
latch.countDown();
}
}
@Override
protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
if (latch.await(110, TimeUnit.SECONDS)) {
verifyRecords();
} else {
fail("Failed to receive the records within the specified time");
}
}
private void checkEndpoints(Map.Entry<ServiceType, List<EndpointPingReport>> entries) {
entries.getValue().forEach(this::checkStatus);
}
private void checkStatus(EndpointPingReport endpointPingReport) {
if (endpointPingReport.state() == PingState.OK) {
LOG.debug("Endpoint {} is ok", endpointPingReport.id());
} else {
LOG.warn("Endpoint {} is not OK", endpointPingReport.id());
}
}
private boolean waitForMinimumRecordCount() {
try {
String query = String.format("select count(*) as count from `%s`", bucketName);
QueryResult queryResult = cluster.query(query);
List<JsonObject> results = queryResult.rowsAsObject();
if (results.isEmpty()) {
return false;
}
int size = results.get(0).getInt("count");
if (size < expect) {
LOG.info("There are only {} records at the moment", size);
return false;
}
return size == expect;
} catch (Exception e) {
LOG.warn("Exception while waiting for the records to arrive: {}", e.getMessage(), e);
}
return false;
}
private boolean isQueryServiceUp() {
try {
String query = String.format("select count(*) as count from `%s`", bucketName);
QueryResult queryResult = cluster.query(query);
queryResult.rowsAsObject();
return true;
} catch (Exception e) {
LOG.warn("Exception while checking if Query service is up: {}", e.getMessage(), e);
return false;
}
}
private void verifyRecords() {
String query = String.format("select * from `%s` USE KEYS \"1\"", bucketName);
QueryResult queryResult = cluster.query(query);
List<JsonObject> results = queryResult.rowsAsObject();
assertFalse(results.isEmpty(), "There should be at least 1 record on the result");
LOG.debug("Received record: {}", results.get(0));
}
@Test
@Timeout(90)
public void testBasicSendReceive() throws Exception {
ConnectorPropertyFactory factory = CamelCouchbasePropertyFactory.basic()
.withTopics(topic)
.withBucket(bucketName)
.withProtocol("http")
.withHostname(service.getHostname())
.withPort(service.getPort())
.withUsername(service.getUsername())
.withPassword(service.getPassword());
runTest(factory, new CustomProducer(getKafkaService().getBootstrapServers(), topic, expect));
}
}
| 9,275 |
0 | Create_ds/camel-kafka-connector/tests/itests-common-http/src/test/java/org/apache/camel/kafkaconnector/common/services | Create_ds/camel-kafka-connector/tests/itests-common-http/src/test/java/org/apache/camel/kafkaconnector/common/services/mockweb/MockWebService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.common.services.mockweb;
import java.security.KeyStore;
import java.util.stream.IntStream;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import org.junit.jupiter.api.extension.AfterEachCallback;
import org.junit.jupiter.api.extension.BeforeEachCallback;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MockWebService implements BeforeEachCallback, AfterEachCallback {
    private static final Logger LOG = LoggerFactory.getLogger(MockWebService.class);

    // TLS configuration set by the Builder; with useHttps false the server
    // speaks plain HTTP and the store fields are ignored.
    private boolean useHttps;
    private String keystore;           // classpath resource with server key material (JKS)
    private String keystorePassword;
    private String truststore;         // classpath resource with trusted certs (JKS)
    private String truststorePassword;

    private MockWebServer server;

    /**
     * Starts a fresh MockWebServer before each test, optionally serving HTTPS
     * using the keystore/truststore resources supplied via the builder.
     */
    @Override
    public void beforeEach(ExtensionContext extensionContext) throws Exception {
        LOG.debug("Starting MockWebServer...");
        server = new MockWebServer();

        if (useHttps) {
            KeyManagerFactory kmFactory = null;
            if (keystore != null) {
                KeyStore keyStore = KeyStore.getInstance("JKS");
                String password = keystorePassword == null ? "" : keystorePassword;
                keyStore.load(getClass().getResourceAsStream(keystore), password.toCharArray());
                kmFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
                kmFactory.init(keyStore, password.toCharArray());
            }

            TrustManagerFactory tmFactory = null;
            if (truststore != null) {
                KeyStore trustStore = KeyStore.getInstance("JKS");
                String password = truststorePassword == null ? "" : truststorePassword;
                trustStore.load(getClass().getResourceAsStream(truststore), password.toCharArray());
                // Fix: use TrustManagerFactory's own default algorithm. The code
                // previously asked KeyManagerFactory for its default (e.g. SunX509),
                // which is not guaranteed to be valid for TrustManagerFactory (PKIX).
                tmFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
                tmFactory.init(trustStore);
            }

            SSLContext sslContext = SSLContext.getInstance("TLS");
            sslContext.init(
                    kmFactory == null ? null : kmFactory.getKeyManagers(),
                    tmFactory == null ? null : tmFactory.getTrustManagers(),
                    null);
            server.useHttps(sslContext.getSocketFactory(), false);
            LOG.debug("Use HTTPS: keystore={}, truststore={}", keystore, truststore);
        }

        server.start();
        LOG.info("MockWebServer started");
    }

    /** Shuts the server down after each test so its port is released. */
    @Override
    public void afterEach(ExtensionContext extensionContext) throws Exception {
        if (server != null) {
            server.shutdown();
            server = null;
            LOG.info("MockWebServer shutdown");
        }
    }

    /** Pre-queues {@code count} empty HTTP 200 responses. */
    public void enqueueResponses(int count) {
        IntStream.range(0, count).forEach(i -> {
            server.enqueue(new MockResponse().setResponseCode(200));
        });
    }

    /** Returns the running server; null outside the before/after-each window. */
    public MockWebServer getServer() {
        return server;
    }

    public static Builder builder() {
        return new Builder();
    }

    /** Fluent configuration for {@link MockWebService}. */
    public static class Builder {
        private boolean useHttps;
        private String keystore;
        private String keystorePassword;
        private String truststore;
        private String truststorePassword;

        public Builder useHttps() {
            useHttps = true;
            return this;
        }

        public Builder withKeystore(String store, String password) {
            this.keystore = store;
            this.keystorePassword = password;
            return this;
        }

        public Builder withTruststore(String store, String password) {
            this.truststore = store;
            this.truststorePassword = password;
            return this;
        }

        public MockWebService build() {
            MockWebService service = new MockWebService();
            service.useHttps = this.useHttps;
            service.keystore = this.keystore;
            service.keystorePassword = this.keystorePassword;
            service.truststore = this.truststore;
            service.truststorePassword = this.truststorePassword;
            return service;
        }
    }
}
| 9,276 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp/source/CamelSourceNettyHTTPITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.nettyhttp.source;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.kafkaconnector.common.utils.NetworkUtils;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
public class CamelSourceNettyHTTPITCase extends CamelSourceTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSourceNettyHTTPITCase.class);

    // Port the connector binds to; picked free at class-load time to avoid clashes.
    private static final int HTTP_PORT = NetworkUtils.getFreePort("localhost", 30000, 40000);
    private static final String TEST_MESSAGE = "testMessage";

    private String topicName;
    private final int expect = 1;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-netty-http-kafka-connector"};
    }

    @BeforeEach
    public void setUp() throws IOException {
        topicName = getTopicForTest(this);
    }

    @Test
    @Timeout(90)
    public void testBasicSendReceive() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelNettyHTTPPropertyFactory.basic()
                .withKafkaTopic(topicName)
                .withReceiveBufferSize(10)
                .withHost("0.0.0.0")
                .withPort(HTTP_PORT)
                .withProtocol("http")
                .withCamelTypeConverterTransformTo("java.lang.String");

        runTestBlocking(connectorPropertyFactory, topicName, expect);
    }

    /**
     * POSTs the test message to the connector's HTTP endpoint, retrying up to
     * 10 times (1 second apart) while the endpoint is still coming up.
     */
    @Override
    protected void produceTestData() {
        int retriesLeft = 10;
        boolean success = false;
        while (retriesLeft > 0 && !success) {
            try (final CloseableHttpClient httpclient = HttpClients.createDefault()) {
                byte[] ipAddr = new byte[]{127, 0, 0, 1};
                InetAddress localhost = InetAddress.getByAddress(ipAddr);

                final HttpPost httpPost = new HttpPost("http://" + localhost.getHostAddress() + ":" + HTTP_PORT);
                LOG.info("Executing request {} {}", httpPost.getMethod(), httpPost.getURI());
                httpPost.setEntity(new StringEntity(TEST_MESSAGE));

                // Fix: close the response via try-with-resources so it cannot
                // leak when the status-code assertion throws.
                try (CloseableHttpResponse response = httpclient.execute(httpPost)) {
                    assertEquals(200, response.getStatusLine().getStatusCode());
                }
                httpPost.releaseConnection();
                success = true;
                LOG.info("Request success at {} attempt.", retriesLeft);
            } catch (IOException e) {
                if (retriesLeft == 1) {
                    // Fix: log via SLF4J instead of printStackTrace so the cause
                    // appears in the test logs.
                    LOG.error("Unable to send the test message: {}", e.getMessage(), e);
                    fail("There should be no exceptions in sending the http test message.");
                } else {
                    retriesLeft--;
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException interruptedException) {
                        // Fix: restore the interrupt flag instead of swallowing it,
                        // then abort the retry loop.
                        Thread.currentThread().interrupt();
                        fail("Interrupted while waiting to retry the http test request");
                    }
                }
            }
        }
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();

        assertEquals(expect, received, "Didn't process the expected amount of messages");
        assertEquals(TEST_MESSAGE, consumer.consumedMessages().get(0).value().toString());
    }
}
| 9,277 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp/source/CamelNettyHTTPPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.nettyhttp.source;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
/**
 * Assembles the property set for the Camel Netty HTTP source connector tests.
 */
final class CamelNettyHTTPPropertyFactory extends SourceConnectorPropertyFactory<CamelNettyHTTPPropertyFactory> {

    private CamelNettyHTTPPropertyFactory() {
    }

    /** Listen host ("camel.source.path.host"). */
    public CamelNettyHTTPPropertyFactory withHost(String hostName) {
        return setProperty("camel.source.path.host", hostName);
    }

    /** URI scheme, e.g. "http" ("camel.source.path.protocol"). */
    public CamelNettyHTTPPropertyFactory withProtocol(String scheme) {
        return setProperty("camel.source.path.protocol", scheme);
    }

    /** Listen port ("camel.source.path.port"). */
    public CamelNettyHTTPPropertyFactory withPort(int listenPort) {
        return setProperty("camel.source.path.port", Integer.toString(listenPort));
    }

    /** Synchronous processing toggle ("camel.source.endpoint.sync"). */
    public CamelNettyHTTPPropertyFactory withSync(boolean synchronous) {
        return setProperty("camel.source.endpoint.sync", Boolean.toString(synchronous));
    }

    /** Netty receive buffer size ("camel.source.endpoint.receiveBufferSize"). */
    public CamelNettyHTTPPropertyFactory withReceiveBufferSize(int bufferSize) {
        return setProperty("camel.source.endpoint.receiveBufferSize", Integer.toString(bufferSize));
    }

    /** Registers a Camel type-converter SMT coercing record values to the given class. */
    public CamelNettyHTTPPropertyFactory withCamelTypeConverterTransformTo(String targetClass) {
        setProperty("transforms", "cameltypeconverter");
        setProperty("transforms.cameltypeconverter.type",
                "org.apache.camel.kafkaconnector.transforms.CamelTypeConverterTransform$Value");
        return setProperty("transforms.cameltypeconverter.target.type", targetClass);
    }

    /** Baseline configuration shared by all tests of this connector. */
    public static CamelNettyHTTPPropertyFactory basic() {
        CamelNettyHTTPPropertyFactory factory = new CamelNettyHTTPPropertyFactory();
        return factory
                .withTasksMax(1)
                .withName("CamelNettyHttpSourceConnector")
                .withConnectorClass("org.apache.camel.kafkaconnector.nettyhttp.CamelNettyhttpSourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,278 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp/sink/CamelSinkNettyhttpITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.nettyhttp.sink;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.RecordedRequest;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.services.mockweb.MockWebService;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
public class CamelSinkNettyhttpITCase extends CamelSinkTestSupport {
    private static final Logger LOG = LoggerFactory.getLogger(CamelSinkNettyhttpITCase.class);

    @RegisterExtension
    public final MockWebService mockWebService = MockWebService.builder().build();

    private MockWebServer mockServer;
    private String topicName;
    private final int expect = 1;
    // Written by consumeMessages (background), read by verifyMessages after the latch trips.
    private volatile RecordedRequest received;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-netty-http-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
        mockServer = mockWebService.getServer();
        received = null;
    }

    /**
     * Blocks until the mock server receives the request forwarded by the sink
     * connector, then releases the latch for verification.
     */
    @Override
    protected void consumeMessages(CountDownLatch latch) {
        try {
            received = mockServer.takeRequest();
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
            LOG.error("Unable to receive messages: {}", e.getMessage(), e);
        } finally {
            latch.countDown();
        }
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        String expected = "Sink test message 0";

        if (latch.await(30, TimeUnit.SECONDS)) {
            // Fix: fail with a clear message instead of a NullPointerException
            // when no request was captured before the latch was released.
            if (received == null) {
                fail("No request was received by the mock server");
            }
            assertEquals("/test", received.getPath(), "Received path differed");
            assertEquals(expected, received.getBody().readUtf8(), "Received message content differed");
        } else {
            fail("Failed to receive the messages within the specified time");
        }
    }

    @Test
    @Timeout(30)
    public void testBasicSendReceive() throws Exception {
        mockWebService.enqueueResponses(expect);

        ConnectorPropertyFactory connectorPropertyFactory = CamelNettyhttpPropertyFactory.basic()
                .withTopics(topicName)
                .withProtocol("http")
                .withHost(mockServer.getHostName())
                .withPort(mockServer.getPort())
                .withPath("test");

        runTest(connectorPropertyFactory, topicName, expect);
    }

    @Test
    @Timeout(30)
    public void testBasicSendReceiveUsingUrl() throws Exception {
        mockWebService.enqueueResponses(expect);

        ConnectorPropertyFactory connectorPropertyFactory = CamelNettyhttpPropertyFactory.basic()
                .withTopics(topicName)
                .withUrl("http", mockServer.getHostName(), mockServer.getPort(), "test")
                .buildUrl();

        runTest(connectorPropertyFactory, topicName, expect);
    }
}
| 9,279 |
0 | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp | Create_ds/camel-kafka-connector/tests/itests-netty-http/src/test/java/org/apache/camel/kafkaconnector/nettyhttp/sink/CamelNettyhttpPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.nettyhttp.sink;
import org.apache.camel.kafkaconnector.common.EndpointUrlBuilder;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
/**
 * Assembles the property set for the Camel Netty HTTP sink connector tests.
 */
final class CamelNettyhttpPropertyFactory extends SinkConnectorPropertyFactory<CamelNettyhttpPropertyFactory> {

    private CamelNettyhttpPropertyFactory() {
    }

    /** URI scheme, e.g. "http" ("camel.sink.path.protocol"). */
    public CamelNettyhttpPropertyFactory withProtocol(String scheme) {
        return setProperty("camel.sink.path.protocol", scheme);
    }

    /** Target host ("camel.sink.path.host"). */
    public CamelNettyhttpPropertyFactory withHost(String hostName) {
        return setProperty("camel.sink.path.host", hostName);
    }

    /** Target port ("camel.sink.path.port"). */
    public CamelNettyhttpPropertyFactory withPort(int portNumber) {
        return setProperty("camel.sink.path.port", portNumber);
    }

    /** Context path ("camel.sink.path.path"). */
    public CamelNettyhttpPropertyFactory withPath(String contextPath) {
        return setProperty("camel.sink.path.path", contextPath);
    }

    /** Disconnect-after-use toggle ("camel.sink.endpoint.disconnect"). */
    public CamelNettyhttpPropertyFactory withDisconnect(boolean disconnectAfterUse) {
        return setProperty("camel.sink.endpoint.disconnect", disconnectAfterUse);
    }

    /** Synchronous processing toggle ("camel.sink.endpoint.sync"). */
    public CamelNettyhttpPropertyFactory withSync(boolean synchronous) {
        return setProperty("camel.sink.endpoint.sync", synchronous);
    }

    /** Builds a raw endpoint URL of the form netty-http:scheme://host:port/path. */
    public EndpointUrlBuilder<CamelNettyhttpPropertyFactory> withUrl(String scheme, String hostName, int portNumber, String contextPath) {
        final String url = String.format("netty-http:%s://%s:%s/%s", scheme, hostName, portNumber, contextPath);
        return new EndpointUrlBuilder<>(this::withSinkUrl, url);
    }

    /** Baseline configuration shared by all tests of this connector. */
    public static CamelNettyhttpPropertyFactory basic() {
        CamelNettyhttpPropertyFactory factory = new CamelNettyhttpPropertyFactory();
        return factory
                .withName("CamelNettyhttpSinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.nettyhttp.CamelNettyhttpSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,280 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/source/CamelSyslogPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.source;
import org.apache.camel.kafkaconnector.common.SourceConnectorPropertyFactory;
/**
 * Creates the set of properties used by a Camel Syslog Source Connector.
 */
final class CamelSyslogPropertyFactory extends SourceConnectorPropertyFactory<CamelSyslogPropertyFactory> {
    private CamelSyslogPropertyFactory() {
    }

    // Host the syslog consumer binds to ("camel.source.path.host").
    public CamelSyslogPropertyFactory withHost(String host) {
        return setProperty("camel.source.path.host", host);
    }

    // Port the syslog consumer binds to ("camel.source.path.port").
    public CamelSyslogPropertyFactory withPort(int port) {
        return setProperty("camel.source.path.port", String.valueOf(port));
    }

    // Transport protocol, e.g. "udp" ("camel.source.path.protocol").
    public CamelSyslogPropertyFactory withProtocol(String protocol) {
        return setProperty("camel.source.path.protocol", protocol);
    }

    // Baseline configuration shared by the syslog source tests.
    public static CamelSyslogPropertyFactory basic() {
        return new CamelSyslogPropertyFactory()
                .withName("CamelSyslogSourceConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.syslog.CamelSyslogSourceConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,281 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/source/CamelSourceSyslogITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.source;
import java.util.concurrent.ExecutionException;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.clients.kafka.KafkaClient;
import org.apache.camel.kafkaconnector.common.test.CamelSourceTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageConsumer;
import org.apache.camel.kafkaconnector.common.test.TestMessageConsumer;
import org.apache.camel.kafkaconnector.common.utils.NetworkUtils;
import org.apache.camel.kafkaconnector.syslog.services.SyslogService;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.RepeatedTest;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.extension.RegisterExtension;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * A simple test case that checks whether the syslog source produces the
 * expected number of messages on the Kafka topic.
 */
@EnabledIfSystemProperty(named = "enable.flaky.tests", matches = "true",
        disabledReason = "Quickly spawning multiple Jetty Servers doesn't work well on Github Actions")
public class CamelSourceSyslogITCase extends CamelSourceTestSupport {
    private static final String HOST = NetworkUtils.getHostname();
    private static final String PROTOCOL = "udp";
    private static final int FREE_PORT = NetworkUtils.getFreePort(HOST, NetworkUtils.Protocol.UDP);

    @RegisterExtension
    public static SyslogService service = SyslogService.sourceSyslogServiceFactory(PROTOCOL, HOST, FREE_PORT);

    private final int expect = 1;
    private String topicName;

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-syslog-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
    }

    /** Sends one RFC 5424 syslog frame through the service's direct endpoint. */
    @Override
    protected void produceTestData() {
        String message = "<13>1 2020-05-14T14:47:01.198+02:00 nathannever myapp - - [timeQuality tzKnown=\"1\" isSynced=\"1\" syncAccuracy=\"11266\"] FOO BAR!";
        service.getCamelContext().createProducerTemplate().sendBody("direct:test", message);
    }

    @Override
    protected void verifyMessages(TestMessageConsumer<?> consumer) {
        int received = consumer.consumedMessages().size();
        // Fix: JUnit's assertEquals takes the expected value first; the
        // arguments were previously swapped, garbling the failure message.
        assertEquals(expect, received, "Didn't process the expected amount of messages");
    }

    // Fix: @Test removed — combining @Test with @RepeatedTest registers the
    // method twice in JUnit 5; the 3-fold repetition is what was intended.
    @RepeatedTest(3)
    @Timeout(180)
    public void testBasicSend() throws ExecutionException, InterruptedException {
        ConnectorPropertyFactory connectorPropertyFactory = CamelSyslogPropertyFactory
                .basic()
                .withName("CamelSyslogSourceConnector" + TestUtils.randomWithRange(0, 1000))
                .withKafkaTopic(topicName)
                .withHost(HOST)
                .withPort(FREE_PORT)
                .withProtocol(PROTOCOL);

        KafkaClient<String, String> kafkaClient = new KafkaClient<>(getKafkaService().getBootstrapServers());
        StringMessageConsumer stringMessageConsumer = new StringMessageConsumer(kafkaClient, topicName, expect);

        runTestBlocking(connectorPropertyFactory, stringMessageConsumer);
    }
}
| 9,282 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/sink/CamelSyslogPropertyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.sink;
import org.apache.camel.kafkaconnector.common.SinkConnectorPropertyFactory;
/**
 * Creates the set of properties used by a Camel Syslog Sink Connector.
 */
final class CamelSyslogPropertyFactory extends SinkConnectorPropertyFactory<CamelSyslogPropertyFactory> {

    private CamelSyslogPropertyFactory() {
    }

    /** Target syslog host ("camel.sink.path.host"). */
    public CamelSyslogPropertyFactory withHost(String hostName) {
        return setProperty("camel.sink.path.host", hostName);
    }

    /** Target syslog port ("camel.sink.path.port"). */
    public CamelSyslogPropertyFactory withPort(int portNumber) {
        return setProperty("camel.sink.path.port", Integer.toString(portNumber));
    }

    /** Transport protocol, e.g. "udp" ("camel.sink.path.protocol"). */
    public CamelSyslogPropertyFactory withProtocol(String transport) {
        return setProperty("camel.sink.path.protocol", transport);
    }

    /** Baseline configuration shared by the syslog sink tests. */
    public static CamelSyslogPropertyFactory basic() {
        CamelSyslogPropertyFactory factory = new CamelSyslogPropertyFactory();
        return factory
                .withName("CamelSyslogSinkConnector")
                .withTasksMax(1)
                .withConnectorClass("org.apache.camel.kafkaconnector.syslog.CamelSyslogSinkConnector")
                .withKeyConverterClass("org.apache.kafka.connect.storage.StringConverter")
                .withValueConverterClass("org.apache.kafka.connect.storage.StringConverter");
    }
}
| 9,283 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/sink/CamelSinkSyslogITCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.sink;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.camel.Exchange;
import org.apache.camel.Message;
import org.apache.camel.kafkaconnector.common.ConnectorPropertyFactory;
import org.apache.camel.kafkaconnector.common.test.CamelSinkTestSupport;
import org.apache.camel.kafkaconnector.common.test.StringMessageProducer;
import org.apache.camel.kafkaconnector.common.utils.NetworkUtils;
import org.apache.camel.kafkaconnector.syslog.services.SyslogService;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.RegisterExtension;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.fail;
/**
* A simple test case that checks whether the syslog send the expected number of
* messages
*/
/**
 * A simple test case that checks whether the syslog sink connector delivers the expected
 * message to the syslog test service.
 */
public class CamelSinkSyslogITCase extends CamelSinkTestSupport {
    // Host, protocol and port shared by the syslog test service and the connector under test.
    private static final String HOST = NetworkUtils.getHostname();
    private static final String PROTOCOL = "udp";
    private static final int FREE_PORT = NetworkUtils.getFreePort(HOST, NetworkUtils.Protocol.UDP);
    // RFC 5424 formatted syslog message used as the test payload.
    private static final String TEST_TXT = "<13>1 2020-05-14T14:47:01.198+02:00 nathannever myapp - - [timeQuality tzKnown=\"1\" isSynced=\"1\" syncAccuracy=\"11266\"] FOO BAR!";

    @RegisterExtension
    public static SyslogService service = SyslogService.sinkSyslogServiceFactory(PROTOCOL, HOST, FREE_PORT);

    private String topicName;
    // Number of messages the producer publishes to Kafka.
    private final int expect = 1;

    /** Producer that always emits the fixed RFC 5424 test message. */
    private static class CustomProducer extends StringMessageProducer {
        public CustomProducer(String bootstrapServer, String topicName, int count) {
            super(bootstrapServer, topicName, count);
        }

        @Override
        public String testMessageContent(int current) {
            return TEST_TXT;
        }
    }

    @Override
    protected String[] getConnectorsInTest() {
        return new String[] {"camel-syslog-kafka-connector"};
    }

    @BeforeEach
    public void setUp() {
        topicName = getTopicForTest(this);
    }

    @Override
    protected void consumeMessages(CountDownLatch latch) {
        // Consumption is verified through the syslog service, so just release the latch here.
        latch.countDown();
    }

    @Override
    protected void verifyMessages(CountDownLatch latch) throws InterruptedException {
        if (latch.await(30, TimeUnit.SECONDS)) {
            Exchange exchange = service.getFirstExchangeToBeReceived();
            assertNotNull(exchange, "There should have been an exchange received");
            Message message = exchange.getIn();
            assertNotNull(message, "There should have been a message in the exchange");
            String body = message.getBody(String.class);
            assertNotNull(body, "The message body should not be null");
            // reuse the body extracted above instead of converting the message a second time
            assertEquals(TEST_TXT, body,
                    "The received message body does not match the expected message");
        } else {
            fail("Timed out wait for data to be added to the Kafka cluster");
        }
    }

    @Test
    @Timeout(90)
    public void testBasicReceive() throws Exception {
        ConnectorPropertyFactory connectorPropertyFactory = CamelSyslogPropertyFactory
                .basic()
                .withTopics(topicName)
                .withHost(HOST)
                .withPort(FREE_PORT)
                // use the shared PROTOCOL constant (previously a hard-coded "udp" literal) so the
                // connector and the registered syslog service can never disagree on the protocol
                .withProtocol(PROTOCOL);
        runTest(connectorPropertyFactory, new CustomProducer(getKafkaService().getBootstrapServers(), topicName, expect));
    }
}
| 9,284 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/services/SinkRouteConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.services;
import org.apache.camel.CamelContext;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.syslog.SyslogDataFormat;
import org.apache.camel.component.syslog.netty.Rfc5425FrameDecoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Sets up the route that receives syslog traffic for sink-connector tests. */
public class SinkRouteConfigurator implements RouteConfigurator {
    private static final Logger LOG = LoggerFactory.getLogger(SinkRouteConfigurator.class);

    private final String protocol;
    private final String host;
    private final int port;

    public SinkRouteConfigurator(String protocol, String host, int port) {
        this.protocol = protocol;
        this.host = host;
        this.port = port;
    }

    @Override
    public void configure(CamelContext camelContext) throws Exception {
        // The netty endpoint looks up this decoder from the registry by name.
        camelContext.getRegistry().bind("decoder", new Rfc5425FrameDecoder());

        LOG.debug("Adding routes");

        RouteBuilder syslogListenerRoute = new RouteBuilder() {
            @Override
            public void configure() {
                // Listen over netty, parse the syslog frames and hand them to a seda queue
                fromF("netty:%s://%s:%d?sync=false&decoders=#decoder", protocol, host, port)
                        .unmarshal(new SyslogDataFormat())
                        .to("seda:syslog");
            }
        };

        camelContext.addRoutes(syslogListenerRoute);
    }
}
| 9,285 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/services/RouteConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.services;
import org.apache.camel.CamelContext;
/**
 * Supplies the Camel routes used by the {@code SyslogService} test extension. Implementations add
 * their routes to the context before it is started.
 */
public interface RouteConfigurator {
    /**
     * Adds this configurator's routes to the given (not yet started) Camel context.
     *
     * @param camelContext the context to configure
     * @throws Exception if the routes cannot be added
     */
    void configure(CamelContext camelContext) throws Exception;
}
| 9,286 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/services/SyslogService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.services;
import org.apache.camel.CamelContext;
import org.apache.camel.ConsumerTemplate;
import org.apache.camel.Exchange;
import org.apache.camel.impl.DefaultCamelContext;
import org.apache.camel.test.infra.common.TestUtils;
import org.junit.jupiter.api.extension.AfterAllCallback;
import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.ExtensionContext;
/**
 * JUnit 5 extension that runs an embedded Camel context carrying syslog test routes. The routes
 * themselves are supplied by a {@link RouteConfigurator} (sink or source flavor).
 */
public class SyslogService implements BeforeAllCallback, AfterAllCallback {
    private final CamelContext camelContext = new DefaultCamelContext();
    private final RouteConfigurator routeConfigurator;

    public SyslogService(RouteConfigurator routeConfigurator) {
        this.routeConfigurator = routeConfigurator;
    }

    @Override
    public void beforeAll(ExtensionContext context) throws Exception {
        routeConfigurator.configure(camelContext);
        camelContext.start();
        TestUtils.waitFor(camelContext::isStarted);
    }

    @Override
    public void afterAll(ExtensionContext context) {
        camelContext.stop();
        TestUtils.waitFor(camelContext::isStopped);
    }

    public CamelContext getCamelContext() {
        return camelContext;
    }

    /**
     * Receives the first exchange delivered to the internal seda:syslog endpoint, waiting up to
     * 10 seconds.
     *
     * @return the received exchange, or null if the timeout expired
     */
    public Exchange getFirstExchangeToBeReceived() {
        // Stop the temporary consumer template once the exchange is in hand: the previous code
        // created a fresh template on every call and never stopped it, leaking resources.
        ConsumerTemplate consumer = camelContext.createConsumerTemplate();
        try {
            return consumer.receive("seda:syslog", 10000L);
        } finally {
            consumer.stop();
        }
    }

    /** @return a service whose routes receive syslog traffic (for sink connector tests) */
    public static SyslogService sinkSyslogServiceFactory(String protocol, String host, int port) {
        SinkRouteConfigurator sinkRouteConfigurator = new SinkRouteConfigurator(protocol, host, port);
        return new SyslogService(sinkRouteConfigurator);
    }

    /** @return a service whose routes send syslog traffic (for source connector tests) */
    public static SyslogService sourceSyslogServiceFactory(String protocol, String host, int port) {
        SourceRouteConfigurator sourceRouteConfigurator = new SourceRouteConfigurator(protocol, host, port);
        return new SyslogService(sourceRouteConfigurator);
    }
}
| 9,287 |
0 | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog | Create_ds/camel-kafka-connector/tests/itests-syslog/src/test/java/org/apache/camel/kafkaconnector/syslog/services/SourceRouteConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.kafkaconnector.syslog.services;
import org.apache.camel.CamelContext;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.syslog.SyslogDataFormat;
import org.apache.camel.component.syslog.netty.Rfc5425Encoder;
import org.apache.camel.kafkaconnector.common.utils.NetworkUtils;
import org.apache.camel.test.infra.common.TestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Sets up the route that sends syslog traffic for source-connector tests. */
public class SourceRouteConfigurator implements RouteConfigurator {
    private static final Logger LOG = LoggerFactory.getLogger(SourceRouteConfigurator.class);

    private final String protocol;
    private final String host;
    private final int port;

    public SourceRouteConfigurator(String protocol, String host, int port) {
        this.protocol = protocol;
        this.host = host;
        this.port = port;
    }

    @Override
    public void configure(CamelContext camelContext) throws Exception {
        // The netty endpoint looks up this encoder from the registry by name.
        camelContext.getRegistry().bind("encoder", new Rfc5425Encoder());

        LOG.debug("Adding routes");

        RouteBuilder syslogSenderRoute = new RouteBuilder() {
            @Override
            public void configure() {
                // Marshal whatever arrives on direct:test as syslog and push it out over netty
                from("direct:test")
                        .marshal(new SyslogDataFormat())
                        .toF("netty:%s://%s:%d?sync=false&encoders=#encoder&useByteBuf=true&lazyStartProducer=true",
                                protocol, host, port);
            }
        };

        camelContext.addRoutes(syslogSenderRoute);

        // Block until the endpoint is actually reachable before letting the test proceed
        TestUtils.waitFor(() -> NetworkUtils.portIsOpen(host, port));
    }
}
| 9,288 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/WithTestNames.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
// This is only for the unit tests and integration tests in this module
// It must be copied for use in other modules, because tests in one module
// don't have dependencies on other modules, and we can't put this in a
// regular, non-test jar, because we don't want to add a dependency on
// JUnit in a non-test jar
public class WithTestNames {

  // Simple name of the test method currently running; refreshed before every test.
  private String testName;

  @BeforeEach
  public void setTestName(TestInfo info) {
    var testMethod = info.getTestMethod().orElseThrow();
    testName = testMethod.getName();
  }

  /** Returns the simple name of the currently executing test method. */
  protected String testName() {
    return testName;
  }
}
| 9,289 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/crypto/BlockedIOStreamTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.security.SecureRandom;
import java.util.Arrays;
import org.apache.accumulo.core.crypto.streams.BlockedInputStream;
import org.apache.accumulo.core.crypto.streams.BlockedOutputStream;
import org.junit.jupiter.api.Test;
/**
 * Round-trip tests for {@code BlockedInputStream} / {@code BlockedOutputStream}. The byte-count
 * assertions below depend on each block of size N carrying 4 bytes of per-block overhead
 * (apparently a length header -- confirm in BlockedOutputStream), leaving N - 4 payload bytes
 * per block; see the ceil(.../12) math used with 16-byte blocks.
 */
public class BlockedIOStreamTest {
  private static final SecureRandom random = new SecureRandom();

  @Test
  public void testLargeBlockIO() throws IOException {
    // Two flushed strings, each far smaller than a 1024-byte block, produce one block apiece.
    writeRead(1024, 2048);
  }

  /**
   * Writes two fixed strings through a BlockedOutputStream (flushing after each), asserts the
   * total output length equals expectedSize, then reads both strings back and verifies them.
   */
  private void writeRead(int blockSize, int expectedSize) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BlockedOutputStream blockOut = new BlockedOutputStream(baos, blockSize, 1);

    String contentString = "My Blocked Content String";
    byte[] content = contentString.getBytes(UTF_8);
    blockOut.write(content);
    blockOut.flush();

    String contentString2 = "My Other Blocked Content String";
    byte[] content2 = contentString2.getBytes(UTF_8);
    blockOut.write(content2);
    blockOut.flush();

    blockOut.close();

    byte[] written = baos.toByteArray();
    assertEquals(expectedSize, written.length);

    ByteArrayInputStream biis = new ByteArrayInputStream(written);
    BlockedInputStream blockIn = new BlockedInputStream(biis, blockSize, blockSize);
    DataInputStream dIn = new DataInputStream(blockIn);

    // Read back into the same arrays (they are exactly the right size) and compare.
    dIn.readFully(content, 0, content.length);
    String readContentString = new String(content, UTF_8);
    assertEquals(contentString, readContentString);

    dIn.readFully(content2, 0, content2.length);
    String readContentString2 = new String(content2, UTF_8);
    assertEquals(contentString2, readContentString2);

    blockIn.close();
  }

  @Test
  public void testSmallBufferBlockedIO() throws IOException {
    // 16-byte blocks hold 12 payload bytes each; the 25- and 31-byte strings therefore span
    // ceil(25/12) + ceil(31/12) blocks of (12 + 4) bytes apiece.
    writeRead(16, (12 + 4) * (int) (Math.ceil(25.0 / 12) + Math.ceil(31.0 / 12)));
  }

  @Test
  public void testSpillingOverOutputStream() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // buffer will be size 12 (16-byte blocks minus 4 bytes of overhead)
    BlockedOutputStream blockOut = new BlockedOutputStream(baos, 16, 16);

    byte[] undersized = new byte[11];
    byte[] perfectSized = new byte[12];
    byte[] overSized = new byte[13];
    byte[] perfectlyOversized = new byte[13];
    byte filler = (byte) random.nextInt();

    random.nextBytes(undersized);
    random.nextBytes(perfectSized);
    random.nextBytes(overSized);
    random.nextBytes(perfectlyOversized);

    // 1 block: 11 bytes + 1 filler fit exactly into one 12-byte payload
    blockOut.write(undersized);
    blockOut.write(filler);
    blockOut.flush();

    // 2 blocks: 12 bytes fill one payload, the filler spills into a second block
    blockOut.write(perfectSized);
    blockOut.write(filler);
    blockOut.flush();

    // 2 blocks: 13 bytes + 1 filler span two payloads
    blockOut.write(overSized);
    blockOut.write(filler);
    blockOut.flush();

    // 3 blocks: 11 + 13 + 1 = 25 bytes need three 12-byte payloads
    blockOut.write(undersized);
    blockOut.write(perfectlyOversized);
    blockOut.write(filler);
    blockOut.flush();
    blockOut.close();

    // 8 blocks total, 16 bytes each
    assertEquals(16 * 8, baos.toByteArray().length);
  }

  @Test
  public void testGiantWrite() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    int blockSize = 16;
    // buffer will be size 12
    BlockedOutputStream blockOut = new BlockedOutputStream(baos, blockSize, blockSize);

    // 128 MiB of data built from a repeated 1 KiB random pattern
    int size = 1024 * 1024 * 128;
    byte[] giant = new byte[size];
    byte[] pattern = new byte[1024];
    random.nextBytes(pattern);
    for (int i = 0; i < size / 1024; i++) {
      System.arraycopy(pattern, 0, giant, i * 1024, 1024);
    }

    blockOut.write(giant);
    blockOut.flush();

    blockOut.close();
    baos.close();

    // Each block carries blockSize - 4 payload bytes, hence the ceiling division.
    int blocks = (int) Math.ceil(size / (blockSize - 4.0));

    byte[] byteStream = baos.toByteArray();
    assertEquals(blocks * 16, byteStream.length);

    DataInputStream blockIn = new DataInputStream(
        new BlockedInputStream(new ByteArrayInputStream(byteStream), blockSize, blockSize));
    // Zero the buffer first so a short read could not pass by accident.
    Arrays.fill(giant, (byte) 0);
    blockIn.readFully(giant, 0, size);
    blockIn.close();

    // Verify every 1 KiB chunk round-tripped intact.
    for (int i = 0; i < size / 1024; i++) {
      byte[] readChunk = new byte[1024];
      System.arraycopy(giant, i * 1024, readChunk, 0, 1024);
      assertArrayEquals(pattern, readChunk);
    }
  }
}
| 9,290 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/crypto/CryptoTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto;
import static com.google.common.collect.MoreCollectors.onlyElement;
import static org.apache.accumulo.core.conf.Property.INSTANCE_CRYPTO_FACTORY;
import static org.apache.accumulo.core.crypto.CryptoUtils.getFileDecrypter;
import static org.apache.accumulo.core.spi.crypto.CryptoEnvironment.Scope.TABLE;
import static org.apache.accumulo.core.spi.crypto.CryptoEnvironment.Scope.WAL;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import javax.crypto.Cipher;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.spec.SecretKeySpec;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.client.summary.Summarizer;
import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
import org.apache.accumulo.core.client.summary.Summary;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.crypto.streams.NoFlushOutputStream;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.spi.crypto.AESCryptoService;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment.Scope;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.accumulo.core.spi.crypto.CryptoService.CryptoException;
import org.apache.accumulo.core.spi.crypto.CryptoServiceFactory;
import org.apache.accumulo.core.spi.crypto.FileDecrypter;
import org.apache.accumulo.core.spi.crypto.FileEncrypter;
import org.apache.accumulo.core.spi.crypto.GenericCryptoServiceFactory;
import org.apache.accumulo.core.spi.crypto.NoCryptoService;
import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
import org.apache.accumulo.core.spi.crypto.PerTableCryptoServiceFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public class CryptoTest {
// Marker values written into the streams and expected back after decrypt round trips.
private static final int MARKER_INT = 0xCADEFEDD;
private static final String MARKER_STRING = "1 2 3 4 5 6 7 8 a b c d e f g h ";
private static final Configuration hadoopConf = new Configuration();

// Crypto configurations this test can build: off, or per-scope (table/WAL) with encryption of
// new data either enabled or "disabled" (the disabled variants can still decrypt existing data).
public enum ConfigMode {
  CRYPTO_OFF, CRYPTO_TABLE_ON, CRYPTO_WAL_ON, CRYPTO_TABLE_ON_DISABLED, CRYPTO_WAL_ON_DISABLED
}
@BeforeAll
public static void setupKeyFiles() throws IOException {
  setupKeyFiles(CryptoTest.class);
}

/**
 * Creates the key files used by the AES crypto service for the given test class: one holding a
 * 16-byte key (14 characters plus the 2 length bytes writeUTF prepends) and one deliberately
 * left empty for negative tests.
 */
public static void setupKeyFiles(Class<?> testClass) throws IOException {
  FileSystem fs = FileSystem.getLocal(hadoopConf);
  Path aesPath = new Path(keyPath(testClass));
  try (FSDataOutputStream out = fs.create(aesPath)) {
    out.writeUTF("sixteenbytekey"); // 14 + 2 from writeUTF
  }
  try (FSDataOutputStream out = fs.create(new Path(emptyKeyPath(testClass)))) {
    // auto close after creating
    assertNotNull(out);
  }
}
/**
 * Builds an Accumulo configuration for the requested crypto mode. The ON and ON_DISABLED
 * variants of each scope share all settings except the encrypt-enabled flag, so the duplicated
 * switch arms of the original are collapsed and the flag is derived from the mode.
 *
 * @param configMode which crypto setup to apply
 * @param testClass used to locate the per-test-class key file
 * @return a copy of the default configuration with the crypto properties applied
 */
public static ConfigurationCopy getAccumuloConfig(ConfigMode configMode, Class<?> testClass) {
  ConfigurationCopy cfg = new ConfigurationCopy(DefaultConfiguration.getInstance());
  switch (configMode) {
    case CRYPTO_TABLE_ON:
    case CRYPTO_TABLE_ON_DISABLED:
      cfg.set(INSTANCE_CRYPTO_FACTORY, PerTableCryptoServiceFactory.class.getName());
      cfg.set(PerTableCryptoServiceFactory.TABLE_SERVICE_NAME_PROP,
          AESCryptoService.class.getName());
      cfg.set(AESCryptoService.KEY_URI_PROPERTY, CryptoTest.keyPath(testClass));
      // only this flag differs between the ON and ON_DISABLED variants
      cfg.set(AESCryptoService.ENCRYPT_ENABLED_PROPERTY,
          Boolean.toString(configMode == ConfigMode.CRYPTO_TABLE_ON));
      break;
    case CRYPTO_WAL_ON:
    case CRYPTO_WAL_ON_DISABLED:
      cfg.set(INSTANCE_CRYPTO_FACTORY, GenericCryptoServiceFactory.class.getName());
      cfg.set(GenericCryptoServiceFactory.GENERAL_SERVICE_NAME_PROP,
          AESCryptoService.class.getName());
      cfg.set(AESCryptoService.KEY_URI_PROPERTY, CryptoTest.keyPath(testClass));
      cfg.set(AESCryptoService.ENCRYPT_ENABLED_PROPERTY,
          Boolean.toString(configMode == ConfigMode.CRYPTO_WAL_ON));
      break;
    case CRYPTO_OFF:
      // leave the default (no crypto) configuration untouched
      break;
  }
  return cfg;
}
// Convenience overload keyed to this test class.
private ConfigurationCopy getAccumuloConfig(ConfigMode configMode) {
  return getAccumuloConfig(configMode, getClass());
}

// Extracts only the crypto-related properties of the given config mode, in the form passed to
// CryptoService.init().
private Map<String,String> getAllCryptoProperties(ConfigMode configMode) {
  var cc = getAccumuloConfig(configMode);
  return cc.getAllCryptoProperties();
}

// Path of the per-test-class key file created by setupKeyFiles().
public static String keyPath(Class<?> testClass) {
  return System.getProperty("user.dir") + "/target/" + testClass.getSimpleName() + "-testkeyfile";
}

// Path of the deliberately empty key file created by setupKeyFiles().
public static String emptyKeyPath(Class<?> testClass) {
  return System.getProperty("user.dir") + "/target/" + testClass.getSimpleName()
      + "-emptykeyfile";
}
/**
 * End-to-end sanity check of the AES service in TABLE scope: write the decryption parameters
 * followed by an encrypted marker string into a byte stream, then read the parameters back and
 * decrypt the marker.
 */
@Test
public void simpleGCMTest() throws Exception {
  AESCryptoService cs = new AESCryptoService();
  cs.init(getAllCryptoProperties(ConfigMode.CRYPTO_TABLE_ON));
  CryptoEnvironment encEnv = new CryptoEnvironmentImpl(TABLE, null, null);
  FileEncrypter encrypter = cs.getFileEncrypter(encEnv);
  byte[] params = encrypter.getDecryptionParameters();
  assertNotNull(params);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  DataOutputStream dataOut = new DataOutputStream(out);
  // the decryption parameters are written unencrypted ahead of the ciphertext
  CryptoUtils.writeParams(params, dataOut);
  OutputStream encrypted = encrypter.encryptStream(dataOut);
  assertNotNull(encrypted);
  DataOutputStream cipherOut = new DataOutputStream(encrypted);
  cipherOut.writeUTF(MARKER_STRING);
  // close innermost-first so the cipher stream finalizes before the underlying streams
  cipherOut.close();
  dataOut.close();
  encrypted.close();
  out.close();
  byte[] cipherText = out.toByteArray();
  // decrypt: getFileDecrypter reads the parameters back from the head of the stream
  ByteArrayInputStream in = new ByteArrayInputStream(cipherText);
  FileDecrypter decrypter = getFileDecrypter(cs, TABLE, null, new DataInputStream(in));
  DataInputStream decrypted = new DataInputStream(decrypter.decryptStream(in));
  String plainText = decrypted.readUTF();
  decrypted.close();
  in.close();
  assertEquals(MARKER_STRING, plainText);
}
/** Encrypt/decrypt round trip of the marker data through the AES service in WAL scope. */
@Test
public void testAESCryptoServiceWAL() throws Exception {
  AESCryptoService cs = new AESCryptoService();
  cs.init(getAllCryptoProperties(ConfigMode.CRYPTO_WAL_ON));
  byte[] resultingBytes = encrypt(cs, Scope.WAL);
  String stringifiedBytes = Arrays.toString(resultingBytes);
  String stringifiedMarkerBytes = getStringifiedBytes(null, MARKER_STRING, MARKER_INT);
  // the ciphertext must not equal the plaintext marker bytes
  assertNotEquals(stringifiedBytes, stringifiedMarkerBytes);
  decrypt(cs, resultingBytes, Scope.WAL);
}

/**
 * AESCryptoService is configured but only for reading: the disabled service must still decrypt
 * data written by an enabled service, but must not encrypt new data itself.
 */
@Test
public void testAESCryptoServiceWALDisabled() throws Exception {
  AESCryptoService csEnabled = new AESCryptoService();
  AESCryptoService csDisabled = new AESCryptoService();
  csEnabled.init(getAllCryptoProperties(ConfigMode.CRYPTO_WAL_ON));
  csDisabled.init(getAllCryptoProperties(ConfigMode.CRYPTO_WAL_ON_DISABLED));
  // make sure we can read encrypted data with the disabled service
  byte[] encryptedBytes = encrypt(csEnabled, Scope.WAL);
  String stringEncryptedBytes = Arrays.toString(encryptedBytes);
  String stringifiedMarkerBytes = getStringifiedBytes(null, MARKER_STRING, MARKER_INT);
  assertNotEquals(stringEncryptedBytes, stringifiedMarkerBytes);
  decrypt(csDisabled, encryptedBytes, Scope.WAL);
  // make sure we don't encrypt when disabled: output differs from the enabled ciphertext
  byte[] plainBytes = encrypt(csDisabled, Scope.WAL);
  String stringPlainBytes = Arrays.toString(plainBytes);
  assertNotEquals(stringEncryptedBytes, stringPlainBytes);
  decrypt(csDisabled, plainBytes, Scope.WAL);
}
/** Encrypt/decrypt round trip of the marker data through the AES service in TABLE scope. */
@Test
public void testAESCryptoServiceRFILE() throws Exception {
  AESCryptoService cs = new AESCryptoService();
  cs.init(getAllCryptoProperties(ConfigMode.CRYPTO_TABLE_ON));
  byte[] resultingBytes = encrypt(cs, TABLE);
  String stringifiedBytes = Arrays.toString(resultingBytes);
  String stringifiedMarkerBytes = getStringifiedBytes(null, MARKER_STRING, MARKER_INT);
  // the ciphertext must not equal the plaintext marker bytes
  assertNotEquals(stringifiedBytes, stringifiedMarkerBytes);
  decrypt(cs, resultingBytes, TABLE);
}

/**
 * AESCryptoService is configured but only for reading: the disabled service must still decrypt
 * data written by an enabled service, but must not encrypt new data itself.
 */
@Test
public void testAESCryptoServiceTableDisabled() throws Exception {
  AESCryptoService csEnabled = new AESCryptoService();
  AESCryptoService csDisabled = new AESCryptoService();
  csEnabled.init(getAllCryptoProperties(ConfigMode.CRYPTO_TABLE_ON));
  csDisabled.init(getAllCryptoProperties(ConfigMode.CRYPTO_TABLE_ON_DISABLED));
  // make sure we can read encrypted data with the disabled service
  byte[] encryptedBytes = encrypt(csEnabled, TABLE);
  String stringEncryptedBytes = Arrays.toString(encryptedBytes);
  String stringifiedMarkerBytes = getStringifiedBytes(null, MARKER_STRING, MARKER_INT);
  assertNotEquals(stringEncryptedBytes, stringifiedMarkerBytes);
  decrypt(csDisabled, encryptedBytes, TABLE);
  // make sure we don't encrypt when disabled: output differs from the enabled ciphertext
  byte[] plainBytes = encrypt(csDisabled, TABLE);
  String stringPlainBytes = Arrays.toString(plainBytes);
  assertNotEquals(stringEncryptedBytes, stringPlainBytes);
  decrypt(csDisabled, plainBytes, TABLE);
}
/** With crypto off, the "encrypted" WAL bytes must be exactly the plaintext marker bytes. */
@Test
public void testNoEncryptionWAL() throws Exception {
  CryptoService cs = NoCryptoServiceFactory.NONE;
  byte[] encryptedBytes = encrypt(cs, Scope.WAL);
  String stringifiedBytes = Arrays.toString(encryptedBytes);
  // specify the charset explicitly: the no-arg getBytes() uses the platform default charset,
  // which makes the expected bytes platform-dependent
  String stringifiedMarkerBytes =
      getStringifiedBytes("U+1F47B".getBytes(java.nio.charset.StandardCharsets.UTF_8),
          MARKER_STRING, MARKER_INT);
  assertEquals(stringifiedBytes, stringifiedMarkerBytes);
  decrypt(cs, encryptedBytes, Scope.WAL);
}

/** With crypto off, the "encrypted" table bytes must be exactly the plaintext marker bytes. */
@Test
public void testNoEncryptionRFILE() throws Exception {
  CryptoService cs = new NoCryptoService();
  byte[] encryptedBytes = encrypt(cs, TABLE);
  String stringifiedBytes = Arrays.toString(encryptedBytes);
  // explicit charset for the same reason as in testNoEncryptionWAL
  String stringifiedMarkerBytes =
      getStringifiedBytes("U+1F47B".getBytes(java.nio.charset.StandardCharsets.UTF_8),
          MARKER_STRING, MARKER_INT);
  assertEquals(stringifiedBytes, stringifiedMarkerBytes);
  decrypt(cs, encryptedBytes, TABLE);
}
/**
 * Writes an encrypted RFile with a summarizer, verifies it cannot be read without the crypto
 * configuration, then reads the data and the summaries back with crypto configured.
 */
@Test
public void testRFileClientEncryption() throws Exception {
  AccumuloConfiguration cryptoOnConf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  FileSystem fs = FileSystem.getLocal(hadoopConf);
  ArrayList<Key> keys = testData();
  SummarizerConfiguration sumConf =
      SummarizerConfiguration.builder(KeyCounter.class.getName()).build();
  String file = "target/testFile1.rf";
  fs.delete(new Path(file), true);
  // write the keys (with empty values) into an encrypted RFile
  try (RFileWriter writer = RFile.newWriter().to(file).withFileSystem(fs)
      .withTableProperties(cryptoOnConf).withSummarizers(sumConf).build()) {
    Value empty = new Value();
    writer.startDefaultLocalityGroup();
    for (Key key : keys) {
      writer.append(key, empty);
    }
  }
  // test to make sure the RFile is encrypted: reading without the crypto config must fail
  ArrayList<Key> keysRead = new ArrayList<>();
  try (Scanner iter = RFile.newScanner().from(file).withFileSystem(fs).build()) {
    assertThrows(UncheckedIOException.class, () -> iter.forEach(e -> keysRead.add(e.getKey())),
        "The file was expected to be encrypted but was not");
    assertEquals(0, keysRead.size());
  }
  // reading with the crypto config must return exactly the written keys
  keysRead.clear();
  try (Scanner iter = RFile.newScanner().from(file).withFileSystem(fs)
      .withTableProperties(cryptoOnConf).build()) {
    iter.forEach(e -> keysRead.add(e.getKey()));
  }
  assertEquals(keys, keysRead);
  // the summary data lives in the same (encrypted) file and must also be readable
  Collection<Summary> summaries =
      RFile.summaries().from(file).withFileSystem(fs).withTableProperties(cryptoOnConf).read();
  Summary summary = summaries.stream().collect(onlyElement());
  assertEquals(keys.size(), (long) summary.getStatistics().get("keys"));
  assertEquals(1, summary.getStatistics().size());
  assertEquals(0, summary.getFileStatistics().getInaccurate());
  assertEquals(1, summary.getFileStatistics().getTotal());
}
@Test
// This test is to ensure when Crypto is configured that it can read unencrypted files
public void testReadNoCryptoWithCryptoConfigured() throws Exception {
  AccumuloConfiguration cryptoOffConf = getAccumuloConfig(ConfigMode.CRYPTO_OFF);
  AccumuloConfiguration cryptoOnConf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  FileSystem fs = FileSystem.getLocal(hadoopConf);
  ArrayList<Key> keys = testData();
  String file = "target/testFile2.rf";
  fs.delete(new Path(file), true);
  // write the file without encryption
  try (RFileWriter writer =
      RFile.newWriter().to(file).withFileSystem(fs).withTableProperties(cryptoOffConf).build()) {
    Value empty = new Value();
    writer.startDefaultLocalityGroup();
    for (Key key : keys) {
      writer.append(key, empty);
    }
  }
  // read the unencrypted file back with crypto configured; it must still be readable
  ArrayList<Key> keysRead;
  try (Scanner iter = RFile.newScanner().from(file).withFileSystem(fs)
      .withTableProperties(cryptoOnConf).build()) {
    keysRead = new ArrayList<>();
    iter.forEach(e -> keysRead.add(e.getKey()));
  }
  assertEquals(keys, keysRead);
}
@Test
public void testMissingConfigProperties() throws ReflectiveOperationException {
  // Removing the table key URI property must make the factory fail fast with NPE.
  var cryptoProps = getAllCryptoProperties(ConfigMode.CRYPTO_TABLE_ON);
  var droppedProperty = cryptoProps.remove(AESCryptoService.KEY_URI_PROPERTY);
  assertNotNull(droppedProperty); // sanity: the property was actually present before removal
  String configuredClass = cryptoProps.get(INSTANCE_CRYPTO_FACTORY.getKey());
  CryptoEnvironment env = new CryptoEnvironmentImpl(TABLE, TableId.of("5"), null);
  Class<? extends CryptoServiceFactory> clazz =
      ClassLoaderUtil.loadClass(configuredClass, CryptoServiceFactory.class);
  CryptoServiceFactory factory = clazz.getDeclaredConstructor().newInstance();
  assertThrows(NullPointerException.class, () -> factory.getService(env, cryptoProps));
  // Same check for the WAL scope: dropping the service name property must also fail.
  CryptoEnvironment env2 = new CryptoEnvironmentImpl(WAL);
  var cryptoProps2 = getAllCryptoProperties(ConfigMode.CRYPTO_WAL_ON);
  droppedProperty = cryptoProps2.remove(GenericCryptoServiceFactory.GENERAL_SERVICE_NAME_PROP);
  assertNotNull(droppedProperty);
  assertThrows(NullPointerException.class, () -> factory.getService(env2, cryptoProps2));
}
@Test
public void testAESKeyUtilsGeneratesKey() throws NoSuchAlgorithmException,
    NoSuchProviderException, NoSuchPaddingException, InvalidKeyException {
  // key sizes accepted by AES/CBC (corresponds to 128, 192, and 256 bits)
  int[] validSizes = {16, 24, 32};
  for (int sizeInBytes : validSizes) {
    verifyKeySizeForCBC(RANDOM.get(), sizeInBytes);
  }
  // any other size must be rejected by the cipher
  int[] invalidSizes = {1, 2, 8, 11, 15, 64, 128};
  for (int sizeInBytes : invalidSizes) {
    assertThrows(InvalidKeyException.class, () -> verifyKeySizeForCBC(RANDOM.get(), sizeInBytes));
  }
}
// this has to be a separate method, for spotbugs, because spotbugs annotation doesn't seem to
// apply to the lambda inline
@SuppressFBWarnings(value = "CIPHER_INTEGRITY", justification = "CBC is being tested")
private void verifyKeySizeForCBC(SecureRandom sr, int sizeInBytes)
    throws InvalidKeyException, NoSuchAlgorithmException, NoSuchPaddingException {
  // Generating the key material always succeeds; Cipher.init is what throws
  // InvalidKeyException when sizeInBytes is not a legal AES key length.
  java.security.Key key = AESCryptoService.generateKey(sr, sizeInBytes);
  Cipher.getInstance("AES/CBC/NoPadding").init(Cipher.ENCRYPT_MODE, key);
}
@Test
public void testAESKeyUtilsWrapAndUnwrap()
    throws NoSuchAlgorithmException, NoSuchProviderException {
  // wrap a file encryption key (fek) with a key encryption key (kek), then unwrap it
  java.security.Key kek = AESCryptoService.generateKey(RANDOM.get(), 16);
  java.security.Key fek = AESCryptoService.generateKey(RANDOM.get(), 16);
  byte[] wrapped = AESCryptoService.wrapKey(fek, kek);
  // the wrapped form must not expose the raw key bytes
  assertFalse(Arrays.equals(fek.getEncoded(), wrapped));
  // unwrapping with the same kek must restore the original key
  assertEquals(fek, AESCryptoService.unwrapKey(wrapped, kek));
}
@Test
public void testAESKeyUtilsFailUnwrapWithWrongKEK()
    throws NoSuchAlgorithmException, NoSuchProviderException {
  java.security.Key kek = AESCryptoService.generateKey(RANDOM.get(), 16);
  java.security.Key fek = AESCryptoService.generateKey(RANDOM.get(), 16);
  // flip one byte of the KEK material to produce a different key
  byte[] corrupted = kek.getEncoded();
  corrupted[0]++;
  java.security.Key wrongKek = new SecretKeySpec(corrupted, "AES");
  byte[] wrapped = AESCryptoService.wrapKey(fek, kek);
  // unwrapping with the wrong KEK must fail rather than return garbage
  assertThrows(CryptoException.class, () -> AESCryptoService.unwrapKey(wrapped, wrongKek));
}
@Test
public void testAESKeyUtilsLoadKekFromUri() throws IOException {
  // Load the KEK from the test key file and verify it matches a key built by
  // hand from the same UTF-encoded string the file was created with.
  java.security.Key fileKey = AESCryptoService.loadKekFromUri(keyPath(getClass()));
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  // close the wrapping stream deterministically (the original leaked it)
  try (DataOutputStream dos = new DataOutputStream(baos)) {
    dos.writeUTF("sixteenbytekey");
  }
  SecretKeySpec handKey = new SecretKeySpec(baos.toByteArray(), "AES");
  assertEquals(fileKey, handKey);
}
@Test
public void testAESKeyUtilsLoadKekFromUriInvalidUri() {
  // a URI pointing at a file that does not exist must raise CryptoException
  String missingKeyFile =
      System.getProperty("user.dir") + "/target/CryptoTest-testkeyfile-doesnt-exist";
  assertThrows(CryptoException.class, () -> AESCryptoService.loadKekFromUri(missingKeyFile));
}
@Test
public void testAESKeyUtilsLoadKekFromEmptyFile() {
  // an empty key file provides no key material, so loading must fail with CryptoException
  assertThrows(CryptoException.class,
      () -> AESCryptoService.loadKekFromUri(emptyKeyPath(getClass())));
}
@Test
public void testPerTableFactory() {
  PerTableCryptoServiceFactory factory = new PerTableCryptoServiceFactory();
  CryptoEnvironment env = new CryptoEnvironmentImpl(TABLE, TableId.of("5"), null);
  HashMap<String,String> props = new HashMap<>();
  // empty properties returns NoCrypto
  CryptoService cs = factory.getService(env, props);
  assertEquals(NoCryptoService.class, cs.getClass());
  // with table crypto configured, the factory hands back the AES service
  var config = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  props.putAll(config.getAllCryptoProperties());
  cs = factory.getService(env, props);
  assertEquals(AESCryptoService.class, cs.getClass());
  // a second table (id "6") configured with the no-op service gets NoCrypto
  CryptoEnvironment env2 = new CryptoEnvironmentImpl(TABLE, TableId.of("6"), null);
  props.put(PerTableCryptoServiceFactory.TABLE_SERVICE_NAME_PROP,
      NoCryptoService.class.getName());
  cs = factory.getService(env2, props);
  assertEquals(NoCryptoService.class, cs.getClass());
  // NOTE(review): getCount() appears to count services created per table -- 2 expected
  // after serving tables "5" and "6"; confirm against PerTableCryptoServiceFactory.
  assertEquals(2, factory.getCount());
}
private ArrayList<Key> testData() {
  // fixed set of rows shared by the RFile round-trip tests
  ArrayList<Key> keys = new ArrayList<>();
  for (String row : new String[] {"a", "a1", "a2", "a3"}) {
    keys.add(new Key(row, "cf", "cq"));
  }
  return keys;
}
private <C extends CryptoService> byte[] encrypt(C cs, Scope scope) throws Exception {
  // Encrypt two marker values (a UTF string and an int) with the service's
  // FileEncrypter and return the raw bytes: [decryption params][encrypted markers].
  CryptoEnvironment env = new CryptoEnvironmentImpl(scope, null, null);
  FileEncrypter encrypter = cs.getFileEncrypter(env);
  // Check for null BEFORE using the encrypter. The original asserted after the
  // first call, so a null encrypter would have surfaced as an NPE instead of
  // the intended assertion message.
  assertNotNull(encrypter, "CryptoService returned null FileEncrypter");
  byte[] params = encrypter.getDecryptionParameters();
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  DataOutputStream dataOut = new DataOutputStream(out);
  // decryption parameters are written first so decrypt() can read them back
  CryptoUtils.writeParams(params, dataOut);
  DataOutputStream encrypted =
      new DataOutputStream(encrypter.encryptStream(new NoFlushOutputStream(dataOut)));
  assertNotNull(encrypted);
  encrypted.writeUTF(MARKER_STRING);
  encrypted.writeInt(MARKER_INT);
  // close the encrypting stream first so any final cipher output is flushed
  encrypted.close();
  dataOut.close();
  out.close();
  return out.toByteArray();
}
private void decrypt(CryptoService cs, byte[] resultingBytes, Scope scope) throws Exception {
  // Read back bytes produced by encrypt(): the decryption parameters are consumed
  // while building the decrypter, then the two markers are decrypted and checked.
  try (DataInputStream dataIn = new DataInputStream(new ByteArrayInputStream(resultingBytes))) {
    FileDecrypter decrypter = getFileDecrypter(cs, scope, null, dataIn);
    try (DataInputStream decrypted = new DataInputStream(decrypter.decryptStream(dataIn))) {
      String markerString = decrypted.readUTF();
      int markerInt = decrypted.readInt();
      assertEquals(MARKER_STRING, markerString);
      assertEquals(MARKER_INT, markerInt);
    }
  }
}
private String getStringifiedBytes(byte[] params, String s, int i) throws IOException {
  // Build the byte layout that encrypt() produces with a pass-through cipher:
  // [params length int][params bytes][UTF string][int], rendered via Arrays.toString
  // so it can be compared with assertEquals.
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  // try-with-resources instead of a manual close() so the stream is closed on error too
  try (DataOutputStream dataOut = new DataOutputStream(out)) {
    if (params != null) {
      dataOut.writeInt(params.length);
      dataOut.write(params);
    }
    dataOut.writeUTF(s);
    dataOut.writeInt(i);
  }
  return Arrays.toString(out.toByteArray());
}
// simple counter to just make sure crypto works with summaries
public static class KeyCounter implements Summarizer {
  @Override
  public Collector collector(SummarizerConfiguration sc) {
    return new Collector() {
      // number of non-deleted keys seen so far
      long keys = 0;
      @Override
      public void accept(Key k, Value v) {
        if (!k.isDeleted()) {
          keys++;
        }
      }
      @Override
      public void summarize(StatisticConsumer sc) {
        // expose the count under the single statistic name "keys"
        sc.accept("keys", keys);
      }
    };
  }
  @Override
  public Combiner combiner(SummarizerConfiguration sc) {
    // merge partial counts by summing per-statistic values
    return (m1, m2) -> m2.forEach((k, v) -> m1.merge(k, v, Long::sum));
  }
}
}
| 9,291 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.singletons;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.accumulo.core.singletons.SingletonManager.Mode;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class SingletonManagerTest {
  // Expected states are asserted by comparing to TestService(enabled, enables, disables).
  TestService service1;
  TestService service2;

  @BeforeEach
  public void setup() {
    SingletonManager.reset();
    assertEquals(0, SingletonManager.getReservationCount());
    service1 = new TestService(true);
    service2 = new TestService(false);
    SingletonManager.register(service1);
    SingletonManager.register(service2);
    // registering in CLIENT mode enables a disabled service (service2 gets one enable)
    assertEquals(new TestService(true, 0, 0), service1);
    assertEquals(new TestService(true, 1, 0), service2);
    assertEquals(Mode.CLIENT, SingletonManager.getMode());
  }

  @Test
  public void testClient() {
    SingletonReservation resv1 = SingletonManager.getClientReservation();
    assertEquals(1, SingletonManager.getReservationCount());
    SingletonReservation resv2 = SingletonManager.getClientReservation();
    assertEquals(2, SingletonManager.getReservationCount());
    assertEquals(new TestService(true, 0, 0), service1);
    assertEquals(new TestService(true, 1, 0), service2);
    resv1.close();
    // services stay enabled while at least one reservation remains
    assertEquals(1, SingletonManager.getReservationCount());
    assertEquals(new TestService(true, 0, 0), service1);
    assertEquals(new TestService(true, 1, 0), service2);
    // calling close again should have no effect
    resv1.close();
    assertEquals(1, SingletonManager.getReservationCount());
    assertEquals(new TestService(true, 0, 0), service1);
    assertEquals(new TestService(true, 1, 0), service2);
    resv2.close();
    // last reservation released in CLIENT mode: all services are disabled once
    assertEquals(0, SingletonManager.getReservationCount());
    assertEquals(new TestService(false, 0, 1), service1);
    assertEquals(new TestService(false, 1, 1), service2);
    // taking a new reservation re-enables the services
    SingletonReservation resv3 = SingletonManager.getClientReservation();
    assertEquals(1, SingletonManager.getReservationCount());
    assertEquals(new TestService(true, 1, 1), service1);
    assertEquals(new TestService(true, 2, 1), service2);
    resv3.close();
    assertEquals(0, SingletonManager.getReservationCount());
    assertEquals(new TestService(false, 1, 2), service1);
    assertEquals(new TestService(false, 2, 2), service2);
  }

  @Test
  public void testServerPreventsDisable() {
    SingletonManager.setMode(Mode.SERVER);
    assertEquals(Mode.SERVER, SingletonManager.getMode());
    SingletonReservation resv1 = SingletonManager.getClientReservation();
    assertEquals(1, SingletonManager.getReservationCount());
    SingletonReservation resv2 = SingletonManager.getClientReservation();
    assertEquals(2, SingletonManager.getReservationCount());
    resv1.close();
    resv2.close();
    // in SERVER mode, dropping all reservations does NOT disable the services
    assertEquals(0, SingletonManager.getReservationCount());
    assertEquals(new TestService(true, 0, 0), service1);
    assertEquals(new TestService(true, 1, 0), service2);
    // can not leave server mode, so this should have no effect
    SingletonManager.setMode(Mode.CLIENT);
    assertEquals(Mode.SERVER, SingletonManager.getMode());
    assertEquals(new TestService(true, 0, 0), service1);
    assertEquals(new TestService(true, 1, 0), service2);
  }

  @Test
  public void testServerEnables() {
    SingletonReservation resv1 = SingletonManager.getClientReservation();
    resv1.close();
    assertEquals(new TestService(false, 0, 1), service1);
    assertEquals(new TestService(false, 1, 1), service2);
    // this should enable services
    SingletonManager.setMode(Mode.SERVER);
    assertEquals(Mode.SERVER, SingletonManager.getMode());
    assertEquals(new TestService(true, 1, 1), service1);
    assertEquals(new TestService(true, 2, 1), service2);
    // can not leave server mode, so this should have no effect
    SingletonManager.setMode(Mode.CLIENT);
    assertEquals(Mode.SERVER, SingletonManager.getMode());
    assertEquals(new TestService(true, 1, 1), service1);
    assertEquals(new TestService(true, 2, 1), service2);
  }

  /**
   * SingletonService stub that records how many times it was enabled/disabled. equals() compares
   * (enabled, enables, disables) so expected states can be asserted with assertEquals.
   */
  private static class TestService implements SingletonService {
    boolean enabled;
    int enables = 0;
    int disables = 0;

    TestService(boolean enabled) {
      this.enabled = enabled;
    }

    TestService(boolean enabled, int enables, int disables) {
      this.enabled = enabled;
      this.enables = enables;
      this.disables = disables;
    }

    @Override
    public boolean isEnabled() {
      return enabled;
    }

    @Override
    public void enable() {
      // the manager must never enable an already-enabled service
      assertFalse(enabled);
      enabled = true;
      enables++;
    }

    @Override
    public void disable() {
      // the manager must never disable an already-disabled service
      assertTrue(enabled);
      enabled = false;
      disables++;
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof TestService) {
        TestService ots = (TestService) o;
        return enabled == ots.enabled && enables == ots.enables && disables == ots.disables;
      }
      return false;
    }

    @Override
    public int hashCode() {
      // instances are never hashed; fail loudly if that assumption changes
      throw new UnsupportedOperationException();
    }

    @Override
    public String toString() {
      return "enabled:" + enabled + " enables:" + enables + " disables:" + disables;
    }
  }
}
| 9,292 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.iterators.DefaultIteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.system.MultiIteratorTest;
import org.apache.accumulo.core.iterators.user.AgeOffFilter;
import org.apache.accumulo.core.iterators.user.SummingCombiner;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class IteratorConfigUtilTest {
  private static final Logger log = LoggerFactory.getLogger(IteratorConfigUtilTest.class);
  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
  private static final List<IterInfo> EMPTY_ITERS = Collections.emptyList();

  /** Pass-through iterator base; subclasses override getTopValue to transform values. */
  static class WrappedIter implements SortedKeyValueIterator<Key,Value> {
    protected SortedKeyValueIterator<Key,Value> source;

    @Override
    public WrappedIter deepCopy(IteratorEnvironment env) {
      throw new UnsupportedOperationException();
    }

    @Override
    public Key getTopKey() {
      return source.getTopKey();
    }

    @Override
    public Value getTopValue() {
      return source.getTopValue();
    }

    @Override
    public boolean hasTop() {
      return source.hasTop();
    }

    @Override
    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
        IteratorEnvironment env) throws IOException {
      this.source = source;
    }

    @Override
    public void next() throws IOException {
      source.next();
    }

    @Override
    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
        throws IOException {
      source.seek(range, columnFamilies, inclusive);
    }
  }

  /** Adds a configurable amount (option "amount", default 1) to each numeric value. */
  static class AddingIter extends WrappedIter {
    int amount = 1;

    @Override
    public Value getTopValue() {
      Value val = super.getTopValue();
      int orig = Integer.parseInt(val.toString());
      return new Value((orig + amount) + "");
    }

    @Override
    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
        IteratorEnvironment env) throws IOException {
      super.init(source, options, env);
      String amount = options.get("amount");
      if (amount != null) {
        this.amount = Integer.parseInt(amount);
      }
    }
  }

  /** Squares each numeric value. */
  static class SquaringIter extends WrappedIter {
    @Override
    public Value getTopValue() {
      Value val = super.getTopValue();
      int orig = Integer.parseInt(val.toString());
      return new Value((orig * orig) + "");
    }
  }

  // Builds the iterator stack configured in conf for the given scope on top of source.
  private SortedKeyValueIterator<Key,Value> createIter(IteratorScope scope,
      SortedMapIterator source, AccumuloConfiguration conf) throws IOException {
    var ibEnv = IteratorConfigUtil.loadIterConf(scope, EMPTY_ITERS, new HashMap<>(), conf);
    var iteratorBuilder =
        ibEnv.env(new DefaultIteratorEnvironment(conf)).useClassLoader(null).build();
    return IteratorConfigUtil.loadIterators(source, iteratorBuilder);
  }

  @Test
  public void test1() throws IOException {
    ConfigurationCopy conf = new ConfigurationCopy();
    // create an iterator that adds 1 and then squares
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".addIter",
        "1," + AddingIter.class.getName());
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".sqIter",
        "2," + SquaringIter.class.getName());
    TreeMap<Key,Value> tm = new TreeMap<>();
    MultiIteratorTest.newKeyValue(tm, 1, 0, false, "1");
    MultiIteratorTest.newKeyValue(tm, 2, 0, false, "2");
    SortedMapIterator source = new SortedMapIterator(tm);
    SortedKeyValueIterator<Key,Value> iter = createIter(IteratorScope.minc, source, conf);
    iter.seek(new Range(), EMPTY_COL_FAMS, false);
    // (1+1)^2 = 4 and (2+1)^2 = 9: add runs at priority 1, square at priority 2
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(1, 0));
    assertEquals("4", iter.getTopValue().toString());
    iter.next();
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(2, 0));
    assertEquals("9", iter.getTopValue().toString());
    iter.next();
    assertFalse(iter.hasTop());
  }

  @Test
  public void test4() throws IOException {
    // try loading for a different scope
    AccumuloConfiguration conf = new ConfigurationCopy();
    TreeMap<Key,Value> tm = new TreeMap<>();
    MultiIteratorTest.newKeyValue(tm, 1, 0, false, "1");
    MultiIteratorTest.newKeyValue(tm, 2, 0, false, "2");
    SortedMapIterator source = new SortedMapIterator(tm);
    SortedKeyValueIterator<Key,Value> iter = createIter(IteratorScope.majc, source, conf);
    iter.seek(new Range(), EMPTY_COL_FAMS, false);
    // nothing configured for majc, so the values pass through unchanged
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(1, 0));
    assertEquals("1", iter.getTopValue().toString());
    iter.next();
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(2, 0));
    assertEquals("2", iter.getTopValue().toString());
    iter.next();
    assertFalse(iter.hasTop());
  }

  @Test
  public void test3() throws IOException {
    // change the load order, so it squares and then adds
    ConfigurationCopy conf = new ConfigurationCopy();
    TreeMap<Key,Value> tm = new TreeMap<>();
    MultiIteratorTest.newKeyValue(tm, 1, 0, false, "1");
    MultiIteratorTest.newKeyValue(tm, 2, 0, false, "2");
    SortedMapIterator source = new SortedMapIterator(tm);
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".addIter",
        "2," + AddingIter.class.getName());
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".sqIter",
        "1," + SquaringIter.class.getName());
    SortedKeyValueIterator<Key,Value> iter = createIter(IteratorScope.minc, source, conf);
    iter.seek(new Range(), EMPTY_COL_FAMS, false);
    // 1^2+1 = 2 and 2^2+1 = 5: square now runs first (priority 1)
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(1, 0));
    assertEquals("2", iter.getTopValue().toString());
    iter.next();
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(2, 0));
    assertEquals("5", iter.getTopValue().toString());
    iter.next();
    assertFalse(iter.hasTop());
  }

  @Test
  public void test2() throws IOException {
    ConfigurationCopy conf = new ConfigurationCopy();
    // create an iterator that adds 1 and then squares
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".addIter",
        "1," + AddingIter.class.getName());
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".addIter.opt.amount",
        "7");
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".sqIter",
        "2," + SquaringIter.class.getName());
    TreeMap<Key,Value> tm = new TreeMap<>();
    MultiIteratorTest.newKeyValue(tm, 1, 0, false, "1");
    MultiIteratorTest.newKeyValue(tm, 2, 0, false, "2");
    SortedMapIterator source = new SortedMapIterator(tm);
    SortedKeyValueIterator<Key,Value> iter = createIter(IteratorScope.minc, source, conf);
    iter.seek(new Range(), EMPTY_COL_FAMS, false);
    // with the "amount" option set to 7: (1+7)^2 = 64 and (2+7)^2 = 81
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(1, 0));
    assertEquals("64", iter.getTopValue().toString());
    iter.next();
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(2, 0));
    assertEquals("81", iter.getTopValue().toString());
    iter.next();
    assertFalse(iter.hasTop());
  }

  @Test
  public void test5() throws IOException {
    ConfigurationCopy conf = new ConfigurationCopy();
    // create an iterator that ages off
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".filter",
        "1," + AgeOffFilter.class.getName());
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".filter.opt.ttl", "100");
    conf.set(Property.TABLE_ITERATOR_PREFIX + IteratorScope.minc.name() + ".filter.opt.currentTime",
        "1000");
    TreeMap<Key,Value> tm = new TreeMap<>();
    MultiIteratorTest.newKeyValue(tm, 1, 850, false, "1");
    MultiIteratorTest.newKeyValue(tm, 2, 950, false, "2");
    SortedMapIterator source = new SortedMapIterator(tm);
    SortedKeyValueIterator<Key,Value> iter = createIter(IteratorScope.minc, source, conf);
    iter.seek(new Range(), EMPTY_COL_FAMS, false);
    // ttl=100 at currentTime=1000: the ts=850 entry is aged off, ts=950 survives
    assertTrue(iter.hasTop());
    assertEquals(iter.getTopKey(), MultiIteratorTest.newKey(2, 950));
    iter.next();
    assertFalse(iter.hasTop());
  }

  @Test
  public void onlyReadsRelevantIteratorScopeConfigurations() {
    Map<String,String> data = new HashMap<>();
    // Make some configuration items, one with a bogus scope
    data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foo", "50," + SummingCombiner.class.getName());
    data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foo.opt.all", "true");
    data.put(Property.TABLE_ITERATOR_PREFIX + ".fakescope.bar",
        "50," + SummingCombiner.class.getName());
    data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foo.opt.fakeopt", "fakevalue");
    AccumuloConfiguration conf = new ConfigurationCopy(data);
    List<IterInfo> iterators =
        IteratorConfigUtil.parseIterConf(IteratorScope.scan, EMPTY_ITERS, new HashMap<>(), conf);
    // only the scan-scope iterator "foo" should be parsed; the fake scope is ignored
    assertEquals(1, iterators.size());
    IterInfo ii = iterators.get(0);
    assertEquals(new IterInfo(50, SummingCombiner.class.getName(), "foo"), ii);
  }

  /**
   * Iterators should not contain dots in the name. Also, if the split size on "." is greater than
   * one, it should be 3, i.e., itername.opt.optname
   */
  @Test
  public void testInvalidIteratorFormats() {
    // NOTE(review): the try/catch blocks below log the expected IllegalArgumentException but
    // never fail() when no exception is thrown, so in the first two cases a regression that
    // stops throwing would pass silently -- consider assertThrows. Left unchanged here.
    Map<String,String> data = new HashMap<>();
    Map<String,Map<String,String>> options = new HashMap<>();
    AccumuloConfiguration conf;
    // create iterator with 'dot' in name
    List<IterInfo> iterators = new ArrayList<>();
    try {
      data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foo.bar",
          "50," + SummingCombiner.class.getName());
      conf = new ConfigurationCopy(data);
      iterators = IteratorConfigUtil.parseIterConf(IteratorScope.scan, iterators, options, conf);
    } catch (IllegalArgumentException ex) {
      log.debug("caught expected exception: " + ex.getMessage());
    }
    data.clear();
    iterators.clear();
    options.clear();
    // create iterator with 'dot' in name and with split size of 3. If split size of three, then
    // second part must be 'opt'.
    try {
      data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foo.bar.baz",
          "49," + SummingCombiner.class.getName());
      conf = new ConfigurationCopy(data);
      iterators = IteratorConfigUtil.parseIterConf(IteratorScope.scan, iterators, options, conf);
    } catch (IllegalArgumentException ex) {
      log.debug("caught expected exception: " + ex.getMessage());
    }
    data.clear();
    iterators.clear();
    options.clear();
    // create iterator with invalid option format
    try {
      data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foobar",
          "48," + SummingCombiner.class.getName());
      data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foobar.opt", "fakevalue");
      conf = new ConfigurationCopy(data);
      iterators = IteratorConfigUtil.parseIterConf(IteratorScope.scan, iterators, options, conf);
      assertEquals(1, iterators.size());
      IterInfo ii = iterators.get(0);
      assertEquals(new IterInfo(48, SummingCombiner.class.getName(), "foobar"), ii);
    } catch (IllegalArgumentException ex) {
      log.debug("caught expected exception: " + ex.getMessage());
    }
    data.clear();
    iterators.clear();
    options.clear();
    // create iterator with 'opt' in incorrect position
    try {
      data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foobaz",
          "47," + SummingCombiner.class.getName());
      data.put(Property.TABLE_ITERATOR_SCAN_PREFIX + "foobaz.fake.opt", "fakevalue");
      conf = new ConfigurationCopy(data);
      iterators = IteratorConfigUtil.parseIterConf(IteratorScope.scan, iterators, options, conf);
      assertEquals(1, iterators.size());
      IterInfo ii = iterators.get(0);
      assertEquals(new IterInfo(47, SummingCombiner.class.getName(), "foobaz"), ii);
    } catch (IllegalArgumentException ex) {
      log.debug("caught expected exception: " + ex.getMessage());
    }
  }
}
| 9,293 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.util.Merge.Size;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class MergeTest {
  /** Merge stub that records requested merges instead of executing them against a cluster. */
  static class MergeTester extends Merge {
    public List<List<Size>> merges = new ArrayList<>();
    public List<Size> tablets = new ArrayList<>();

    MergeTester(Integer... sizes) {
      // build a contiguous set of tablets, one per size; the last tablet's end row is null
      Text start = null;
      for (Integer size : sizes) {
        Text end;
        if (tablets.size() == sizes.length - 1) {
          end = null;
        } else {
          end = new Text(String.format("%05d", tablets.size()));
        }
        KeyExtent extent = new KeyExtent(TableId.of("table"), end, start);
        start = end;
        tablets.add(new Size(extent, size));
      }
    }

    @Override
    protected void message(String format, Object... args) {}

    @Override
    protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename,
        final Text start, final Text end) throws MergeException {
      // iterate the fabricated tablets, restricted to those overlapping [start, end)
      final Iterator<Size> impl = tablets.iterator();
      return new Iterator<>() {
        Size next = skip();

        @Override
        public boolean hasNext() {
          return next != null;
        }

        // advance to the next tablet inside the requested range, or null when exhausted
        private Size skip() {
          while (impl.hasNext()) {
            Size candidate = impl.next();
            if (start != null) {
              if (candidate.extent.endRow() != null
                  && candidate.extent.endRow().compareTo(start) < 0) {
                continue;
              }
            }
            if (end != null) {
              if (candidate.extent.prevEndRow() != null
                  && candidate.extent.prevEndRow().compareTo(end) >= 0) {
                continue;
              }
            }
            return candidate;
          }
          return null;
        }

        @Override
        public Size next() {
          Size result = next;
          next = skip();
          return result;
        }

        @Override
        public void remove() {
          impl.remove();
        }
      };
    }

    @Override
    protected void merge(AccumuloClient client, String table, List<Size> sizes, int numToMerge)
        throws MergeException {
      // record the first numToMerge entries rather than performing a real merge
      List<Size> merge = new ArrayList<>();
      for (int i = 0; i < numToMerge; i++) {
        merge.add(sizes.get(i));
      }
      merges.add(merge);
    }
  }

  // extracts the (int) sizes of one recorded merge for easy array comparison
  private static int[] sizes(List<Size> sizes) {
    int[] result = new int[sizes.size()];
    int i = 0;
    for (Size s : sizes) {
      result[i++] = (int) s.size;
    }
    return result;
  }

  @Test
  public void testMergomatic() throws Exception {
    // Merge everything to the last tablet
    int i;
    MergeTester test = new MergeTester(10, 20, 30);
    test.mergomatic(null, "table", null, null, 1000, false);
    assertEquals(1, test.merges.size());
    assertArrayEquals(new int[] {10, 20, 30}, sizes(test.merges.get(i = 0)));
    // Merge ranges around tablets that are big enough
    test = new MergeTester(1, 2, 100, 1000, 17, 1000, 4, 5, 6, 900);
    test.mergomatic(null, "table", null, null, 1000, false);
    assertEquals(2, test.merges.size());
    assertArrayEquals(new int[] {1, 2, 100}, sizes(test.merges.get(i = 0)));
    assertArrayEquals(new int[] {4, 5, 6, 900}, sizes(test.merges.get(++i)));
    // Test the force option
    test = new MergeTester(1, 2, 100, 1000, 17, 1000, 4, 5, 6, 900);
    test.mergomatic(null, "table", null, null, 1000, true);
    assertEquals(3, test.merges.size());
    assertArrayEquals(new int[] {1, 2, 100}, sizes(test.merges.get(i = 0)));
    assertArrayEquals(new int[] {17, 1000}, sizes(test.merges.get(++i)));
    assertArrayEquals(new int[] {4, 5, 6, 900}, sizes(test.merges.get(++i)));
    // Limit the low-end of the merges
    test = new MergeTester(1, 2, 1000, 17, 1000, 4, 5, 6, 900);
    test.mergomatic(null, "table", new Text("00004"), null, 1000, false);
    assertEquals(1, test.merges.size());
    assertArrayEquals(new int[] {4, 5, 6, 900}, sizes(test.merges.get(i = 0)));
    // Limit the upper end of the merges
    test = new MergeTester(1, 2, 1000, 17, 1000, 4, 5, 6, 900);
    test.mergomatic(null, "table", null, new Text("00004"), 1000, false);
    assertEquals(1, test.merges.size());
    assertArrayEquals(new int[] {1, 2}, sizes(test.merges.get(i = 0)));
    // Limit both ends
    test = new MergeTester(1, 2, 1000, 17, 1000, 4, 5, 6, 900);
    test.mergomatic(null, "table", new Text("00002"), new Text("00004"), 1000, true);
    assertEquals(1, test.merges.size());
    assertArrayEquals(new int[] {17, 1000}, sizes(test.merges.get(i = 0)));
    // Clump up tablets into larger values
    test = new MergeTester(100, 250, 500, 600, 100, 200, 500, 200);
    test.mergomatic(null, "table", null, null, 1000, false);
    assertEquals(3, test.merges.size());
    assertArrayEquals(new int[] {100, 250, 500}, sizes(test.merges.get(i = 0)));
    assertArrayEquals(new int[] {600, 100, 200}, sizes(test.merges.get(++i)));
    assertArrayEquals(new int[] {500, 200}, sizes(test.merges.get(++i)));
  }
}
| 9,294 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/NumUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.apache.accumulo.core.util.NumUtil.bigNumberForQuantity;
import static org.apache.accumulo.core.util.NumUtil.bigNumberForSize;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.Locale;
import org.junit.jupiter.api.Test;
public class NumUtilTest {

/**
 * Verifies {@link NumUtil#bigNumberForSize(long)} renders byte counts with
 * binary (1024-based) K/M/G/T suffixes, and plain grouped digits below 1K.
 */
@Test
public void testBigNumberForSize() {
// Expected strings use US grouping/decimal separators ("1,024.00").
Locale.setDefault(Locale.US);
assertEquals("1,000", bigNumberForSize(1000));
assertEquals("1.00K", bigNumberForSize(1024));
assertEquals("1.50K", bigNumberForSize(1024 + (1024 / 2)));
// One byte shy of 1 MiB stays in the K bucket.
assertEquals("1,024.00K", bigNumberForSize(1024 * 1024 - 1));
assertEquals("1.00M", bigNumberForSize(1024 * 1024));
assertEquals("1.50M", bigNumberForSize(1024 * 1024 + (1024 * 1024 / 2)));
assertEquals("1,024.00M", bigNumberForSize(1073741823));
assertEquals("1.00G", bigNumberForSize(1073741824));
assertEquals("1,024.00G", bigNumberForSize(1099511627775L));
assertEquals("1.00T", bigNumberForSize(1099511627776L));
}

/**
 * Verifies {@link NumUtil#bigNumberForQuantity(long)} renders counts with
 * decimal (1000-based) K/M/B/T suffixes.
 */
@Test
public void testBigNumberForQuantity() {
// Fix the locale here too: the expected strings are US-formatted, and JUnit
// does not guarantee that testBigNumberForSize (which also sets it) ran first.
Locale.setDefault(Locale.US);
assertEquals("999", bigNumberForQuantity(999));
assertEquals("1.00K", bigNumberForQuantity(1000));
assertEquals("1.02K", bigNumberForQuantity(1024));
assertEquals("5.00K", bigNumberForQuantity(5000));
assertEquals("50.00K", bigNumberForQuantity(50000));
assertEquals("5.00M", bigNumberForQuantity(5000000));
assertEquals("5.00B", bigNumberForQuantity(5000000000L));
assertEquals("5.00T", bigNumberForQuantity(5000000000000L));
}
}
| 9,295 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/CompletableFutureUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;
import java.util.function.Predicate;
import org.junit.jupiter.api.Test;
public class CompletableFutureUtilTest {

/**
 * Merges lists of async futures producing 1..n and checks the combined sum,
 * including the empty-list case where the supplier's identity value is used.
 */
@Test
public void testMerge() throws Exception {
ExecutorService es = Executors.newFixedThreadPool(3);
try {
for (int n : new int[] {1, 2, 3, 997, 1000}) {
List<CompletableFuture<Integer>> futures = new ArrayList<>();
for (int i = 1; i <= n; i++) {
final int num = i;
futures.add(CompletableFuture.supplyAsync(() -> num, es));
}
CompletableFuture<Integer> mergedFutures =
CompletableFutureUtil.merge(futures, Integer::sum, () -> 0);
// Sum of 1..n is n*(n+1)/2.
assertEquals(n * (n + 1) / 2, mergedFutures.get().intValue());
}
// test zero: merging no futures yields the supplier's identity value
CompletableFuture<Integer> mergedFutures =
CompletableFutureUtil.merge(Collections.emptyList(), Integer::sum, () -> 0);
assertEquals(0, mergedFutures.get().intValue());
} finally {
es.shutdown();
}
}

/**
 * Iterates a decrement step until reaching zero, and checks that exceptions
 * thrown from the step, its future, or the predicate surface via the result.
 */
@Test
public void testIterateUntil() throws Exception {
ExecutorService es = Executors.newFixedThreadPool(1);
try {
Function<Integer,CompletableFuture<Integer>> step =
n -> CompletableFuture.supplyAsync(() -> n - 1, es);
Predicate<Integer> isDone = n -> n == 0;
// The call stack should overflow before 10,000 calls, so this
// effectively tests whether iterateUntil avoids stack overflows
// when given async futures.
for (int n : new int[] {0, 1, 2, 3, 100, 10_000}) {
assertEquals(0, CompletableFutureUtil.iterateUntil(step, isDone, n).get());
}
// Test throwing an exception in the step function.
{
Function<Integer,CompletableFuture<Integer>> badStep = n -> {
throw new RuntimeException();
};
assertThrows(ExecutionException.class,
() -> CompletableFutureUtil.iterateUntil(badStep, isDone, 100).get());
}
// Test throwing an exception in the future returned by the step
// function.
{
Function<Integer,CompletableFuture<Integer>> badStep =
n -> CompletableFuture.supplyAsync(() -> {
throw new RuntimeException();
}, es);
assertThrows(ExecutionException.class,
() -> CompletableFutureUtil.iterateUntil(badStep, isDone, 100).get());
}
// Test throwing an exception in the predicate.
{
Predicate<Integer> badIsDone = n -> {
throw new RuntimeException();
};
assertThrows(ExecutionException.class,
() -> CompletableFutureUtil.iterateUntil(step, badIsDone, 100).get());
}
} finally {
// Previously this executor was never shut down, leaking its worker thread
// across the test run; mirror the try/finally used by testMerge.
es.shutdown();
}
}
}
| 9,296 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/TextUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
* Test the TextUtil class.
*/
public class TextUtilTest {

/**
 * Verifies {@link TextUtil#getBytes(Text)} returns an array trimmed to the
 * Text's logical length, even when the Text's backing buffer is larger
 * (which happens after shrinking a Text via {@code set}).
 */
@Test
public void testGetBytes() {
String longMessage = "This is some text";
Text longMessageText = new Text(longMessage);
String smallerMessage = "a";
Text smallerMessageText = new Text(smallerMessage);
Text someText = new Text(longMessage);
assertEquals(someText, longMessageText);
// Shrinking the Text leaves its backing buffer at the old, larger size...
someText.set(smallerMessageText);
assertTrue(someText.getLength() != someText.getBytes().length);
// ...but getBytes must trim to the logical length. Note: arguments are
// ordered (expected, actual) per the JUnit convention; the original had
// them reversed, which produced misleading failure messages.
assertEquals(smallerMessage.length(), TextUtil.getBytes(someText).length);
assertEquals(smallerMessageText, new Text(TextUtil.getBytes(someText)));
}
}
| 9,297 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/UnsynchronizedBufferTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.WritableUtils;
import org.junit.jupiter.api.Test;
// Tests for UnsynchronizedBuffer: reading from ByteBuffers (including sliced
// ones with a non-zero offset), variable-length int/long encoding widths,
// compatibility with Hadoop's WritableUtils encoding, and array-growth sizing.
public class UnsynchronizedBufferTest {
@Test
public void testByteBufferConstructor() {
byte[] test = "0123456789".getBytes(UTF_8);
// Reader wrapping the whole array should yield all ten bytes.
ByteBuffer bb1 = ByteBuffer.wrap(test);
UnsynchronizedBuffer.Reader ub = new UnsynchronizedBuffer.Reader(bb1);
byte[] buf = new byte[10];
ub.readBytes(buf);
assertEquals("0123456789", new String(buf, UTF_8));
// Reader wrapping a slice (offset 3, length 5) must honor both bounds.
ByteBuffer bb2 = ByteBuffer.wrap(test, 3, 5);
ub = new UnsynchronizedBuffer.Reader(bb2);
buf = new byte[5];
// should read data from offset 3 where the byte buffer starts
ub.readBytes(buf);
assertEquals("34567", new String(buf, UTF_8));
buf = new byte[6];
// the byte buffer has the extra byte, but should not be able to read it...
// (effectively-final copies are needed to reference these from the lambda)
final UnsynchronizedBuffer.Reader finalUb = ub;
final byte[] finalBuf = buf;
assertThrows(ArrayIndexOutOfBoundsException.class, () -> finalUb.readBytes(finalBuf));
}
@Test
public void testWriteVMethods() throws Exception {
// writeV methods use an extra byte for length, unless value is only one byte
// Each constant below is the largest value that fits in the given byte count.
// Integer.MAX_VALUE = 0x7fffffff
testInteger(0x7fffffff, 4 + 1);
testInteger(0x7fffff, 3 + 1);
testInteger(0x7fff, 2 + 1);
testInteger(0x7f, 1);
// Long.MAX_VALUE = 0x7fffffffffffffffL
testLong(0x7fffffffffffffffL, 8 + 1);
testLong(0x7fffffffffffffL, 7 + 1);
testLong(0x7fffffffffffL, 6 + 1);
testLong(0x7fffffffffL, 5 + 1);
testLong(0x7fffffffL, 4 + 1);
testLong(0x7fffffL, 3 + 1);
testLong(0x7fffL, 2 + 1);
testLong(0x7fL, 1);
}
// Writes value as a vint and asserts the encoded form is exactly `length` bytes.
private void testInteger(int value, int length) throws Exception {
byte[] integerBuffer = new byte[5];
try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos)) {
UnsynchronizedBuffer.writeVInt(dos, integerBuffer, value);
dos.flush();
assertEquals(length, baos.toByteArray().length);
}
}
// Writes value as a vlong and asserts the encoded form is exactly `length` bytes.
private void testLong(long value, int length) throws Exception {
byte[] longBuffer = new byte[9];
try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos)) {
UnsynchronizedBuffer.writeVLong(dos, longBuffer, value);
dos.flush();
assertEquals(length, baos.toByteArray().length);
}
}
// UnsynchronizedBuffer's varint encoding must stay byte-for-byte compatible
// with Hadoop's WritableUtils, since data written by one is read by the other.
@Test
public void compareWithWritableUtils() throws Exception {
byte[] hadoopBytes;
byte[] accumuloBytes;
int oneByteInt = 0x7f;
int threeByteInt = 0x7fff;
long sixByteLong = 0x7fffffffffL;
try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos)) {
WritableUtils.writeVInt(dos, oneByteInt);
WritableUtils.writeVInt(dos, threeByteInt);
WritableUtils.writeVLong(dos, sixByteLong);
dos.flush();
hadoopBytes = baos.toByteArray();
}
try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos)) {
UnsynchronizedBuffer.writeVInt(dos, new byte[5], oneByteInt);
UnsynchronizedBuffer.writeVInt(dos, new byte[5], threeByteInt);
UnsynchronizedBuffer.writeVLong(dos, new byte[9], sixByteLong);
dos.flush();
accumuloBytes = baos.toByteArray();
}
assertArrayEquals(hadoopBytes, accumuloBytes,
"The byte array written to by UnsynchronizedBuffer is not equal to WritableUtils");
}
@Test
public void testNextArraySizeNegative() {
// Negative requested sizes are rejected outright.
assertThrows(IllegalArgumentException.class, () -> UnsynchronizedBuffer.nextArraySize(-1));
}
// nextArraySize rounds a requested capacity up to the next power of two,
// capped at Integer.MAX_VALUE - 8 (the practical max JVM array length).
@Test
public void testNextArraySize() {
// 0 <= size <= 2^0
assertEquals(1, UnsynchronizedBuffer.nextArraySize(0));
assertEquals(1, UnsynchronizedBuffer.nextArraySize(1));
// 2^0 < size <= 2^1
assertEquals(2, UnsynchronizedBuffer.nextArraySize(2));
// 2^1 < size <= 2^30
for (int exp = 1; exp < 30; ++exp) {
// 2^exp < size <= 2^(exp+1) (for all exp: [1,29])
int nextExp = exp + 1;
assertEquals(1 << nextExp, UnsynchronizedBuffer.nextArraySize((1 << exp) + 1));
assertEquals(1 << nextExp, UnsynchronizedBuffer.nextArraySize(1 << nextExp));
}
// 2^30 < size < Integer.MAX_VALUE
assertEquals(Integer.MAX_VALUE - 8, UnsynchronizedBuffer.nextArraySize((1 << 30) + 1));
assertEquals(Integer.MAX_VALUE - 8, UnsynchronizedBuffer.nextArraySize(Integer.MAX_VALUE - 9));
assertEquals(Integer.MAX_VALUE - 8, UnsynchronizedBuffer.nextArraySize(Integer.MAX_VALUE - 8));
assertEquals(Integer.MAX_VALUE - 8, UnsynchronizedBuffer.nextArraySize(Integer.MAX_VALUE));
}
}
| 9,298 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/MonitorUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.mock;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.fate.zookeeper.ZooReader;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
// Verifies MonitorUtil.getLocation: it reads the monitor's advertised HTTP
// address out of ZooKeeper, and reports null when the node does not exist.
public class MonitorUtilTest {

// Root path the mocked client context reports for this instance.
private static final String ZK_ROOT = "/root";

private ZooReader zooReader;
private ClientContext clientContext;

@BeforeEach
public void beforeEachTest() {
zooReader = mock(ZooReader.class);
clientContext = mock(ClientContext.class);
expect(clientContext.getZooKeeperRoot()).andReturn(ZK_ROOT);
}

@AfterEach
public void afterEachTest() {
// Confirm every expectation configured on the mocks was exercised.
verify(zooReader, clientContext);
}

@Test
public void testNodeFound() throws Exception {
String monitorUrl = "http://example.org/";
expect(zooReader.getData(ZK_ROOT + Constants.ZMONITOR_HTTP_ADDR))
.andReturn(monitorUrl.getBytes(UTF_8));
replay(zooReader, clientContext);
assertEquals(monitorUrl, MonitorUtil.getLocation(zooReader, clientContext));
}

@Test
public void testNoNodeFound() throws Exception {
// A missing ZooKeeper node means no monitor is running: expect null.
expect(zooReader.getData(ZK_ROOT + Constants.ZMONITOR_HTTP_ADDR))
.andThrow(new NoNodeException());
replay(zooReader, clientContext);
assertNull(MonitorUtil.getLocation(zooReader, clientContext));
}
}
| 9,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.