index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/metrics/reporter/KafkaProducerPusherTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metrics.reporter; import java.io.IOException; import org.apache.kafka.clients.producer.ProducerConfig; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.common.base.Optional; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.typesafe.config.ConfigFactory; import kafka.consumer.ConsumerIterator; import org.apache.gobblin.kafka.KafkaTestBase; import org.apache.gobblin.metrics.kafka.KafkaProducerPusher; import org.apache.gobblin.metrics.kafka.Pusher; /** * Test {@link org.apache.gobblin.metrics.kafka.KafkaProducerPusher}. 
*/ public class KafkaProducerPusherTest { public static final String TOPIC = KafkaProducerPusherTest.class.getSimpleName(); private KafkaTestBase kafkaTestHelper; @BeforeClass public void setup() throws Exception { kafkaTestHelper = new KafkaTestBase(); kafkaTestHelper.startServers(); kafkaTestHelper.provisionTopic(TOPIC); } @Test public void test() throws IOException { // Test that the scoped config overrides the generic config Pusher pusher = new KafkaProducerPusher("127.0.0.1:dummy", TOPIC, Optional.of(ConfigFactory.parseMap(ImmutableMap.of( ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort())))); String msg1 = "msg1"; String msg2 = "msg2"; pusher.pushMessages(Lists.newArrayList(msg1.getBytes(), msg2.getBytes())); try { Thread.sleep(1000); } catch(InterruptedException ex) { Thread.currentThread().interrupt(); } ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC); assert(iterator.hasNext()); Assert.assertEquals(new String(iterator.next().message()), msg1); assert(iterator.hasNext()); Assert.assertEquals(new String(iterator.next().message()), msg2); pusher.close(); } @AfterClass public void after() { try { this.kafkaTestHelper.close(); } catch(Exception e) { System.err.println("Failed to close Kafka server."); } } }
3,600
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/runtime/KafkaAvroJobMonitorTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.runtime; import java.io.IOException; import java.util.Collection; import java.util.List; import org.apache.avro.Schema; import org.testng.Assert; import org.testng.annotations.Test; import com.google.common.base.Optional; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.gobblin.metrics.GobblinTrackingEvent; import org.apache.gobblin.metrics.Metric; import org.apache.gobblin.metrics.MetricReport; import org.apache.gobblin.metrics.reporter.util.AvroBinarySerializer; import org.apache.gobblin.metrics.reporter.util.AvroSerializer; import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter; import org.apache.gobblin.metrics.reporter.util.NoopSchemaVersionWriter; import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter; import org.apache.gobblin.runtime.api.JobSpec; import org.apache.gobblin.runtime.job_monitor.KafkaAvroJobMonitor; import org.apache.gobblin.runtime.job_monitor.KafkaJobMonitor; public class KafkaAvroJobMonitorTest { @Test public void testSimple() throws Exception { TestKafkaAvroJobMonitor monitor = new TestKafkaAvroJobMonitor(GobblinTrackingEvent.SCHEMA$, new NoopSchemaVersionWriter()); 
monitor.buildMetricsContextAndMetrics(); AvroSerializer<GobblinTrackingEvent> serializer = new AvroBinarySerializer<>(GobblinTrackingEvent.SCHEMA$, new NoopSchemaVersionWriter()); GobblinTrackingEvent event = new GobblinTrackingEvent(0L, "namespace", "event", Maps.<String, String>newHashMap()); Collection<JobSpec> results = monitor.parseJobSpec(serializer.serializeRecord(event)); Assert.assertEquals(results.size(), 1); Assert.assertEquals(monitor.events.size(), 1); Assert.assertEquals(monitor.events.get(0), event); monitor.shutdownMetrics(); } @Test public void testWrongSchema() throws Exception { TestKafkaAvroJobMonitor monitor = new TestKafkaAvroJobMonitor(GobblinTrackingEvent.SCHEMA$, new NoopSchemaVersionWriter()); monitor.buildMetricsContextAndMetrics(); AvroSerializer<MetricReport> serializer = new AvroBinarySerializer<>(MetricReport.SCHEMA$, new NoopSchemaVersionWriter()); MetricReport event = new MetricReport(Maps.<String, String>newHashMap(), 0L, Lists.<Metric>newArrayList()); Collection<JobSpec> results = monitor.parseJobSpec(serializer.serializeRecord(event)); Assert.assertEquals(results.size(), 0); Assert.assertEquals(monitor.events.size(), 0); Assert.assertEquals(monitor.getMessageParseFailures().getCount(), 1); monitor.shutdownMetrics(); } @Test public void testUsingSchemaVersion() throws Exception { TestKafkaAvroJobMonitor monitor = new TestKafkaAvroJobMonitor(GobblinTrackingEvent.SCHEMA$, new FixedSchemaVersionWriter()); monitor.buildMetricsContextAndMetrics(); AvroSerializer<GobblinTrackingEvent> serializer = new AvroBinarySerializer<>(GobblinTrackingEvent.SCHEMA$, new FixedSchemaVersionWriter()); GobblinTrackingEvent event = new GobblinTrackingEvent(0L, "namespace", "event", Maps.<String, String>newHashMap()); Collection<JobSpec> results = monitor.parseJobSpec(serializer.serializeRecord(event)); Assert.assertEquals(results.size(), 1); Assert.assertEquals(monitor.events.size(), 1); Assert.assertEquals(monitor.events.get(0), event); 
monitor.shutdownMetrics(); } @Test public void testWrongSchemaVersionWriter() throws Exception { TestKafkaAvroJobMonitor monitor = new TestKafkaAvroJobMonitor(GobblinTrackingEvent.SCHEMA$, new NoopSchemaVersionWriter()); monitor.buildMetricsContextAndMetrics(); AvroSerializer<GobblinTrackingEvent> serializer = new AvroBinarySerializer<>(GobblinTrackingEvent.SCHEMA$, new FixedSchemaVersionWriter()); GobblinTrackingEvent event = new GobblinTrackingEvent(0L, "namespace", "event", Maps.<String, String>newHashMap()); Collection<JobSpec> results = monitor.parseJobSpec(serializer.serializeRecord(event)); Assert.assertEquals(results.size(), 0); Assert.assertEquals(monitor.events.size(), 0); Assert.assertEquals(monitor.getMessageParseFailures().getCount(), 1); monitor.shutdownMetrics(); } private class TestKafkaAvroJobMonitor extends KafkaAvroJobMonitor<GobblinTrackingEvent> { private List<GobblinTrackingEvent> events = Lists.newArrayList(); public TestKafkaAvroJobMonitor(Schema schema, SchemaVersionWriter<?> versionWriter) { super("dummy", null, HighLevelConsumerTest.getSimpleConfig(Optional.of(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX)), schema, versionWriter); } @Override public Collection<JobSpec> parseJobSpec(GobblinTrackingEvent message) { this.events.add(message); return Lists.newArrayList(JobSpec.builder(message.getName()).build()); } @Override protected void buildMetricsContextAndMetrics() { super.buildMetricsContextAndMetrics(); } @Override protected void shutdownMetrics() throws IOException { super.shutdownMetrics(); } } }
3,601
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/runtime/KafkaJobMonitorTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.runtime; import java.net.URI; import java.util.Properties; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeSuite; import org.testng.annotations.Test; import com.google.common.io.Closer; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.kafka.KafkaTestBase; import org.apache.gobblin.kafka.client.AbstractBaseKafkaConsumerClient; import org.apache.gobblin.kafka.client.Kafka09ConsumerClient; import org.apache.gobblin.kafka.writer.Kafka09DataWriter; import org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys; import org.apache.gobblin.runtime.job_monitor.KafkaJobMonitor; import org.apache.gobblin.runtime.job_monitor.MockedKafkaJobMonitor; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.writer.AsyncDataWriter; import org.apache.gobblin.writer.WriteCallback; public class KafkaJobMonitorTest extends KafkaTestBase { private static final String BOOTSTRAP_SERVERS_KEY = "bootstrap.servers"; private static final String KAFKA_AUTO_OFFSET_RESET_KEY = "auto.offset.reset"; private static final String SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT = 
AbstractBaseKafkaConsumerClient.CONFIG_NAMESPACE + "." + AbstractBaseKafkaConsumerClient.CONSUMER_CONFIG + "."; private static final String TOPIC = KafkaJobMonitorTest.class.getSimpleName(); private static final int NUM_PARTITIONS = 2; private Closer _closer; private String _kafkaBrokers; private AsyncDataWriter dataWriter; public KafkaJobMonitorTest() throws InterruptedException, RuntimeException { super(); _kafkaBrokers = "localhost:" + this.getKafkaServerPort(); } @BeforeSuite public void beforeSuite() throws Exception { startServers(); _closer = Closer.create(); Properties producerProps = new Properties(); producerProps.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, TOPIC); producerProps.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + BOOTSTRAP_SERVERS_KEY, _kafkaBrokers); producerProps.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + KafkaWriterConfigurationKeys.VALUE_SERIALIZER_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); producerProps.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, this.getZkConnectString()); producerProps.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, String.valueOf(NUM_PARTITIONS)); dataWriter = _closer.register(new Kafka09DataWriter(producerProps)); } @Test public void test() throws Exception { Properties consumerProps = new Properties(); consumerProps.put(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX + "." + ConfigurationKeys.KAFKA_BROKERS, _kafkaBrokers); consumerProps.put(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX + "." + Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); consumerProps.setProperty(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX + "." 
+ SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT + KAFKA_AUTO_OFFSET_RESET_KEY, "earliest"); MockedKafkaJobMonitor monitor = MockedKafkaJobMonitor.create(TOPIC, ConfigUtils.propertiesToConfig(consumerProps)); monitor.startAsync().awaitRunning(); WriteCallback mockCallback = Mockito.mock(WriteCallback.class); dataWriter.write("job1:1".getBytes(), mockCallback); monitor.awaitExactlyNSpecs(1); Assert.assertTrue(monitor.getJobSpecs().containsKey(new URI("job1"))); Assert.assertEquals(monitor.getJobSpecs().get(new URI("job1")).getVersion(), "1"); dataWriter.write("job2:1".getBytes(), mockCallback); monitor.awaitExactlyNSpecs(2); Assert.assertTrue(monitor.getJobSpecs().containsKey(new URI("job2"))); Assert.assertEquals(monitor.getJobSpecs().get(new URI("job2")).getVersion(), "1"); dataWriter.write((MockedKafkaJobMonitor.REMOVE + ":job1").getBytes(), mockCallback); monitor.awaitExactlyNSpecs(1); Assert.assertFalse(monitor.getJobSpecs().containsKey(new URI("job1"))); Assert.assertTrue(monitor.getJobSpecs().containsKey(new URI("job2"))); dataWriter.write(("job2:2,job1:2").getBytes(), mockCallback); monitor.awaitExactlyNSpecs(2); Assert.assertTrue(monitor.getJobSpecs().containsKey(new URI("job1"))); Assert.assertEquals(monitor.getJobSpecs().get(new URI("job1")).getVersion(), "2"); Assert.assertTrue(monitor.getJobSpecs().containsKey(new URI("job2"))); Assert.assertEquals(monitor.getJobSpecs().get(new URI("job2")).getVersion(), "2"); monitor.shutDown(); } @AfterSuite public void afterSuite() { try { _closer.close(); } catch (Exception e) { System.out.println("Failed to close data writer." + e); } finally { try { close(); } catch (Exception e) { System.out.println("Failed to close Kafka server."+ e); } } } }
3,602
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/runtime/HighLevelConsumerTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.runtime; import java.io.File; import java.util.List; import java.util.Properties; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeSuite; import org.testng.annotations.Test; import com.google.api.client.util.Lists; import com.google.common.base.Joiner; import com.google.common.base.Optional; import com.google.common.io.Closer; import com.google.common.io.Files; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.kafka.KafkaTestBase; import org.apache.gobblin.kafka.client.AbstractBaseKafkaConsumerClient; import org.apache.gobblin.kafka.client.Kafka09ConsumerClient; import org.apache.gobblin.kafka.writer.Kafka09DataWriter; import org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys; import org.apache.gobblin.runtime.kafka.HighLevelConsumer; import org.apache.gobblin.runtime.kafka.MockedHighLevelConsumer; import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition; import org.apache.gobblin.test.TestUtils; import 
org.apache.gobblin.testing.AssertWithBackoff; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.writer.AsyncDataWriter; import org.apache.gobblin.writer.WriteCallback; @Test @Slf4j public class HighLevelConsumerTest extends KafkaTestBase { private static final String BOOTSTRAP_SERVERS_KEY = "bootstrap.servers"; private static final String KAFKA_AUTO_OFFSET_RESET_KEY = "auto.offset.reset"; private static final String SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT = AbstractBaseKafkaConsumerClient.CONFIG_NAMESPACE + "." + AbstractBaseKafkaConsumerClient.CONSUMER_CONFIG + "."; private static final String TOPIC = HighLevelConsumerTest.class.getSimpleName(); private static final int NUM_PARTITIONS = 2; private static final int NUM_MSGS = 10; private Closer _closer; private String _kafkaBrokers; public HighLevelConsumerTest() throws InterruptedException, RuntimeException { super(); _kafkaBrokers = "127.0.0.1:" + this.getKafkaServerPort(); } @BeforeSuite public void beforeSuite() throws Exception { startServers(); _closer = Closer.create(); Properties producerProps = new Properties(); producerProps.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, TOPIC); producerProps .setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + BOOTSTRAP_SERVERS_KEY, _kafkaBrokers); producerProps.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + KafkaWriterConfigurationKeys.VALUE_SERIALIZER_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); producerProps.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, this.getZkConnectString()); producerProps.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, String.valueOf(NUM_PARTITIONS)); producerProps.setProperty(KafkaWriterConfigurationKeys.DELETE_TOPIC_IF_EXISTS, String.valueOf(true)); AsyncDataWriter<byte[]> dataWriter = _closer.register(new Kafka09DataWriter<byte[], byte[]>(producerProps)); List<byte[]> records = createByteArrayMessages(); WriteCallback mock 
= Mockito.mock(WriteCallback.class); for (byte[] record : records) { dataWriter.write(record, mock); } dataWriter.flush(); } public static Config getSimpleConfig(Optional<String> prefix) { Properties properties = new Properties(); properties.put(getConfigKey(prefix, ConfigurationKeys.KAFKA_BROKERS), "127.0.0.1:" + TestUtils.findFreePort()); properties.put(getConfigKey(prefix, Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY), Kafka09ConsumerClient.KAFKA_09_DEFAULT_KEY_DESERIALIZER); properties.put(getConfigKey(prefix, "zookeeper.connect"), "zookeeper"); properties.put(ConfigurationKeys.STATE_STORE_ENABLED, "true"); File tmpDir = Files.createTempDir(); tmpDir.deleteOnExit(); properties.put(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, tmpDir.toString()); return ConfigFactory.parseProperties(properties); } private static String getConfigKey(Optional<String> prefix, String key) { return prefix.isPresent() ? prefix.get() + "." + key : key; } @Test public void testConsumerAutoOffsetCommit() throws Exception { Properties consumerProps = new Properties(); consumerProps.setProperty(ConfigurationKeys.KAFKA_BROKERS, _kafkaBrokers); consumerProps.setProperty(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); consumerProps.setProperty(SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT + KAFKA_AUTO_OFFSET_RESET_KEY, "earliest"); //Generate a brand new consumer group id to ensure there are no previously committed offsets for this group id String consumerGroupId = Joiner.on("-").join(TOPIC, "auto", System.currentTimeMillis()); consumerProps.setProperty(SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT + HighLevelConsumer.GROUP_ID_KEY, consumerGroupId); consumerProps.setProperty(HighLevelConsumer.ENABLE_AUTO_COMMIT_KEY, "true"); MockedHighLevelConsumer consumer = new MockedHighLevelConsumer(TOPIC, ConfigUtils.propertiesToConfig(consumerProps), NUM_PARTITIONS); consumer.startAsync().awaitRunning(); 
consumer.awaitExactlyNMessages(NUM_MSGS, 10000); consumer.shutDown(); } @Test public void testConsumerManualOffsetCommit() throws Exception { Properties consumerProps = new Properties(); consumerProps.setProperty(ConfigurationKeys.KAFKA_BROKERS, _kafkaBrokers); consumerProps.setProperty(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); consumerProps.setProperty(SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT + KAFKA_AUTO_OFFSET_RESET_KEY, "earliest"); //Generate a brand new consumer group id to ensure there are no previously committed offsets for this group id String consumerGroupId = Joiner.on("-").join(TOPIC, "manual", System.currentTimeMillis()); consumerProps.setProperty(SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT + HighLevelConsumer.GROUP_ID_KEY, consumerGroupId); // Setting this to a second to make sure we are committing offsets frequently consumerProps.put(HighLevelConsumer.OFFSET_COMMIT_TIME_THRESHOLD_SECS_KEY, 1); MockedHighLevelConsumer consumer = new MockedHighLevelConsumer(TOPIC, ConfigUtils.propertiesToConfig(consumerProps), NUM_PARTITIONS); consumer.startAsync().awaitRunning(); consumer.awaitExactlyNMessages(NUM_MSGS, 10000); for(int i=0; i< NUM_PARTITIONS; i++) { KafkaPartition partition = new KafkaPartition.Builder().withTopicName(TOPIC).withId(i).build(); AssertWithBackoff.assertTrue(input -> consumer.getCommittedOffsets().containsKey(partition), 5000, "waiting for committing offsets", log, 2, 1000); } consumer.shutDown(); } @Test public void testCalculateProduceToConsumeLag() { Properties consumerProps = new Properties(); consumerProps.setProperty(ConfigurationKeys.KAFKA_BROKERS, _kafkaBrokers); consumerProps.setProperty(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY, "org.apache.kafka.common.serialization.ByteArrayDeserializer"); consumerProps.setProperty(SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT + KAFKA_AUTO_OFFSET_RESET_KEY, "earliest"); //Generate a brand 
new consumer group id to ensure there are no previously committed offsets for this group id String consumerGroupId = Joiner.on("-").join(TOPIC, "auto", System.currentTimeMillis()); consumerProps.setProperty(SOURCE_KAFKA_CONSUMERCONFIG_KEY_WITH_DOT + HighLevelConsumer.GROUP_ID_KEY, consumerGroupId); consumerProps.setProperty(HighLevelConsumer.ENABLE_AUTO_COMMIT_KEY, "true"); MockedHighLevelConsumer consumer = new MockedHighLevelConsumer(TOPIC, ConfigUtils.propertiesToConfig(consumerProps), NUM_PARTITIONS) { @Override public Long calcMillisSince(Long timestamp) { return 1234L - timestamp; } }; Long produceTimestamp = 1000L; Assert.assertTrue(consumer.calcMillisSince(produceTimestamp).equals(234L)); } private List<byte[]> createByteArrayMessages() { List<byte[]> records = Lists.newArrayList(); for(int i=0; i<NUM_MSGS; i++) { byte[] msg = ("msg_" + i).getBytes(); records.add(msg); } return records; } @AfterSuite public void afterSuite() { try { _closer.close(); } catch (Exception e) { System.out.println("Failed to close data writer." + e); } finally { try { close(); } catch (Exception e) { System.out.println("Failed to close Kafka server."+ e); } } } }
3,603
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/runtime/KafkaAvroJobStatusMonitorTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.runtime; import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.io.FileUtils; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.ProducerConfig; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.common.base.Optional; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import com.typesafe.config.ConfigValueFactory; import kafka.consumer.ConsumerIterator; import kafka.message.MessageAndMetadata; import lombok.Getter; 
import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.State; import org.apache.gobblin.kafka.KafkaTestBase; import org.apache.gobblin.kafka.client.DecodeableKafkaRecord; import org.apache.gobblin.kafka.client.Kafka09ConsumerClient; import org.apache.gobblin.metastore.StateStore; import org.apache.gobblin.metrics.GaaSObservabilityEventExperimental; import org.apache.gobblin.metrics.GobblinTrackingEvent; import org.apache.gobblin.metrics.JobStatus; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.event.TimingEvent; import org.apache.gobblin.metrics.kafka.KafkaAvroEventKeyValueReporter; import org.apache.gobblin.metrics.kafka.KafkaEventReporter; import org.apache.gobblin.metrics.kafka.KafkaKeyValueProducerPusher; import org.apache.gobblin.metrics.kafka.Pusher; import org.apache.gobblin.runtime.troubleshooter.InMemoryMultiContextIssueRepository; import org.apache.gobblin.runtime.troubleshooter.JobIssueEventHandler; import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository; import org.apache.gobblin.service.ExecutionStatus; import org.apache.gobblin.service.monitoring.GaaSObservabilityEventProducer; import org.apache.gobblin.service.monitoring.JobStatusRetriever; import org.apache.gobblin.service.monitoring.KafkaAvroJobStatusMonitor; import org.apache.gobblin.service.monitoring.KafkaJobStatusMonitor; import org.apache.gobblin.service.monitoring.MockGaaSObservabilityEventProducer; import org.apache.gobblin.service.monitoring.NoopGaaSObservabilityEventProducer; import org.apache.gobblin.util.ConfigUtils; import static org.apache.gobblin.util.retry.RetryerFactory.RETRY_MULTIPLIER; import static org.mockito.Mockito.mock; public class KafkaAvroJobStatusMonitorTest { public static final String TOPIC = KafkaAvroJobStatusMonitorTest.class.getSimpleName(); private KafkaTestBase kafkaTestHelper; private String flowGroup = "myFlowGroup"; private String flowName = "myFlowName"; private 
// ---- Fixture identifiers shared by every generated GobblinTrackingEvent ----
// note: package-private, unlike the sibling fields — TODO confirm whether `private` was intended
String jobGroup = "myJobGroup";
private String jobName = "myJobName";
private String flowExecutionId = "1234";
private String jobExecutionId = "1111";
private String message = "https://myServer:8143/1234/1111";
// Local FS state store the monitor writes job statuses into; wiped per-method in cleanUpStateStore()
private String stateStoreDir = "/tmp/jobStatusMonitor/statestore";
private MetricContext context;
private KafkaAvroEventKeyValueReporter.Builder<?> builder;

/**
 * Starts an embedded Kafka broker, provisions the test topic, and wires a
 * {@link KafkaAvroEventKeyValueReporter.Builder} whose pusher targets that broker.
 * The keyed fields (flow name/group/execution id) become the Kafka record key.
 */
@BeforeClass
public void setUp() throws Exception {
  cleanUpDir(stateStoreDir);
  kafkaTestHelper = new KafkaTestBase();
  kafkaTestHelper.startServers();
  kafkaTestHelper.provisionTopic(TOPIC);

  // Create KeyValueProducerPusher instance.
  // The scoped config's bootstrap.servers (real embedded-broker port) overrides the "localhost:dummy" default.
  Pusher pusher = new KafkaKeyValueProducerPusher<byte[], byte[]>("localhost:dummy", TOPIC,
      Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
          ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + this.kafkaTestHelper.getKafkaServerPort()))));

  //Create an event reporter instance
  context = MetricContext.builder("context").build();
  builder = KafkaAvroEventKeyValueReporter.Factory.forContext(context);
  builder = builder.withKafkaPusher(pusher).withKeys(Lists.newArrayList(
      TimingEvent.FlowEventConstants.FLOW_NAME_FIELD,
      TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD,
      TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD));
}

/**
 * Happy path: COMPILED -> ORCHESTRATED -> RUNNING -> COMPLETE, with a dummy (non-status)
 * event ignored, and a late JOB_START not regressing the already-COMPLETE status.
 */
@Test
public void testProcessMessageForSuccessfulFlow() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic1");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createJobOrchestratedEvent(1, 2),
      createJobStartEvent(),
      createJobSucceededEvent(),
      createDummyEvent(), // note position
      createJobStartEvent()
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  // allow the async reporter time to flush to the embedded broker
  try {
    Thread.sleep(1000);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), new NoopGaaSObservabilityEventProducer());
  jobStatusMonitor.buildMetricsContextAndMetrics();
  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      this.kafkaTestHelper.getIteratorForTopic(TOPIC),
      this::convertMessageAndMetadataToDecodableKafkaRecord);

  // FLOW_COMPILED carries no job identity, hence the "NA"/"NA" table coordinates
  State state = getNextJobStatusState(jobStatusMonitor, recordIterator, "NA", "NA");
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.RUNNING.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPLETE.name());

  // (per above, is a 'dummy' event)
  Assert.assertNull(jobStatusMonitor.parseJobStatus(
      jobStatusMonitor.deserializeEvent(recordIterator.next())));

  // Check that state didn't get set to running since it was already complete
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPLETE.name());

  jobStatusMonitor.shutDown();
}

/**
 * Failure + retry path: first JOB_FAILED (with maxAttempts=2) becomes PENDING_RETRY with
 * shouldRetry=true; after re-orchestration the second failure (attempt 2 of 2) becomes FAILED
 * with shouldRetry=false. Also verifies an undecodeable Kafka message is counted and skipped.
 */
@Test (dependsOnMethods = "testProcessMessageForSuccessfulFlow")
public void testProcessMessageForFailedFlow() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic2");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createJobOrchestratedEvent(1, 2),
      createJobStartEvent(),
      createJobFailedEvent(),
      // Mimic retrying - job orchestration
      // set maximum attempt to 2, and current attempt to 2
      createJobOrchestratedEvent(2, 2),
      // Mimic retrying - job start (current attempt = 2)
      createJobStartEvent(),
      // Mimic retrying - job failed again (current attempt = 2)
      createJobFailedEvent()
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), new NoopGaaSObservabilityEventProducer());
  jobStatusMonitor.buildMetricsContextAndMetrics();
  ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);
  MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();

  // Verify undecodeable message is skipped (truncate the payload by one byte to corrupt it)
  byte[] undecodeableMessage = Arrays.copyOf(messageAndMetadata.message(), messageAndMetadata.message().length - 1);
  ConsumerRecord undecodeableRecord = new ConsumerRecord<>(TOPIC, messageAndMetadata.partition(),
      messageAndMetadata.offset(), messageAndMetadata.key(), undecodeableMessage);
  Assert.assertEquals(jobStatusMonitor.getMessageParseFailures().getCount(), 0L);
  jobStatusMonitor.processMessage(new Kafka09ConsumerClient.Kafka09ConsumerRecord(undecodeableRecord));
  Assert.assertEquals(jobStatusMonitor.getMessageParseFailures().getCount(), 1L);

  // Re-test when properly encoded, as expected for a normal event
  jobStatusMonitor.processMessage(convertMessageAndMetadataToDecodableKafkaRecord(messageAndMetadata));
  StateStore stateStore = jobStatusMonitor.getStateStore();
  String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName);
  String tableName = KafkaJobStatusMonitor.jobStatusTableName(this.flowExecutionId, "NA", "NA");
  List<State> stateList = stateStore.getAll(storeName, tableName);
  Assert.assertEquals(stateList.size(), 1);
  State state = stateList.get(0);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());

  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      iterator, this::convertMessageAndMetadataToDecodableKafkaRecord);

  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.RUNNING.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  //Because the maximum attempt is set to 2, so the state is set to PENDING_RETRY after the first failure
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.PENDING_RETRY.name());
  Assert.assertEquals(state.getProp(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD), Boolean.toString(true));
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  //Job orchestrated for retrying
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  //Job running again on the retry attempt (current attempt = 2)
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.RUNNING.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  //Because the maximum attempt is set to 2, so the state is set to Failed after trying twice
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.FAILED.name());
  Assert.assertEquals(state.getProp(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD), Boolean.toString(false));
  jobStatusMonitor.shutDown();
}

/**
 * Skipped path: a JOB_SKIPPED event is surfaced as CANCELLED status. Also re-verifies the
 * undecodeable-message counter, since this test shares topic2's consumer stream.
 */
@Test (dependsOnMethods = "testProcessMessageForFailedFlow")
public void testProcessMessageForSkippedFlow() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic2");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createJobOrchestratedEvent(1, 2),
      createJobSkippedEvent()
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), new NoopGaaSObservabilityEventProducer());
  jobStatusMonitor.buildMetricsContextAndMetrics();
  ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);
  MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();

  // Verify undecodeable message is skipped
  byte[] undecodeableMessage = Arrays.copyOf(messageAndMetadata.message(), messageAndMetadata.message().length - 1);
  ConsumerRecord undecodeableRecord = new ConsumerRecord<>(TOPIC, messageAndMetadata.partition(),
      messageAndMetadata.offset(), messageAndMetadata.key(), undecodeableMessage);
  Assert.assertEquals(jobStatusMonitor.getMessageParseFailures().getCount(), 0L);
  jobStatusMonitor.processMessage(new Kafka09ConsumerClient.Kafka09ConsumerRecord(undecodeableRecord));
  Assert.assertEquals(jobStatusMonitor.getMessageParseFailures().getCount(), 1L);

  // Re-test when properly encoded, as expected for a normal event
  jobStatusMonitor.processMessage(convertMessageAndMetadataToDecodableKafkaRecord(messageAndMetadata));
  StateStore stateStore = jobStatusMonitor.getStateStore();
  String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName);
  String tableName = KafkaJobStatusMonitor.jobStatusTableName(this.flowExecutionId, "NA", "NA");
  List<State> stateList = stateStore.getAll(storeName, tableName);
  Assert.assertEquals(stateList.size(), 1);
  State state = stateList.get(0);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());

  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      iterator, this::convertMessageAndMetadataToDecodableKafkaRecord);

  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  // JOB_SKIPPED is recorded as CANCELLED status
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.CANCELLED.name());
  jobStatusMonitor.shutDown();
}

/**
 * Verifies processMessage keeps retrying when parseJobStatus throws apparently-transient
 * exceptions: the toggle makes the mock throw, a scheduled task later "resolves" the fault,
 * and the message must eventually process after more than minNumFakeExceptionsExpected retries.
 */
@Test (dependsOnMethods = "testProcessMessageForSkippedFlow")
public void testProcessingRetriedForApparentlyTransientErrors() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic3");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createJobOrchestratedEvent(1, 2)
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000L);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  int minNumFakeExceptionsExpected = 10;
  AtomicBoolean shouldThrowFakeExceptionInParseJobStatusToggle = new AtomicBoolean(false);
  // shrink the retry backoff multiplier so the many retries complete quickly in-test
  Config conf = ConfigFactory.empty().withValue(
      KafkaJobStatusMonitor.JOB_STATUS_MONITOR_PREFIX + "." + RETRY_MULTIPLIER,
      ConfigValueFactory.fromAnyRef(TimeUnit.MILLISECONDS.toMillis(1L)));
  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(
      shouldThrowFakeExceptionInParseJobStatusToggle, conf, new NoopGaaSObservabilityEventProducer());
  jobStatusMonitor.buildMetricsContextAndMetrics();
  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      this.kafkaTestHelper.getIteratorForTopic(TOPIC),
      this::convertMessageAndMetadataToDecodableKafkaRecord);

  State state = getNextJobStatusState(jobStatusMonitor, recordIterator, "NA", "NA");;
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());

  shouldThrowFakeExceptionInParseJobStatusToggle.set(true);
  // since `processMessage` hereafter effectively hangs, launch eventual re-toggling before calling again
  ScheduledExecutorService toggleManagementExecutor = Executors.newScheduledThreadPool(2);
  toggleManagementExecutor.scheduleAtFixedRate(() -> {
    if (jobStatusMonitor.getNumFakeExceptionsFromParseJobStatus() > minNumFakeExceptionsExpected) {
      // curtail faking: simulate resolution
      shouldThrowFakeExceptionInParseJobStatusToggle.set(false);
    }
  }, 2, 2, TimeUnit.SECONDS);
  Thread mainThread = Thread.currentThread();
  // guardrail against excessive retries (befitting this unit test):
  toggleManagementExecutor.scheduleAtFixedRate(mainThread::interrupt, 20, 5, TimeUnit.SECONDS);

  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertTrue(jobStatusMonitor.getNumFakeExceptionsFromParseJobStatus() > minNumFakeExceptionsExpected,
      String.format("processMessage returned with only %d (faked) exceptions",
          jobStatusMonitor.getNumFakeExceptionsFromParseJobStatus()));
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());

  toggleManagementExecutor.shutdownNow();
  jobStatusMonitor.shutDown();
}

/**
 * Deadline-kill path: run/start SLA-deadline-exceeded events each become PENDING_RETRY while
 * attempts remain, but an explicit FLOW_CANCELLED terminates with CANCELLED and shouldRetry=false
 * even though an attempt is still available.
 */
@Test (dependsOnMethods = "testProcessingRetriedForApparentlyTransientErrors")
public void testProcessMessageForCancelledAndKilledEvent() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic4");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createJobOrchestratedEvent(1, 4),
      createJobSLAKilledEvent(),
      createJobOrchestratedEvent(2, 4),
      createJobStartSLAKilledEvent(),
      // Verify that kill event will not retry
      createJobOrchestratedEvent(3, 4),
      createJobCancelledEvent()
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), new NoopGaaSObservabilityEventProducer());
  jobStatusMonitor.buildMetricsContextAndMetrics();
  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      this.kafkaTestHelper.getIteratorForTopic(TOPIC),
      this::convertMessageAndMetadataToDecodableKafkaRecord);

  State state = getNextJobStatusState(jobStatusMonitor, recordIterator, "NA", "NA");
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  // run-deadline kill with attempts remaining -> PENDING_RETRY
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.PENDING_RETRY.name());
  Assert.assertEquals(state.getProp(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD), Boolean.toString(true));
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  //Job orchestrated for retrying
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  // start-deadline kill with attempts remaining -> PENDING_RETRY
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.PENDING_RETRY.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  //Job orchestrated for retrying
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  // Received kill flow event, should not retry the flow even though there is 1 pending attempt left
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.CANCELLED.name());
  Assert.assertEquals(state.getProp(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD), Boolean.toString(false));
  jobStatusMonitor.shutDown();
}

/**
 * Resume path: after a CANCELLED job, a flow-level PENDING_RESUME event (recorded under "NA"/"NA")
 * is followed by re-orchestration, running, and completion.
 */
@Test (dependsOnMethods = "testProcessingRetriedForApparentlyTransientErrors")
public void testProcessMessageForFlowPendingResume() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic4");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createJobOrchestratedEvent(1, 2),
      createJobCancelledEvent(),
      createFlowPendingResumeEvent(),
      createJobOrchestratedEvent(2, 2),
      createJobStartEvent(),
      createJobSucceededEvent()
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), new NoopGaaSObservabilityEventProducer());
  jobStatusMonitor.buildMetricsContextAndMetrics();
  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      this.kafkaTestHelper.getIteratorForTopic(TOPIC),
      this::convertMessageAndMetadataToDecodableKafkaRecord);

  State state = getNextJobStatusState(jobStatusMonitor, recordIterator, "NA", "NA");
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.CANCELLED.name());
  // Job for flow pending resume status after it was cancelled or failed
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, "NA", "NA");
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.PENDING_RESUME.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  //Job orchestrated for retrying
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.ORCHESTRATED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.RUNNING.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPLETE.name());
  jobStatusMonitor.shutDown();
}

/**
 * A progress (JOB_COMPLETION_PERCENTAGE) event arriving with no prior status for the job
 * must be processed without an NPE and must not fabricate an execution status.
 */
@Test (dependsOnMethods = "testProcessMessageForCancelledAndKilledEvent")
public void testProcessProgressingMessageWhenNoPreviousStatus() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic5");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createGTE(TimingEvent.JOB_COMPLETION_PERCENTAGE, new HashMap<>())
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), new NoopGaaSObservabilityEventProducer());
  jobStatusMonitor.buildMetricsContextAndMetrics();
  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      this.kafkaTestHelper.getIteratorForTopic(TOPIC),
      this::convertMessageAndMetadataToDecodableKafkaRecord);

  State state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  // Verify we are able to process it without NPE
  Assert.assertNull(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD));
}

/**
 * A terminal JOB_SUCCEEDED should emit exactly one GaaSObservabilityEventExperimental, whose
 * planning-phase times come from the WORK_UNITS_CREATION timing event (start=2, end=3).
 */
@Test (dependsOnMethods = "testProcessProgressingMessageWhenNoPreviousStatus")
public void testJobMonitorCreatesGaaSObservabilityEvent() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic6");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createWorkUnitTimingEvent(),
      createJobSucceededEvent()
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MultiContextIssueRepository issueRepository = new InMemoryMultiContextIssueRepository();
  MockGaaSObservabilityEventProducer mockEventProducer = new MockGaaSObservabilityEventProducer(
      ConfigUtils.configToState(ConfigFactory.empty()), issueRepository);
  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), mockEventProducer);
  jobStatusMonitor.buildMetricsContextAndMetrics();
  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      this.kafkaTestHelper.getIteratorForTopic(TOPIC),
      this::convertMessageAndMetadataToDecodableKafkaRecord);

  State state = getNextJobStatusState(jobStatusMonitor, recordIterator, "NA", "NA");
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());
  getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPLETE.name());

  // Only the COMPLETE event should create a GaaSObservabilityEvent
  List<GaaSObservabilityEventExperimental> emittedEvents = mockEventProducer.getTestEmittedEvents();
  Iterator<GaaSObservabilityEventExperimental> iterator = emittedEvents.iterator();
  GaaSObservabilityEventExperimental event1 = iterator.next();
  Assert.assertEquals(event1.getJobStatus(), JobStatus.SUCCEEDED);
  Assert.assertEquals(event1.getFlowName(), this.flowName);
  Assert.assertEquals(event1.getFlowGroup(), this.flowGroup);
  Assert.assertEquals(event1.getJobPlanningPhaseStartTime(), Long.valueOf(2));
  Assert.assertEquals(event1.getJobPlanningPhaseEndTime(), Long.valueOf(3));
  jobStatusMonitor.shutDown();
}

/**
 * Once a job reaches a terminal status (CANCELLED), later status events must not emit
 * additional observability events — exactly one emission per job execution.
 */
@Test (dependsOnMethods = "testJobMonitorCreatesGaaSObservabilityEvent")
public void testObservabilityEventSingleEmission() throws IOException, ReflectiveOperationException {
  KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic7");

  //Submit GobblinTrackingEvents to Kafka
  ImmutableList.of(
      createFlowCompiledEvent(),
      createJobCancelledEvent(),
      createJobSucceededEvent() // This event should be ignored
  ).forEach(event -> {
    context.submitEvent(event);
    kafkaReporter.report();
  });

  try {
    Thread.sleep(1000);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  MultiContextIssueRepository issueRepository = new InMemoryMultiContextIssueRepository();
  MockGaaSObservabilityEventProducer mockEventProducer = new MockGaaSObservabilityEventProducer(
      ConfigUtils.configToState(ConfigFactory.empty()), issueRepository);
  MockKafkaAvroJobStatusMonitor jobStatusMonitor = createMockKafkaAvroJobStatusMonitor(new AtomicBoolean(false),
      ConfigFactory.empty(), mockEventProducer);
  jobStatusMonitor.buildMetricsContextAndMetrics();
  Iterator<DecodeableKafkaRecord> recordIterator = Iterators.transform(
      this.kafkaTestHelper.getIteratorForTopic(TOPIC),
      this::convertMessageAndMetadataToDecodableKafkaRecord);

  State state = getNextJobStatusState(jobStatusMonitor, recordIterator, "NA", "NA");
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.COMPILED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.CANCELLED.name());
  state = getNextJobStatusState(jobStatusMonitor, recordIterator, this.jobGroup, this.jobName);
  Assert.assertEquals(state.getProp(JobStatusRetriever.EVENT_NAME_FIELD), ExecutionStatus.CANCELLED.name());

  // Only the terminal CANCELLED event should create a GaaSObservabilityEvent; the later
  // JOB_SUCCEEDED is ignored, so exactly one event is emitted
  List<GaaSObservabilityEventExperimental> emittedEvents = mockEventProducer.getTestEmittedEvents();
  Assert.assertEquals(emittedEvents.size(), 1);
  Iterator<GaaSObservabilityEventExperimental> iterator = emittedEvents.iterator();
  GaaSObservabilityEventExperimental event1 = iterator.next();
  Assert.assertEquals(event1.getJobStatus(), JobStatus.CANCELLED);
  Assert.assertEquals(event1.getFlowName(), this.flowName);
  Assert.assertEquals(event1.getFlowGroup(), this.flowGroup);
  jobStatusMonitor.shutDown();
}

/**
 * Feeds the next Kafka record to the monitor, then reads back the single job-status
 * {@link State} persisted for ({@code flowExecutionId}, {@code jobGroup}, {@code jobName}).
 *
 * @param jobGroup job group the status was stored under ("NA" for flow-level events)
 * @param jobName job name the status was stored under ("NA" for flow-level events)
 * @return the lone State row in the corresponding state-store table (asserts exactly one)
 */
private State getNextJobStatusState(MockKafkaAvroJobStatusMonitor jobStatusMonitor,
    Iterator<DecodeableKafkaRecord> recordIterator, String jobGroup, String jobName) throws IOException {
  jobStatusMonitor.processMessage(recordIterator.next());
  StateStore stateStore = jobStatusMonitor.getStateStore();
  String storeName = KafkaJobStatusMonitor.jobStatusStoreName(flowGroup, flowName);
  String tableName = KafkaJobStatusMonitor.jobStatusTableName(this.flowExecutionId, jobGroup, jobName);
  List<State> stateList = stateStore.getAll(storeName, tableName);
  Assert.assertEquals(stateList.size(), 1);
  return stateList.get(0);
}

private GobblinTrackingEvent createFlowCompiledEvent() {
  // Shouldn't have job properties in the GTE for FLOW_COMPILED events so that it gets marked as "NA"
  GobblinTrackingEvent event = createGTE(TimingEvent.FlowTimings.FLOW_COMPILED, Maps.newHashMap());
  event.getMetadata().remove(TimingEvent.FlowEventConstants.JOB_NAME_FIELD);
  event.getMetadata().remove(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD);
  event.getMetadata().remove(TimingEvent.FlowEventConstants.JOB_EXECUTION_ID_FIELD);
  event.getMetadata().remove(TimingEvent.METADATA_MESSAGE);
  return event;
}

/**
 * Create a Job Orchestrated Event with a configurable currentAttempt
 * @param currentAttempt specify the number of attempts for the JobOrchestration event
 * @param maxAttempt the maximum number of retries for the event
 * @return the {@link GobblinTrackingEvent}
 */
private GobblinTrackingEvent createJobOrchestratedEvent(int currentAttempt, int maxAttempt) {
  Map<String, String> metadata = Maps.newHashMap();
  metadata.put(TimingEvent.FlowEventConstants.MAX_ATTEMPTS_FIELD, String.valueOf(maxAttempt));
  metadata.put(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, String.valueOf(currentAttempt));
  metadata.put(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD, Boolean.toString(false));
  return createGTE(TimingEvent.LauncherTimings.JOB_ORCHESTRATED, metadata);
}

private GobblinTrackingEvent createJobStartEvent() {
  return createGTE(TimingEvent.LauncherTimings.JOB_START, Maps.newHashMap());
}

private GobblinTrackingEvent createJobSkippedEvent() {
  return createGTE(TimingEvent.JOB_SKIPPED_TIME, Maps.newHashMap());
}

private GobblinTrackingEvent createJobSucceededEvent() {
  return createGTE(TimingEvent.LauncherTimings.JOB_SUCCEEDED, Maps.newHashMap());
}

// note: currently unused by the tests above — retained for symmetry with the other factories
private GobblinTrackingEvent createFlowSucceededEvent() {
  return createGTE(TimingEvent.FlowTimings.FLOW_SUCCEEDED, Maps.newHashMap());
}

private GobblinTrackingEvent createJobFailedEvent() {
  return createGTE(TimingEvent.LauncherTimings.JOB_FAILED, Maps.newHashMap());
}

// flow-level cancel timing event (FLOW_CANCELLED), submitted with job metadata attached
private GobblinTrackingEvent createJobCancelledEvent() {
  return createGTE(TimingEvent.FlowTimings.FLOW_CANCELLED, Maps.newHashMap());
}

// flow run-deadline (SLA) exceeded -> kill
private GobblinTrackingEvent createJobSLAKilledEvent() {
  return createGTE(TimingEvent.FlowTimings.FLOW_RUN_DEADLINE_EXCEEDED, Maps.newHashMap());
}

// flow start-deadline (SLA) exceeded -> kill
private GobblinTrackingEvent createJobStartSLAKilledEvent() {
  return createGTE(TimingEvent.FlowTimings.FLOW_START_DEADLINE_EXCEEDED, Maps.newHashMap());
}

// flow-level PENDING_RESUME event: strip job identity so it is tabled under "NA"/"NA"
private GobblinTrackingEvent createFlowPendingResumeEvent() {
  GobblinTrackingEvent event = createGTE(TimingEvent.FlowTimings.FLOW_PENDING_RESUME, Maps.newHashMap());
  event.getMetadata().remove(TimingEvent.FlowEventConstants.JOB_NAME_FIELD);
  event.getMetadata().remove(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD);
  return event;
}

// work-unit creation timing with start=2 / end=3, asserted as the planning-phase times
// in testJobMonitorCreatesGaaSObservabilityEvent
private GobblinTrackingEvent createWorkUnitTimingEvent() {
  Map<String, String> metadata = Maps.newHashMap();
  metadata.put(TimingEvent.METADATA_START_TIME, "2");
  metadata.put(TimingEvent.METADATA_END_TIME, "3");
  return createGTE(TimingEvent.LauncherTimings.WORK_UNITS_CREATION, metadata);
}

/**
 * Builds a {@link GobblinTrackingEvent} carrying the fixture's flow/job identifiers plus
 * default start/end times ("7"/"8"); entries in {@code customMetadata} override the defaults
 * since they are applied last.
 */
private GobblinTrackingEvent createGTE(String eventName, Map<String, String> customMetadata) {
  String namespace = "org.apache.gobblin.metrics";
  Long timestamp = System.currentTimeMillis();
  Map<String, String> metadata = Maps.newHashMap();
  metadata.put(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, this.flowGroup);
  metadata.put(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, this.flowName);
  metadata.put(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, this.flowExecutionId);
  metadata.put(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, this.jobName);
  metadata.put(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, this.jobGroup);
  metadata.put(TimingEvent.FlowEventConstants.JOB_EXECUTION_ID_FIELD, this.jobExecutionId);
  metadata.put(TimingEvent.METADATA_MESSAGE, this.message);
  metadata.put(TimingEvent.METADATA_START_TIME, "7");
  metadata.put(TimingEvent.METADATA_END_TIME, "8");
  metadata.putAll(customMetadata);
  return new GobblinTrackingEvent(timestamp, namespace, eventName, metadata);
}

/**
 * Builds a monitor backed by the local-FS state store and a byte-array deserializer; the
 * broker/zookeeper addresses are dummies since tests feed records via processMessage directly.
 */
MockKafkaAvroJobStatusMonitor createMockKafkaAvroJobStatusMonitor(
    AtomicBoolean shouldThrowFakeExceptionInParseJobStatusToggle, Config additionalConfig,
    GaaSObservabilityEventProducer eventProducer) throws IOException, ReflectiveOperationException {
  Config config = ConfigFactory.empty().withValue(ConfigurationKeys.KAFKA_BROKERS, ConfigValueFactory.fromAnyRef("localhost:0000"))
      .withValue(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY,
          ConfigValueFactory.fromAnyRef("org.apache.kafka.common.serialization.ByteArrayDeserializer"))
      .withValue(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, ConfigValueFactory.fromAnyRef(stateStoreDir))
      .withValue("zookeeper.connect", ConfigValueFactory.fromAnyRef("localhost:2121"))
      .withFallback(additionalConfig);
  return new MockKafkaAvroJobStatusMonitor("test", config, 1, shouldThrowFakeExceptionInParseJobStatusToggle, eventProducer);
}

/**
 * Create a dummy event to test if it is filtered out by the consumer.
 */
private GobblinTrackingEvent createDummyEvent() {
  String namespace = "org.apache.gobblin.metrics";
  Long timestamp = System.currentTimeMillis();
  String name = "dummy";
  Map<String, String> metadata = Maps.newHashMap();
  metadata.put("k1", "v1");
  metadata.put("k2", "v2");

  GobblinTrackingEvent event = new GobblinTrackingEvent(timestamp, namespace, name, metadata);
  return event;
}

// Adapts the high-level-consumer MessageAndMetadata into the DecodeableKafkaRecord
// shape that KafkaJobStatusMonitor.processMessage consumes
private DecodeableKafkaRecord convertMessageAndMetadataToDecodableKafkaRecord(MessageAndMetadata messageAndMetadata) {
  ConsumerRecord consumerRecord = new ConsumerRecord<>(TOPIC, messageAndMetadata.partition(),
      messageAndMetadata.offset(), messageAndMetadata.key(), messageAndMetadata.message());
  return new Kafka09ConsumerClient.Kafka09ConsumerRecord(consumerRecord);
}

// Recursively removes the given directory if present (used for the state store)
private void cleanUpDir(String dir) throws Exception {
  File specStoreDir = new File(dir);
  if (specStoreDir.exists()) {
    FileUtils.deleteDirectory(specStoreDir);
  }
}

@AfterMethod
public void cleanUpStateStore() {
  try {
    cleanUpDir(stateStoreDir);
  } catch(Exception e) {
    // best-effort cleanup; a leftover store only affects the next run's assertions
    System.err.println("Failed to clean up the state store.");
  }
}

@AfterClass
public void tearDown() {
  try {
    this.kafkaTestHelper.close();
  } catch(Exception e) {
    System.err.println("Failed to close Kafka server.");
  }
}

/**
 * Test double for {@link KafkaAvroJobStatusMonitor}: a mocked {@link JobIssueEventHandler},
 * plus a toggle that makes {@link #parseJobStatus} throw, to exercise retry behavior.
 */
class MockKafkaAvroJobStatusMonitor extends KafkaAvroJobStatusMonitor {
  // shared toggle: while true, parseJobStatus throws instead of delegating
  private final AtomicBoolean shouldThrowFakeExceptionInParseJobStatus;
  // volatile so the test thread and the toggle-management executor both observe updates
  @Getter
  private volatile int numFakeExceptionsFromParseJobStatus = 0;

  /**
   * @param shouldThrowFakeExceptionInParseJobStatusToggle - pass (and retain) to dial whether `parseJobStatus` throws
   */
  public MockKafkaAvroJobStatusMonitor(String topic, Config config, int numThreads,
      AtomicBoolean shouldThrowFakeExceptionInParseJobStatusToggle, GaaSObservabilityEventProducer producer)
      throws IOException, ReflectiveOperationException {
    super(topic, config, numThreads, mock(JobIssueEventHandler.class), producer);
    shouldThrowFakeExceptionInParseJobStatus = shouldThrowFakeExceptionInParseJobStatusToggle;
  }

  // widen visibility for direct invocation from the tests
  @Override
  protected void processMessage(DecodeableKafkaRecord record) {
    super.processMessage(record);
  }

  @Override
  protected void buildMetricsContextAndMetrics() {
    super.buildMetricsContextAndMetrics();
  }

  /**
   * Overridden to stub potential exception within core processing of `processMessage` (specifically retried portion).
   * Although truly plausible (IO)Exceptions would originate from `KafkaJobStatusMonitor.addJobStatusToStateStore`,
   * that is `static`, so unavailable for override. The approach here is a pragmatic compromise, being simpler than
   * the alternative of writing a mock `StateStore.Factory` that the `KafkaJobStatusMonitor` ctor could reflect,
   * instantiate, and finally create a mock `StateStore` from.
   */
  @Override
  public org.apache.gobblin.configuration.State parseJobStatus(GobblinTrackingEvent event) {
    if (shouldThrowFakeExceptionInParseJobStatus.get()) {
      int n = ++numFakeExceptionsFromParseJobStatus;
      throw new RuntimeException(String.format("BOOM! Failure [%d] w/ event at %d", n, event.getTimestamp()));
    } else {
      return super.parseJobStatus(event);
    }
  }
}
}
3,604
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/runtime/SLAEventKafkaJobMonitorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.runtime;

import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Pattern;

import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.event.sla.SlaEventKeys;
import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.NoopSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.job_monitor.KafkaJobMonitor;
import org.apache.gobblin.runtime.job_monitor.SLAEventKafkaJobMonitor;


/**
 * Unit tests for {@link SLAEventKafkaJobMonitor}: parsing a {@link GobblinTrackingEvent} into
 * {@link JobSpec}s, filtering events by name and by dataset URN, and constructing a monitor
 * through its {@link SLAEventKafkaJobMonitor.Factory}.
 */
public class SLAEventKafkaJobMonitorTest {

  private URI templateURI;
  // NOTE(review): kept for signature compatibility, but currently unread after removing the
  // dead config-building code in testFilterByDatasetURN (see that test).
  private Config superConfig;

  public SLAEventKafkaJobMonitorTest() throws Exception {
    this.templateURI = new URI("/templates/uri");
    this.superConfig = HighLevelConsumerTest.getSimpleConfig(Optional.of(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX));
  }

  /**
   * Verifies that a single SLA event yields exactly one {@link JobSpec}, that the spec URI is the
   * base URI joined with the dataset URN, that the template URI is propagated, and that event
   * metadata is extracted into the job config via the extractKeys mapping
   * (metadataKey1 -> key1 here, so "key1" carries metadataKey1's value, not the raw "key1" entry).
   */
  @Test
  public void testParseJobSpec() throws Exception {
    MockSLAEventKafkaJobMonitor monitor =
        new MockSLAEventKafkaJobMonitor("topic", null, new URI("/base/URI"),
            HighLevelConsumerTest.getSimpleConfig(Optional.of(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX)),
            new NoopSchemaVersionWriter(), Optional.<Pattern>absent(), Optional.<Pattern>absent(), this.templateURI,
            ImmutableMap.of("metadataKey1", "key1"));

    monitor.buildMetricsContextAndMetrics();

    GobblinTrackingEvent event = createSLAEvent("DatasetPublish", new URI("/data/myDataset"),
        ImmutableMap.of("metadataKey1", "value1", "key1", "value2"));
    Collection<JobSpec> jobSpecs = monitor.parseJobSpec(event);

    Assert.assertEquals(jobSpecs.size(), 1);
    JobSpec jobSpec = jobSpecs.iterator().next();
    Assert.assertEquals(jobSpec.getUri(), new URI("/base/URI/data/myDataset"));
    Assert.assertEquals(jobSpec.getTemplateURI().get(), templateURI);
    // should insert configuration from metadata
    Assert.assertEquals(jobSpec.getConfig().getString("key1"), "value1");

    monitor.shutdownMetrics();
  }

  /**
   * Verifies the event-name filter: an event whose name matches the pattern produces a spec, while
   * a non-matching name produces nothing and increments the rejected-events counter.
   */
  @Test
  public void testFilterByName() throws Exception {
    MockSLAEventKafkaJobMonitor monitor =
        new MockSLAEventKafkaJobMonitor("topic", null, new URI("/base/URI"),
            HighLevelConsumerTest.getSimpleConfig(Optional.of(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX)),
            new NoopSchemaVersionWriter(), Optional.<Pattern>absent(), Optional.of(Pattern.compile("^accept.*")),
            this.templateURI, ImmutableMap.<String, String>of());

    monitor.buildMetricsContextAndMetrics();

    GobblinTrackingEvent event;
    Collection<JobSpec> jobSpecs;

    event = createSLAEvent("acceptthis", new URI("/data/myDataset"), Maps.<String, String>newHashMap());
    jobSpecs = monitor.parseJobSpec(event);
    Assert.assertEquals(jobSpecs.size(), 1);
    Assert.assertEquals(monitor.getRejectedEvents().getCount(), 0);

    event = createSLAEvent("donotacceptthis", new URI("/data/myDataset"), Maps.<String, String>newHashMap());
    jobSpecs = monitor.parseJobSpec(event);
    Assert.assertEquals(jobSpecs.size(), 0);
    Assert.assertEquals(monitor.getRejectedEvents().getCount(), 1);

    monitor.shutdownMetrics();
  }

  /**
   * Verifies the dataset-URN filter: a dataset under /accept passes, one under /reject is dropped
   * and counted as rejected.
   *
   * NOTE(review): a previous revision built a {@code Config} here (with TEMPLATE_KEY and
   * DATASET_URN_FILTER_KEY) that was never handed to the monitor — the filter was always supplied
   * directly through the constructor below. That dead code has been removed; if the intent was to
   * exercise the config-driven path, that is what {@link #testFactory()} covers.
   */
  @Test
  public void testFilterByDatasetURN() throws Exception {
    MockSLAEventKafkaJobMonitor monitor =
        new MockSLAEventKafkaJobMonitor("topic", null, new URI("/base/URI"),
            HighLevelConsumerTest.getSimpleConfig(Optional.of(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX)),
            new NoopSchemaVersionWriter(), Optional.of(Pattern.compile("^/accept.*")), Optional.<Pattern>absent(),
            this.templateURI, ImmutableMap.<String, String>of());

    monitor.buildMetricsContextAndMetrics();

    GobblinTrackingEvent event;
    Collection<JobSpec> jobSpecs;

    event = createSLAEvent("event", new URI("/accept/myDataset"), Maps.<String, String>newHashMap());
    jobSpecs = monitor.parseJobSpec(event);
    Assert.assertEquals(jobSpecs.size(), 1);
    Assert.assertEquals(monitor.getRejectedEvents().getCount(), 0);

    event = createSLAEvent("event", new URI("/reject/myDataset"), Maps.<String, String>newHashMap());
    jobSpecs = monitor.parseJobSpec(event);
    Assert.assertEquals(jobSpecs.size(), 0);
    Assert.assertEquals(monitor.getRejectedEvents().getCount(), 1);

    monitor.shutdownMetrics();
  }

  /**
   * Verifies that {@link SLAEventKafkaJobMonitor.Factory#forConfig} wires every configuration key
   * (filters, template, extract keys, base URI, topic, schema version reader) into the monitor.
   */
  @Test
  public void testFactory() throws Exception {
    Pattern urnFilter = Pattern.compile("filter");
    Pattern nameFilter = Pattern.compile("filtername");

    Map<String, String> configMap = ImmutableMap.<String, String>builder()
        .put(SLAEventKafkaJobMonitor.DATASET_URN_FILTER_KEY, urnFilter.pattern())
        .put(SLAEventKafkaJobMonitor.EVENT_NAME_FILTER_KEY, nameFilter.pattern())
        .put(SLAEventKafkaJobMonitor.TEMPLATE_KEY, "template")
        .put(SLAEventKafkaJobMonitor.EXTRACT_KEYS + ".key1", "value1")
        .put(SLAEventKafkaJobMonitor.BASE_URI_KEY, "uri")
        .put(SLAEventKafkaJobMonitor.TOPIC_KEY, "topic")
        .put(SLAEventKafkaJobMonitor.SCHEMA_VERSION_READER_CLASS, FixedSchemaVersionWriter.class.getName()).build();
    Config config = ConfigFactory.parseMap(configMap)
        .withFallback(HighLevelConsumerTest.getSimpleConfig(Optional.of(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX)));

    SLAEventKafkaJobMonitor monitor =
        (SLAEventKafkaJobMonitor) (new SLAEventKafkaJobMonitor.Factory()).forConfig(config, null);

    Assert.assertEquals(monitor.getUrnFilter().get().pattern(), urnFilter.pattern());
    Assert.assertEquals(monitor.getNameFilter().get().pattern(), nameFilter.pattern());
    Assert.assertEquals(monitor.getTemplate(), new URI("template"));
    Assert.assertEquals(monitor.getExtractKeys().size(), 1);
    Assert.assertEquals(monitor.getExtractKeys().get("key1"), "value1");
    Assert.assertEquals(monitor.getBaseURI(), new URI("uri"));
    Assert.assertEquals(monitor.getTopic(), "topic");
    Assert.assertEquals(monitor.getVersionWriter().getClass(), FixedSchemaVersionWriter.class);
  }

  /**
   * Builds a minimal SLA {@link GobblinTrackingEvent}: fixed timestamp/namespace, the given name,
   * and metadata containing the dataset URN plus any additional entries.
   */
  private GobblinTrackingEvent createSLAEvent(String name, URI urn, Map<String, String> additionalMetadata) {
    Map<String, String> metadata = Maps.newHashMap();
    metadata.put(SlaEventKeys.DATASET_URN_KEY, urn.toString());
    metadata.putAll(additionalMetadata);
    return new GobblinTrackingEvent(0L, "namespace", name, metadata);
  }

  /**
   * Test double exposing the protected metrics lifecycle hooks of {@link SLAEventKafkaJobMonitor}.
   * Declared {@code static}: it reads no state from the enclosing test instance, so there is no
   * reason to retain a hidden outer-instance reference.
   */
  static class MockSLAEventKafkaJobMonitor extends SLAEventKafkaJobMonitor {

    protected MockSLAEventKafkaJobMonitor(String topic, MutableJobCatalog catalog, URI baseURI,
        Config limitedScopeConfig, SchemaVersionWriter<?> versionWriter, Optional<Pattern> urnFilter,
        Optional<Pattern> nameFilter, URI template, Map<String, String> extractKeys)
        throws IOException {
      super(topic, catalog, baseURI, limitedScopeConfig, versionWriter, urnFilter, nameFilter, template, extractKeys);
    }

    @Override
    protected void buildMetricsContextAndMetrics() {
      super.buildMetricsContextAndMetrics();
    }

    @Override
    protected void shutdownMetrics() throws IOException {
      super.shutdownMetrics();
    }
  }
}
3,605
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/runtime/DagActionStoreChangeMonitorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.runtime;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import java.net.URI;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.kafka.client.Kafka09ConsumerClient;
import org.apache.gobblin.runtime.api.DagActionStore;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.modules.orchestration.DagManager;
import org.apache.gobblin.service.modules.orchestration.Orchestrator;
import org.apache.gobblin.service.monitoring.DagActionStoreChangeEvent;
import org.apache.gobblin.service.monitoring.DagActionStoreChangeMonitor;
import org.apache.gobblin.service.monitoring.DagActionValue;
import org.apache.gobblin.service.monitoring.GenericStoreChangeEvent;
import org.apache.gobblin.service.monitoring.OperationType;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.testng.annotations.Test;

import static org.mockito.Mockito.*;


/**
 * Tests the main functionality of {@link DagActionStoreChangeMonitor} to process {@link DagActionStoreChangeEvent} type
 * events stored in a {@link org.apache.gobblin.kafka.client.KafkaConsumerRecord}. The
 * processMessage(DecodeableKafkaRecord message) function should be able to gracefully process a variety of message
 * types, even with undesired formats, without throwing exceptions.
 */
@Slf4j
public class DagActionStoreChangeMonitorTest {
  // Topic name used for the synthetic Kafka records; derived from the event class name.
  public static final String TOPIC = DagActionStoreChangeEvent.class.getSimpleName();
  // Fixed partition/offset for every synthetic record (values themselves are not asserted on).
  private final int PARTITION = 1;
  private final int OFFSET = 1;
  private final String FLOW_GROUP = "flowGroup";
  private final String FLOW_NAME = "flowName";
  private final String FLOW_EXECUTION_ID = "123";
  private MockDagActionStoreChangeMonitor mockDagActionStoreChangeMonitor;
  // Monotonically increasing transaction id so each event is treated as unique by the monitor's
  // change-event de-duplication (see the resume/kill tests that reuse identical flow info).
  private int txidCounter = 0;

  /**
   * Note: The class methods are wrapped in a test specific method because the original methods are package protected
   * and cannot be accessed by this class.
   */
  class MockDagActionStoreChangeMonitor extends DagActionStoreChangeMonitor {

    public MockDagActionStoreChangeMonitor(String topic, Config config, int numThreads,
        boolean isMultiActiveSchedulerEnabled) {
      // All collaborators are Mockito mocks so per-test verify(...) counts reflect only what
      // processMessage dispatched.
      super(topic, config, mock(DagActionStore.class), mock(DagManager.class), numThreads,
          mock(FlowCatalog.class), mock(Orchestrator.class), isMultiActiveSchedulerEnabled);
    }

    // Public-visibility wrapper over the package-protected processMessage.
    protected void processMessageForTest(DecodeableKafkaRecord record) {
      super.processMessage(record);
    }

    // Public-visibility wrapper over the package-protected startUp.
    protected void startUpForTest() {
       super.startUp();
    }
  }

  // Builds a monitor wired to a dummy Kafka broker/zookeeper address (never actually contacted,
  // since processMessage is driven directly with synthetic records).
  MockDagActionStoreChangeMonitor createMockDagActionStoreChangeMonitor() {
    Config config = ConfigFactory.empty().withValue(ConfigurationKeys.KAFKA_BROKERS, ConfigValueFactory.fromAnyRef("localhost:0000"))
        .withValue(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY, ConfigValueFactory.fromAnyRef("org.apache.kafka.common.serialization.ByteArrayDeserializer"))
        .withValue(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, ConfigValueFactory.fromAnyRef("/tmp/fakeStateStore"))
        .withValue("zookeeper.connect", ConfigValueFactory.fromAnyRef("localhost:2121"));
    return new MockDagActionStoreChangeMonitor("dummyTopic", config, 5, true);
  }

  // Called at start of every test so the count of each method being called is reset to 0
  // (tests chain via dependsOnMethods and each needs fresh mocks for its verify(times(n)) checks).
  public void setup() {
    mockDagActionStoreChangeMonitor = createMockDagActionStoreChangeMonitor();
    mockDagActionStoreChangeMonitor.startUpForTest();
  }

  /**
   * Ensure no NPE results from passing a HEARTBEAT type message with a null {@link DagActionValue} and the message is
   * filtered out since it's a heartbeat type so no methods are called.
   */
  @Test
  public void testProcessMessageWithHeartbeatAndNullDagAction() throws SpecNotFoundException {
    setup();
    Kafka09ConsumerClient.Kafka09ConsumerRecord consumerRecord =
        wrapDagActionStoreChangeEvent(OperationType.HEARTBEAT, "", "", "", null);
    mockDagActionStoreChangeMonitor.processMessageForTest(consumerRecord);
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleResumeFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleKillFlowRequest(anyString(), anyString(), anyLong());
    // Note: Indirectly verifies submitFlowToDagManagerHelper is called which is not a mocked object so cannot be asserted
    verify(mockDagActionStoreChangeMonitor.getFlowCatalog(), times(0)).getSpecs(any(URI.class));
  }

  /**
   * Ensure a HEARTBEAT type message with non-empty flow information is filtered out since it's a heartbeat type so no
   * methods are called.
   */
  @Test (dependsOnMethods = "testProcessMessageWithHeartbeatAndNullDagAction")
  public void testProcessMessageWithHeartbeatAndFlowInfo() throws SpecNotFoundException {
    setup();
    Kafka09ConsumerClient.Kafka09ConsumerRecord consumerRecord =
        wrapDagActionStoreChangeEvent(OperationType.HEARTBEAT, FLOW_GROUP, FLOW_NAME, FLOW_EXECUTION_ID, DagActionValue.RESUME);
    mockDagActionStoreChangeMonitor.processMessageForTest(consumerRecord);
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleResumeFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleKillFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getFlowCatalog(), times(0)).getSpecs(any(URI.class));
  }

  /**
   * Tests process message with an INSERT type message of a `launch` action
   */
  @Test (dependsOnMethods = "testProcessMessageWithHeartbeatAndFlowInfo")
  public void testProcessMessageWithInsertLaunchType() throws SpecNotFoundException {
    setup();
    Kafka09ConsumerClient.Kafka09ConsumerRecord consumerRecord =
        wrapDagActionStoreChangeEvent(OperationType.INSERT, FLOW_GROUP, FLOW_NAME, FLOW_EXECUTION_ID, DagActionValue.LAUNCH);
    mockDagActionStoreChangeMonitor.processMessageForTest(consumerRecord);
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleResumeFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleKillFlowRequest(anyString(), anyString(), anyLong());
    // A launch action is expected to look up the flow spec in the catalog exactly once.
    verify(mockDagActionStoreChangeMonitor.getFlowCatalog(), times(1)).getSpecs(any(URI.class));
  }

  /**
   * Tests process message with an INSERT type message of a `resume` action. It re-uses the same flow information however
   * since it is a different tid used every time it will be considered unique and submit a kill request.
   */
  @Test (dependsOnMethods = "testProcessMessageWithInsertLaunchType")
  public void testProcessMessageWithInsertResumeType() throws SpecNotFoundException {
    setup();
    Kafka09ConsumerClient.Kafka09ConsumerRecord consumerRecord =
        wrapDagActionStoreChangeEvent(OperationType.INSERT, FLOW_GROUP, FLOW_NAME, FLOW_EXECUTION_ID, DagActionValue.RESUME);
    mockDagActionStoreChangeMonitor.processMessageForTest(consumerRecord);
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(1)).handleResumeFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleKillFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getFlowCatalog(), times(0)).getSpecs(any(URI.class));
  }

  /**
   * Tests process message with an INSERT type message of a `kill` action. Similar to `testProcessMessageWithInsertResumeType`.
   */
  @Test (dependsOnMethods = "testProcessMessageWithInsertResumeType")
  public void testProcessMessageWithInsertKillType() throws SpecNotFoundException {
    setup();
    Kafka09ConsumerClient.Kafka09ConsumerRecord consumerRecord =
        wrapDagActionStoreChangeEvent(OperationType.INSERT, FLOW_GROUP, FLOW_NAME, FLOW_EXECUTION_ID, DagActionValue.KILL);
    mockDagActionStoreChangeMonitor.processMessageForTest(consumerRecord);
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleResumeFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(1)).handleKillFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getFlowCatalog(), times(0)).getSpecs(any(URI.class));
  }

  /**
   * Tests process message with an UPDATE type message of the 'launch' action above. Although processMessage does not
   * expect this message type it should handle it gracefully
   */
  @Test (dependsOnMethods = "testProcessMessageWithInsertKillType")
  public void testProcessMessageWithUpdate() throws SpecNotFoundException {
    setup();
    Kafka09ConsumerClient.Kafka09ConsumerRecord consumerRecord =
        wrapDagActionStoreChangeEvent(OperationType.UPDATE, FLOW_GROUP, FLOW_NAME, FLOW_EXECUTION_ID, DagActionValue.LAUNCH);
    mockDagActionStoreChangeMonitor.processMessageForTest(consumerRecord);
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleResumeFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleKillFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getFlowCatalog(), times(0)).getSpecs(any(URI.class));
  }

  /**
   * Tests process message with a DELETE type message which should be ignored regardless of the flow information.
   */
  @Test (dependsOnMethods = "testProcessMessageWithUpdate")
  public void testProcessMessageWithDelete() throws SpecNotFoundException {
    setup();
    Kafka09ConsumerClient.Kafka09ConsumerRecord consumerRecord =
        wrapDagActionStoreChangeEvent(OperationType.DELETE, FLOW_GROUP, FLOW_NAME, FLOW_EXECUTION_ID, DagActionValue.LAUNCH);
    mockDagActionStoreChangeMonitor.processMessageForTest(consumerRecord);
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleResumeFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getDagManager(), times(0)).handleKillFlowRequest(anyString(), anyString(), anyLong());
    verify(mockDagActionStoreChangeMonitor.getFlowCatalog(), times(0)).getSpecs(any(URI.class));
  }

  /**
   * Util to create a general DagActionStoreChange type event
   */
  private DagActionStoreChangeEvent createDagActionStoreChangeEvent(OperationType operationType,
      String flowGroup, String flowName, String flowExecutionId, DagActionValue dagAction) {
    String key = getKeyForFlow(flowGroup, flowName, flowExecutionId);
    // txidCounter makes every event's transaction id unique even when flow identifiers repeat.
    GenericStoreChangeEvent genericStoreChangeEvent =
        new GenericStoreChangeEvent(key, String.valueOf(txidCounter), System.currentTimeMillis(), operationType);
    txidCounter++;
    return new DagActionStoreChangeEvent(genericStoreChangeEvent, flowGroup, flowName, flowExecutionId, dagAction);
  }

  /**
   * Form a key for events using the flow identifiers
   * @return a key formed by adding an '_' delimiter between the flow identifiers
   */
  private String getKeyForFlow(String flowGroup, String flowName, String flowExecutionId) {
    return flowGroup + "_" + flowName + "_" + flowExecutionId;
  }

  /**
   * Util to create wrapper around DagActionStoreChangeEvent
   */
  private Kafka09ConsumerClient.Kafka09ConsumerRecord wrapDagActionStoreChangeEvent(OperationType operationType,
      String flowGroup, String flowName, String flowExecutionId, DagActionValue dagAction) {
    DagActionStoreChangeEvent eventToProcess = null;
    try {
      eventToProcess =
          createDagActionStoreChangeEvent(operationType, flowGroup, flowName, flowExecutionId, dagAction);
    } catch (Exception e) {
      // Deliberate best-effort: on failure the record is still produced, just with a null payload.
      log.error("Exception while creating event ", e);
    }
    // TODO: handle partition and offset values better
    ConsumerRecord consumerRecord = new ConsumerRecord<>(TOPIC, PARTITION, OFFSET,
        getKeyForFlow(flowGroup, flowName, flowExecutionId), eventToProcess);
    return new Kafka09ConsumerClient.Kafka09ConsumerRecord(consumerRecord);
  }
}
3,606
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/Kafka09JsonIntegrationTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.kafka;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.List;

import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;

import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.Kafka09ConsumerClient;
import org.apache.gobblin.kafka.writer.Kafka09JsonObjectWriterBuilder;
import org.apache.gobblin.runtime.util.MultiWorkUnitUnpackingIterator;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.kafka.Kafka09JsonSource;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.Destination;

import static org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX;
import static org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys.KAFKA_TOPIC;


/**
 * An integration test for {@link Kafka09JsonSource} and {@link Kafka09JsonObjectWriterBuilder}. The test writes
 * a json object to kafka with the writer and extracts it with the source
 */
@Slf4j
public class Kafka09JsonIntegrationTest {

  private final Gson gson;
  private final KafkaTestBase kafkaTestHelper;

  // NOTE(review): dropped the redundant `throws RuntimeException` declaration — unchecked
  // exceptions need not (and should not) be declared.
  public Kafka09JsonIntegrationTest() throws InterruptedException {
    kafkaTestHelper = new KafkaTestBase();
    gson = new Gson();
  }

  /** Starts the embedded Kafka/Zookeeper servers once for the whole suite. */
  @BeforeSuite
  public void beforeSuite() {
    log.info("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    kafkaTestHelper.startServers();
  }

  /** Stops clients first, then the servers, even if client shutdown throws. */
  @AfterSuite
  public void afterSuite() throws IOException {
    try {
      kafkaTestHelper.stopClients();
    } finally {
      kafkaTestHelper.stopServers();
    }
  }

  /**
   * Builds a {@link SourceState} pointing the Kafka09 consumer at the embedded broker,
   * whitelisting {@code topic} and bootstrapping from the earliest offset.
   */
  private SourceState createSourceState(String topic) {
    SourceState state = new SourceState();
    state.setProp(ConfigurationKeys.KAFKA_BROKERS, "localhost:" + kafkaTestHelper.getKafkaServerPort());
    state.setProp(KafkaSource.TOPIC_WHITELIST, topic);
    state.setProp(KafkaSource.GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
        Kafka09ConsumerClient.Factory.class.getName());
    state.setProp(KafkaSource.BOOTSTRAP_WITH_OFFSET, "earliest");
    return state;
  }

  /**
   * End-to-end round trip: write one JSON record to the topic with
   * {@link Kafka09JsonObjectWriterBuilder}, then read it back through {@link Kafka09JsonSource},
   * asserting the deserializer wiring, the single non-empty work unit, the extractor schema,
   * and the record payload.
   */
  @Test
  public void testHappyPath() throws IOException, DataRecordException {
    String topic = "testKafka09JsonSource";
    kafkaTestHelper.provisionTopic(topic);
    SourceState state = createSourceState(topic);

    // Produce a record
    state.setProp(KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "localhost:" + kafkaTestHelper.getKafkaServerPort());
    state.setProp(KAFKA_TOPIC, topic);
    Destination destination = Destination.of(Destination.DestinationType.KAFKA, state);
    Kafka09JsonObjectWriterBuilder writerBuilder = new Kafka09JsonObjectWriterBuilder();
    writerBuilder.writeTo(destination);
    DataWriter<JsonObject> writer = writerBuilder.build();
    final String json = "{\"number\":27}";
    JsonObject record = gson.fromJson(json, JsonObject.class);
    // Close the writer even if write/flush throws, so the producer does not leak.
    try {
      writer.write(record);
      writer.flush();
    } finally {
      writer.close();
    }

    Kafka09JsonSource source = new Kafka09JsonSource();
    List<WorkUnit> workUnitList = source.getWorkunits(state);

    // Test the right value serializer is set
    Assert.assertEquals(state.getProp(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY),
        Kafka09JsonSource.KafkaGsonDeserializer.class.getName());

    // Test there is only one non-empty work unit
    MultiWorkUnitUnpackingIterator iterator = new MultiWorkUnitUnpackingIterator(workUnitList.iterator());
    Assert.assertTrue(iterator.hasNext());
    WorkUnit workUnit = iterator.next();
    Assert.assertEquals(workUnit.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY), topic);
    Assert.assertFalse(iterator.hasNext());

    // Test extractor
    WorkUnitState workUnitState = new WorkUnitState(workUnit, state);
    final String jsonSchema =
        "[{\"columnName\":\"number\",\"comment\":\"\",\"isNullable\":\"false\",\"dataType\":{\"type\":\"int\"}}]";
    workUnitState.setProp("source.kafka.json.schema", jsonSchema);
    Extractor<JsonArray, JsonObject> extractor = source.getExtractor(workUnitState);
    Assert.assertEquals(extractor.getSchema().toString(), jsonSchema);
    Assert.assertEquals(extractor.readRecord(null).toString(), json);
  }
}
3,607
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/KafkaStreamingLocalTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.kafka;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;

import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.gobblin.writer.test.GobblinTestEventBusWriter;
import org.apache.gobblin.writer.test.TestingEventBusAsserter;
import org.testng.annotations.Test;


/**
 * Runs the kafka-to-HDFS streaming job locally through {@link EmbeddedGobblin}.
 */
public class KafkaStreamingLocalTest {

  // Disabled: the streaming task never ends unless manually killed.
  @Test(enabled = false)
  public void testStreamingLocally() {
    String eventBusId = this.getClass().getName() + ".jobFileTest";
    TestingEventBusAsserter asserter = new TestingEventBusAsserter(eventBusId);

    EmbeddedGobblin embeddedGobblin = new EmbeddedGobblin("TestJob")
        .jobFile(this.getClass().getClassLoader().getResource("kafkaHdfsStreaming.conf").getPath());
    embeddedGobblin.setConfiguration(GobblinTestEventBusWriter.FULL_EVENTBUSID_KEY, eventBusId);

    // Previously these exceptions were only printed and swallowed, so a failing run would still
    // pass the test. Fail loudly instead, and restore the interrupt flag on interruption.
    try {
      embeddedGobblin.run();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new AssertionError("Streaming job was interrupted", e);
    } catch (TimeoutException | ExecutionException e) {
      throw new AssertionError("Streaming job failed", e);
    }
  }
}
3,608
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/KafkaClusterTestBase.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.kafka;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;

import org.I0Itec.zkclient.ZkClient;

import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.Time;
import kafka.utils.ZKStringSerializer$;
import kafka.zk.EmbeddedZookeeper;

import org.apache.gobblin.test.TestUtils;


/**
 * Test harness that runs an embedded Zookeeper plus a configurable number of embedded Kafka
 * brokers, each on a free port. Call {@link #startCluster()} before use and
 * {@link #stopCluster()} when done.
 */
public class KafkaClusterTestBase extends KafkaTestBase {

  int clusterCount;

  EmbeddedZookeeper _zkServer;
  String _zkConnectString;
  ZkClient _zkClient;

  List<KafkaServer> kafkaBrokerList = new ArrayList<KafkaServer>();
  List<Integer> kafkaBrokerPortList = new ArrayList<Integer>();

  /**
   * @param clusterCount number of Kafka brokers to start
   */
  public KafkaClusterTestBase(int clusterCount) throws InterruptedException, RuntimeException {
    super();
    this.clusterCount = clusterCount;
  }

  /** Starts Zookeeper, then {@code clusterCount} Kafka brokers registered against it. */
  public void startCluster() {
    // Start Zookeeper.
    _zkServer = new EmbeddedZookeeper();
    _zkConnectString = "127.0.0.1:" + _zkServer.port();
    // NOTE(review): this ZkClient is never closed; acceptable in a short-lived test JVM,
    // but a lifecycle hook would be needed for long-running use.
    _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
    // Start Kafka Cluster.
    for (int i = 0; i < clusterCount; i++) {
      kafkaBrokerList.add(createKafkaServer(i, _zkConnectString));
    }
  }

  /** Best-effort shutdown of every broker; failures of one broker do not block the others. */
  public void stopCluster() {
    for (KafkaServer server : kafkaBrokerList) {
      try {
        server.shutdown();
      } catch (RuntimeException e) {
        // Simply Ignore.
      }
    }
  }

  public int getZookeeperPort() {
    return _zkServer.port();
  }

  public List<KafkaServer> getBrokerList() {
    return kafkaBrokerList;
  }

  public List<Integer> getKafkaBrokerPortList() {
    return kafkaBrokerPortList;
  }

  public int getClusterCount() {
    return kafkaBrokerList.size();
  }

  /**
   * Creates and starts one broker with the given id on a freshly allocated free port,
   * recording the port in {@link #kafkaBrokerPortList}.
   */
  private KafkaServer createKafkaServer(int brokerId, String _zkConnectString) {
    int _brokerId = brokerId;
    int _kafkaServerPort = TestUtils.findFreePort();
    Properties props = kafka.utils.TestUtils.createBrokerConfig(
        _brokerId,
        _zkConnectString,
        kafka.utils.TestUtils.createBrokerConfig$default$3(),
        kafka.utils.TestUtils.createBrokerConfig$default$4(),
        _kafkaServerPort,
        kafka.utils.TestUtils.createBrokerConfig$default$6(),
        kafka.utils.TestUtils.createBrokerConfig$default$7(),
        kafka.utils.TestUtils.createBrokerConfig$default$8(),
        kafka.utils.TestUtils.createBrokerConfig$default$9(),
        kafka.utils.TestUtils.createBrokerConfig$default$10(),
        kafka.utils.TestUtils.createBrokerConfig$default$11(),
        kafka.utils.TestUtils.createBrokerConfig$default$12(),
        kafka.utils.TestUtils.createBrokerConfig$default$13(),
        kafka.utils.TestUtils.createBrokerConfig$default$14());
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    KafkaServer _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
    kafkaBrokerPortList.add(_kafkaServerPort);
    return _kafkaServer;
  }

  /**
   * @return a comma-separated {@code localhost:<port>} list for all started brokers; empty string
   *         if no broker has been started. (Previously this threw
   *         {@link StringIndexOutOfBoundsException} on an empty list via {@code substring(0, -1)}.)
   */
  public String getBootServersList() {
    StringBuilder bootServers = new StringBuilder();
    for (Integer port : kafkaBrokerPortList) {
      if (bootServers.length() > 0) {
        bootServers.append(',');
      }
      bootServers.append("localhost:").append(port);
    }
    return bootServers.toString();
  }
}
3,609
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/KafkaTestBase.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.kafka;

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;

import org.I0Itec.zkclient.ZkClient;

import com.google.common.collect.ImmutableMap;

import kafka.admin.AdminUtils;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.Time;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import kafka.zk.EmbeddedZookeeper;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.test.TestUtils;


/**
 * A private class for starting a suite of servers for Kafka.
 * Calls to start and shutdown are reference counted, so that the suite is started and
 * shutdown in pairs.
 * A suite of servers (Zk, Kafka etc) will be started just once per process.
 */
@Slf4j
class KafkaServerSuite {
  // Process-wide singleton holder.
  static KafkaServerSuite _instance;

  // NOTE(review): not thread-safe (unsynchronized lazy init) — assumes callers are on one
  // thread, or that races are tolerable in tests; confirm before relying on it concurrently.
  static KafkaServerSuite getInstance() {
    if (null == _instance) {
      _instance = new KafkaServerSuite();
      return _instance;
    } else {
      return _instance;
    }
  }

  private int _brokerId = 0;
  private EmbeddedZookeeper _zkServer;
  private ZkClient _zkClient;
  private KafkaServer _kafkaServer;
  // Picked once at construction so the port is stable across start/stop cycles.
  private final int _kafkaServerPort;
  private String _zkConnectString;
  // Reference count of start() calls; servers run while this is > 0.
  private final AtomicInteger _numStarted;

  public ZkClient getZkClient() {
    return _zkClient;
  }

  public KafkaServer getKafkaServer() {
    return _kafkaServer;
  }

  public int getKafkaServerPort() {
    return _kafkaServerPort;
  }

  public String getZkConnectString() {
    return _zkConnectString;
  }

  private KafkaServerSuite() {
    _kafkaServerPort = TestUtils.findFreePort();
    // Placeholder until start() replaces it with the real Zookeeper address.
    _zkConnectString = "UNINITIALIZED_HOST_PORT";
    _numStarted = new AtomicInteger(0);
  }

  /**
   * Starts Zookeeper and a single Kafka broker on the first call; subsequent calls only
   * bump the reference count.
   */
  void start() throws RuntimeException {
    if (_numStarted.incrementAndGet() == 1) {
      log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
      _zkServer = new EmbeddedZookeeper();
      _zkConnectString = "127.0.0.1:"+_zkServer.port();
      _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);

      // Broker config built from Kafka's own test utilities, overriding only the
      // broker id, ZK address and listen port.
      Properties props = kafka.utils.TestUtils.createBrokerConfig(
          _brokerId,
          _zkConnectString,
          kafka.utils.TestUtils.createBrokerConfig$default$3(),
          kafka.utils.TestUtils.createBrokerConfig$default$4(),
          _kafkaServerPort,
          kafka.utils.TestUtils.createBrokerConfig$default$6(),
          kafka.utils.TestUtils.createBrokerConfig$default$7(),
          kafka.utils.TestUtils.createBrokerConfig$default$8(),
          kafka.utils.TestUtils.createBrokerConfig$default$9(),
          kafka.utils.TestUtils.createBrokerConfig$default$10(),
          kafka.utils.TestUtils.createBrokerConfig$default$11(),
          kafka.utils.TestUtils.createBrokerConfig$default$12(),
          kafka.utils.TestUtils.createBrokerConfig$default$13(),
          kafka.utils.TestUtils.createBrokerConfig$default$14()
      );
      KafkaConfig config = new KafkaConfig(props);
      Time mock = new MockTime();
      _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
    } else {
      log.info("Kafka server suite already started... continuing");
    }
  }

  /**
   * Decrements the reference count; tears everything down (broker, then ZK client, then
   * ZK server) when it reaches zero.
   */
  void shutdown() {
    if (_numStarted.decrementAndGet() == 0) {
      log.info("Shutting down Kafka server suite");
      _kafkaServer.shutdown();
      _zkClient.close();
      _zkServer.shutdown();
    } else {
      log.info("Kafka server suite still in use ... not shutting down yet");
    }
  }
}

/**
 * Per-topic high-level consumer wrapper: one consumer connector, one stream, one iterator.
 */
class KafkaConsumerSuite {
  private final ConsumerConnector _consumer;
  private final KafkaStream<byte[], byte[]> _stream;
  private final ConsumerIterator<byte[], byte[]> _iterator;
  private final String _topic;

  KafkaConsumerSuite(String zkConnectString, String topic) {
    _topic = topic;
    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnectString);
    // Unique group id per instantiation so each suite reads the topic from its own offset.
    consumeProps.put("group.id", _topic+"-"+System.nanoTime());
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    // NOTE(review): "_consumer.timeout.ms" looks like a typo for "consumer.timeout.ms";
    // as written this key is unrecognized by Kafka, so no consumer timeout is applied.
    // Left unchanged here — confirm intent before renaming.
    consumeProps.put("_consumer.timeout.ms", "10000");

    _consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
        _consumer.createMessageStreams(ImmutableMap.of(this._topic, 1));
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this._topic);
    _stream = streams.get(0);
    _iterator = _stream.iterator();
  }

  void shutdown() {
    _consumer.shutdown();
  }

  public ConsumerIterator<byte[],byte[]> getIterator() {
    return _iterator;
  }
}

/**
 * A Helper class for testing against Kafka.
 * A suite of servers (Zk, Kafka etc) will be started just once per process.
 * Consumer and iterator will be created per instantiation and is one instance per topic.
 */
public class KafkaTestBase implements Closeable {

  private final KafkaServerSuite _kafkaServerSuite;
  // Consumers keyed by topic name; populated lazily by provisionTopic().
  private final Map<String, KafkaConsumerSuite> _topicConsumerMap;

  public KafkaTestBase() throws InterruptedException, RuntimeException {
    this._kafkaServerSuite = KafkaServerSuite.getInstance();
    this._topicConsumerMap = new HashMap<>();
  }

  /** Starts (or ref-counts) the shared ZK + Kafka server suite. */
  public synchronized void startServers() {
    _kafkaServerSuite.start();
  }

  /** Releases this instance's reference on the shared server suite. */
  public void stopServers() {
    _kafkaServerSuite.shutdown();
  }

  public void start() {
    startServers();
  }

  /** Shuts down every per-topic consumer and deletes its topic from the broker. */
  public void stopClients() throws IOException {
    for (Map.Entry<String, KafkaConsumerSuite> consumerSuiteEntry: _topicConsumerMap.entrySet()) {
      consumerSuiteEntry.getValue().shutdown();
      AdminUtils.deleteTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false),
          consumerSuiteEntry.getKey());
    }
  }

  /** Stops clients first, then the servers, per the pairing contract of the suite. */
  @Override
  public void close() throws IOException {
    stopClients();
    stopServers();
  }

  /**
   * Idempotently creates the topic (1 partition, replication 1), waits for its metadata
   * to propagate, and attaches a consumer to it.
   */
  public void provisionTopic(String topic) {
    if (_topicConsumerMap.containsKey(topic)) {
      // nothing to do: return
    } else {
      // provision topic
      AdminUtils.createTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false),
          topic, 1, 1, new Properties());

      List<KafkaServer> servers = new ArrayList<>();
      servers.add(_kafkaServerSuite.getKafkaServer());
      kafka.utils.TestUtils.waitUntilMetadataIsPropagated(
          scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
      KafkaConsumerSuite consumerSuite =
          new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic);
      _topicConsumerMap.put(topic, consumerSuite);
    }
  }

  /**
   * Returns the consumer iterator for a previously provisioned topic.
   * @throws IllegalStateException if {@link #provisionTopic(String)} was never called for it
   */
  public ConsumerIterator<byte[], byte[]> getIteratorForTopic(String topic) {
    if (_topicConsumerMap.containsKey(topic)) {
      return _topicConsumerMap.get(topic).getIterator();
    } else {
      throw new IllegalStateException("Could not find provisioned topic" + topic +
          ": call provisionTopic before");
    }
  }

  public int getKafkaServerPort() {
    return _kafkaServerSuite.getKafkaServerPort();
  }

  public String getZkConnectString() {
    return this._kafkaServerSuite.getZkConnectString();
  }
}
3,610
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/source/extractor/extract/kafka/KafkaSimpleStreamingTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.kafka.source.extractor.extract.kafka;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.nio.channels.ClosedChannelException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSimpleStreamingExtractor;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSimpleStreamingSource;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.writer.WatermarkStorage;

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;


/**
 * Simple unit tests for the streaming kafka producer. Covers very simple scenarios.
 *
 * <p>All tests share one embedded Kafka server suite, started in {@code beforeSuite}
 * and torn down in {@code afterSuite}.
 */
@Slf4j
public class KafkaSimpleStreamingTest {

  // Shared embedded ZK + Kafka helper (process-wide singleton underneath).
  private final KafkaTestBase _kafkaTestHelper;

  public KafkaSimpleStreamingTest() throws InterruptedException, RuntimeException {
    _kafkaTestHelper = new KafkaTestBase();
  }

  @BeforeSuite
  public void beforeSuite() {
    log.info("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    _kafkaTestHelper.startServers();
  }

  @AfterSuite
  public void afterSuite() throws IOException {
    // Stop consumers/topics first, then release the servers even if that fails.
    try {
      _kafkaTestHelper.stopClients();
    } finally {
      _kafkaTestHelper.stopServers();
    }
  }

  /**
   * Tests that the source creates workUnits appropriately. Sets up a topic with a single
   * partition and checks that a single workUnit is returned with the right parameters set.
   * @throws IOException
   * @throws InterruptedException
   */
  @Test
  public void testSource() throws IOException, InterruptedException {
    String topic = "testSimpleStreamingSource";
    _kafkaTestHelper.provisionTopic(topic);
    List<WorkUnit> lWu = getWorkUnits(topic);

    // Check we have a single WorkUnit with the right properties setup.
    Assert.assertEquals(lWu.size(), 1);
    WorkUnit wU = lWu.get(0);
    Assert.assertEquals(KafkaSimpleStreamingSource.getTopicNameFromState(wU), topic);
    Assert.assertEquals(KafkaSimpleStreamingSource.getPartitionIdFromState(wU), 0);
  }

  /** Builds a SourceState pointing at the embedded broker and asks the source for workUnits. */
  private List<WorkUnit> getWorkUnits(String topic) {
    SourceState ss = new SourceState();
    ss.setProp(ConfigurationKeys.KAFKA_BROKERS, "localhost:" + _kafkaTestHelper.getKafkaServerPort());
    ss.setProp(KafkaSimpleStreamingSource.TOPIC_WHITELIST, topic);
    ss.setProp(ConfigurationKeys.JOB_NAME_KEY, topic);
    ss.setProp(KafkaSimpleStreamingSource.TOPIC_KEY_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer");
    ss.setProp(KafkaSimpleStreamingSource.TOPIC_VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    KafkaSimpleStreamingSource<String, byte[]> simpleSource = new KafkaSimpleStreamingSource<String, byte[]>();
    return simpleSource.getWorkunits(ss);
  }

  /**
   * Provisions the topic, generates its (single) workUnit and wraps it in an extractor
   * configured with String keys and byte[] values.
   */
  private KafkaSimpleStreamingExtractor<String, byte[]> getStreamingExtractor(String topic) {
    _kafkaTestHelper.provisionTopic(topic);
    List<WorkUnit> lWu = getWorkUnits(topic);

    WorkUnit wU = lWu.get(0);
    WorkUnitState wSU = new WorkUnitState(wU, new State());

    wSU.setProp(ConfigurationKeys.KAFKA_BROKERS, "localhost:" + _kafkaTestHelper.getKafkaServerPort());
    wSU.setProp(KafkaSimpleStreamingSource.TOPIC_WHITELIST, topic);
    wSU.setProp(ConfigurationKeys.JOB_NAME_KEY, topic);
    wSU.setProp(KafkaSimpleStreamingSource.TOPIC_KEY_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer");
    wSU.setProp(KafkaSimpleStreamingSource.TOPIC_VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.ByteArrayDeserializer");

    // Create an extractor
    return new KafkaSimpleStreamingExtractor<String, byte[]>(wSU);
  }

  /**
   * testExtractor checks that the extractor code does the right thing. First it creates a
   * topic, and sets up a source to point to it. workUnits are generated from the source
   * (only a single wU should be returned). Then it writes a record to this topic and reads
   * back from the extractor to verify the right record is returned. A second record is then
   * written and read back through the extractor to verify poll works as expected. Finally
   * we test the commit api by forcing a commit and then starting a new extractor to ensure
   * we fetch data from after the commit. The commit is also verified in Kafka directly.
   * @throws IOException
   * @throws InterruptedException
   * @throws DataRecordException
   */
  @Test(timeOut = 30000)
  public void testExtractor() throws IOException, InterruptedException, DataRecordException {
    final String topic = "testSimpleStreamingExtractor";
    _kafkaTestHelper.provisionTopic(topic);

    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:" + _kafkaTestHelper.getKafkaServerPort());
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

    Producer<String, byte[]> producer = new KafkaProducer<>(props);
    final byte [] record_1 = {0, 1, 3};
    final byte [] record_2 = {2, 4, 6};
    final byte [] record_3 = {5, 7, 9};

    // Write a sample record to the topic
    producer.send(new ProducerRecord<String, byte[]>(topic, topic, record_1));
    producer.flush();

    KafkaSimpleStreamingExtractor<String, byte[]> kSSE = getStreamingExtractor(topic);

    TopicPartition tP = new TopicPartition(topic, 0);
    KafkaSimpleStreamingExtractor.KafkaWatermark kwm =
        new KafkaSimpleStreamingExtractor.KafkaWatermark(tP, new LongWatermark(0));
    byte [] reuse = new byte[1];
    RecordEnvelope<byte[]> oldRecord = new RecordEnvelope<>(reuse, kwm);

    // Mocked watermark storage starts empty so the extractor begins at the earliest offset.
    Map<String, CheckpointableWatermark> committedWatermarks = new HashMap<>();
    WatermarkStorage mockWatermarkStorage = mock(WatermarkStorage.class);
    when(mockWatermarkStorage.getCommittedWatermarks(any(Class.class), any(Iterable.class)))
        .thenReturn(committedWatermarks);

    kSSE.start(mockWatermarkStorage);

    // read and verify the record matches we just wrote
    RecordEnvelope<byte[]> record = kSSE.readRecordEnvelope();
    Assert.assertEquals(record.getRecord(), record_1);

    // write a second record.
    producer.send(new ProducerRecord<String, byte[]>(topic, topic, record_2));
    producer.flush();

    // read the second record using same extractor to verify it matches whats expected
    record = kSSE.readRecordEnvelope();
    Assert.assertEquals(record.getRecord(), record_2);

    // Commit the watermark
    committedWatermarks.put(record.getWatermark().getSource(), record.getWatermark());

    // write a third record.
    producer.send(new ProducerRecord<String, byte[]>(topic, topic, record_3));
    producer.flush();

    // recreate extractor to force a seek.
    kSSE = getStreamingExtractor(topic);
    kSSE.start(mockWatermarkStorage);
    record = kSSE.readRecordEnvelope();

    // check it matches the data written
    Assert.assertEquals(record.getRecord(), record_3);
  }

  /**
   * testThreadedExtractor verifies its safe to call close from a different thread when the
   * original thread is stuck in poll. We create a topic and then wait for the extractor to
   * return a record (which it never does) in a side thread. The original thread calls close
   * on the extractor and verifies the waiting thread gets an expected exception and exits
   * as expected.
   */
  @Test(timeOut = 30000)
  public void testThreadedExtractor() {
    final String topic = "testThreadedExtractor";
    final KafkaSimpleStreamingExtractor<String, byte[]> kSSE = getStreamingExtractor(topic);

    Thread waitingThread = new Thread () {
      public void run () {
        TopicPartition tP = new TopicPartition(topic, 0);
        KafkaSimpleStreamingExtractor.KafkaWatermark kwm =
            new KafkaSimpleStreamingExtractor.KafkaWatermark(tP, new LongWatermark(0));
        byte[] reuse = new byte[1];
        RecordEnvelope<byte[]> oldRecord = new RecordEnvelope<>(reuse, kwm);
        try {
          RecordEnvelope<byte[]> record = kSSE.readRecordEnvelope();
        } catch (Exception e) {
          // Either exception type is acceptable: which one we see depends on where in the
          // poll the close() interrupts the consumer.
          Assert.assertTrue((e instanceof WakeupException) || (e instanceof ClosedChannelException));
        }
      }
    };
    waitingThread.start();

    try {
      kSSE.close();
      waitingThread.join();
    } catch (Exception e) {
      // should never come here
      throw new Error(e);
    }
  }

  /**
   * Test that the extractor barfs on not calling start.
   */
  @Test(timeOut = 30000)
  public void testExtractorStart() {
    final String topic = "testExtractorStart";
    final KafkaSimpleStreamingExtractor<String, byte[]> kSSE = getStreamingExtractor(topic);
    try {
      kSSE.readRecordEnvelope();
      Assert.fail("Should have thrown an exception");
    } catch (IOException e) {
      // expected: reading before start() must fail with IOException
    } catch (Exception e) {
      Assert.fail("Should only throw IOException");
    }
  }
}
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/writer/Kafka09DataWriterTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.kafka.writer;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.apache.avro.generic.GenericRecord;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;

import kafka.message.MessageAndMetadata;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.kafka.schemareg.ConfigDrivenMd5SchemaRegistry;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.kafka.schemareg.SchemaRegistryException;
import org.apache.gobblin.kafka.serialize.LiAvroDeserializer;
import org.apache.gobblin.kafka.serialize.LiAvroSerializer;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;

import static org.mockito.Mockito.*;


/**
 * Round-trip tests for {@link Kafka09DataWriter}: write through the writer, read back via
 * the embedded-Kafka consumer iterator, and verify payloads and callbacks.
 *
 * <p>NOTE(review): several topic names end in "08" although this class targets the 0.9
 * writer — presumably copied from the 0.8 test; left unchanged since they are only topic
 * identifiers.
 */
@Slf4j
public class Kafka09DataWriterTest {

  // Shared embedded ZK + Kafka helper (process-wide singleton underneath).
  private final KafkaTestBase _kafkaTestHelper;

  public Kafka09DataWriterTest() throws InterruptedException, RuntimeException {
    _kafkaTestHelper = new KafkaTestBase();
  }

  @BeforeSuite(alwaysRun = true)
  public void beforeSuite() {
    log.warn("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    _kafkaTestHelper.startServers();
  }

  @AfterSuite(alwaysRun = true)
  public void afterSuite() throws IOException {
    // Stop consumers/topics first, then release the servers even if that fails.
    try {
      _kafkaTestHelper.stopClients();
    } finally {
      _kafkaTestHelper.stopServers();
    }
  }

  /** Writes a plain String through the writer and reads the same bytes back from Kafka. */
  @Test
  public void testStringSerialization()
      throws IOException, InterruptedException, ExecutionException {
    String topic = "testStringSerialization08";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"bootstrap.servers", "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Kafka09DataWriter<String, String> kafka09DataWriter = new Kafka09DataWriter<>(props);
    String messageString = "foobar";
    WriteCallback callback = mock(WriteCallback.class);
    Future<WriteResponse> future;

    try {
      future = kafka09DataWriter.write(messageString, callback);
      // flush() blocks until the produce completes, so the callback must have fired.
      kafka09DataWriter.flush();
      verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
      verify(callback, never()).onFailure(isA(Exception.class));
      Assert.assertTrue(future.isDone(), "Future should be done");
      System.out.println(future.get().getStringResponse());
      byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
      String messageReceived = new String(message);
      Assert.assertEquals(messageReceived, messageString);
    } finally {
      kafka09DataWriter.close();
    }
  }

  /** Writes raw bytes and verifies the exact byte payload arrives on the topic. */
  @Test
  public void testBinarySerialization()
      throws IOException, InterruptedException {
    String topic = "testBinarySerialization08";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"bootstrap.servers", "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    Kafka09DataWriter<String, byte[]> kafka09DataWriter = new Kafka09DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);
    byte[] messageBytes = TestUtils.generateRandomBytes();

    try {
      kafka09DataWriter.write(messageBytes, callback);
    } finally {
      // close() flushes, so callback verification below is safe after this point.
      kafka09DataWriter.close();
    }

    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
    Assert.assertEquals(message, messageBytes);
  }

  /** Writes an Avro record via LiAvroSerializer and deserializes it back for comparison. */
  @Test
  public void testAvroSerialization()
      throws IOException, InterruptedException, SchemaRegistryException {
    String topic = "testAvroSerialization08";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
        LiAvroSerializer.class.getName());

    // set up mock schema registry
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
            + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
        ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());

    Kafka09DataWriter<String, GenericRecord> kafka09DataWriter = new Kafka09DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);

    GenericRecord record = TestUtils.generateRandomAvroRecord();
    try {
      kafka09DataWriter.write(record, callback);
    } finally {
      kafka09DataWriter.close();
    }

    log.info("Kafka events written");
    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));

    byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
    log.info("Kafka events read, start to check result... ");
    ConfigDrivenMd5SchemaRegistry schemaReg = new ConfigDrivenMd5SchemaRegistry(topic, record.getSchema());
    LiAvroDeserializer deser = new LiAvroDeserializer(schemaReg);
    GenericRecord receivedRecord = deser.deserialize(topic, message);
    Assert.assertEquals(record.toString(), receivedRecord.toString());
  }

  /**
   * Like testAvroSerialization but with keyed writes: the record field named by
   * WRITER_KAFKA_KEYFIELD_CONFIG becomes the Kafka message key.
   */
  @Test
  public void testKeyedAvroSerialization()
      throws IOException, InterruptedException, SchemaRegistryException {
    String topic = "testAvroSerialization09";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
        LiAvroSerializer.class.getName());
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
    String keyField = "field1";
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);

    // set up mock schema registry
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
            + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
        ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());

    Kafka09DataWriter<String, GenericRecord> kafka09DataWriter = new Kafka09DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);

    GenericRecord record = TestUtils.generateRandomAvroRecord();
    try {
      kafka09DataWriter.write(record, callback);
    } finally {
      kafka09DataWriter.close();
    }

    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));

    MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
    byte[] key = value.key();
    byte[] message = value.message();
    ConfigDrivenMd5SchemaRegistry schemaReg = new ConfigDrivenMd5SchemaRegistry(topic, record.getSchema());
    LiAvroDeserializer deser = new LiAvroDeserializer(schemaReg);
    GenericRecord receivedRecord = deser.deserialize(topic, message);
    Assert.assertEquals(record.toString(), receivedRecord.toString());
    // The message key must be the configured key field's value.
    Assert.assertEquals(new String(key), record.get(keyField));
  }

  /**
   * With WRITER_KAFKA_VALUEFIELD_CONFIG set, only the named record field (not the whole
   * record) is written as the message value; here key and value both come from "field1".
   */
  @Test
  public void testValueSerialization()
      throws IOException, InterruptedException, SchemaRegistryException {
    String topic = "testAvroSerialization09";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
        "org.apache.kafka.common.serialization.StringSerializer");
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
    String keyField = "field1";
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_VALUEFIELD_CONFIG, keyField);

    // set up mock schema registry
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
            + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
        ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());

    Kafka09DataWriter<String, GenericRecord> kafka09DataWriter = new Kafka09DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);

    GenericRecord record = TestUtils.generateRandomAvroRecord();
    try {
      kafka09DataWriter.write(record, callback);
    } finally {
      kafka09DataWriter.close();
    }

    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));

    MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
    byte[] key = value.key();
    byte[] message = value.message();
    Assert.assertEquals(new String(message), record.get(keyField));
    Assert.assertEquals(new String(key), record.get(keyField));
  }
}
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/writer/Kafka09TopicProvisionTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.kafka.writer;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkConnection;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.json.JSONObject;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.kafka.KafkaClusterTestBase;
import org.apache.commons.lang3.StringUtils;

import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;


/**
 * Tests that {@link Kafka09DataWriter} provisions topics with the configured partition
 * and replication counts, against an embedded multi-broker cluster.
 *
 * <p>All test methods are currently disabled ({@code enabled=false}); the "live" variant
 * additionally requires external cluster coordinates passed as system properties.
 */
@Slf4j
public class Kafka09TopicProvisionTest {

  // Embedded multi-broker cluster helper.
  private final KafkaClusterTestBase _kafkaTestHelper;
  // Number of brokers to start for the embedded cluster.
  private int testClusterCount = 5;

  public Kafka09TopicProvisionTest()
      throws InterruptedException, RuntimeException {
    _kafkaTestHelper = new KafkaClusterTestBase(testClusterCount);
  }

  @BeforeSuite
  public void beforeSuite() {
    log.info("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    _kafkaTestHelper.startCluster();
  }

  @AfterSuite
  public void afterSuite() throws IOException {
    _kafkaTestHelper.stopCluster();
  }

  /**
   * Sanity-checks the embedded cluster itself: the broker count matches the requested
   * count and the broker ports registered in Zookeeper match the helper's port list.
   */
  @Test (enabled=false)
  public void testCluster()
      throws IOException, InterruptedException, KeeperException {
    int clusterCount = _kafkaTestHelper.getClusterCount();
    Assert.assertEquals(clusterCount,testClusterCount);
    int zkPort = _kafkaTestHelper.getZookeeperPort();
    String kafkaBrokerPortList = _kafkaTestHelper.getKafkaBrokerPortList().toString();
    System.out.println("kafkaBrokerPortList : " + kafkaBrokerPortList);

    // Read each broker's registration node from ZK and extract its advertised port.
    ZooKeeper zk = new ZooKeeper("localhost:"+zkPort, 10000, new ByPassWatcher());
    List<Integer> brokerPortList = new ArrayList<Integer>();
    List<String> ids = zk.getChildren("/brokers/ids", false);
    for (String id : ids) {
      String brokerInfo = new String(zk.getData("/brokers/ids/" + id, false, null));
      JSONObject obj = new JSONObject(brokerInfo);
      int brokerPort = obj.getInt("port");
      System.out.println(brokerPort);
      brokerPortList.add(brokerPort);
    }
    Assert.assertTrue(_kafkaTestHelper.getKafkaBrokerPortList().equals(brokerPortList));
  }

  /**
   * Creates a writer configured to provision its topic with replication = clusterCount
   * and partitions = clusterCount/2, then verifies the partition count via ZK metadata.
   */
  @Test (enabled=false)
  public void testTopicPartitionCreationCount()
      throws IOException, InterruptedException {
    String topic = "topicPartition4";
    int clusterCount = _kafkaTestHelper.getClusterCount();
    int partionCount = clusterCount/2;
    int zkPort = _kafkaTestHelper.getZookeeperPort();
    Properties props = new Properties();

    //	Setting Topic Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, String.valueOf(clusterCount));
    props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, String.valueOf(partionCount));
    props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, "localhost:"+zkPort);
    System.out.println(_kafkaTestHelper.getBootServersList());

    // Setting Producer Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"bootstrap.servers", _kafkaTestHelper.getBootServersList());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    // Constructing the writer triggers topic provisioning as a side effect.
    Kafka09DataWriter<String, String> kafka09DataWriter = new Kafka09DataWriter<>(props);
    String zookeeperConnect = "localhost:"+_kafkaTestHelper.getZookeeperPort();
    int sessionTimeoutMs = 10 * 1000;
    int connectionTimeoutMs = 8 * 1000;
    // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
    // createTopic() will only seem to work (it will return without error). The topic will exist in
    // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
    // topic.
    ZkClient zkClient = new ZkClient(
        zookeeperConnect,
        sessionTimeoutMs,
        connectionTimeoutMs,
        ZKStringSerializer$.MODULE$);
    boolean isSecureKafkaCluster = false;
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), isSecureKafkaCluster);

    TopicMetadata metaData = AdminUtils.fetchTopicMetadataFromZk(topic,zkUtils);
    Assert.assertEquals(metaData.partitionsMetadata().size(), partionCount);
  }

  /**
   * Same verification against a live (external) cluster, configured entirely through
   * system properties (live.cluster.count, live.zookeeper, live.broker, live.newtopic,
   * live.newtopic.replicationCount, live.newtopic.partitionCount). Silently passes when
   * live.cluster.count is not provided.
   */
  @Test (enabled=false)
  public void testLiveTopicPartitionCreationCount()
      throws IOException, InterruptedException {
    String liveClusterCount = System.getProperty("live.cluster.count");
    String liveZookeeper = System.getProperty("live.zookeeper");
    String liveBroker = System.getProperty("live.broker");
    String topic = System.getProperty("live.newtopic");
    String topicReplicationCount = System.getProperty("live.newtopic.replicationCount");
    String topicPartitionCount = System.getProperty("live.newtopic.partitionCount");
    // No live cluster configured: treat as a trivially passing test.
    if(StringUtils.isEmpty(liveClusterCount)){
      Assert.assertTrue(true);
      return;
    }
    // Derive defaults when partition count was not given: replication = clusterCount - 1,
    // partitions = (clusterCount - 1) / 2.
    if(StringUtils.isEmpty(topicPartitionCount)){
      int clusterCount = Integer.parseInt(liveClusterCount);
      clusterCount--;
      int partionCount = clusterCount/2;
      topicReplicationCount = String.valueOf(clusterCount);
      topicPartitionCount = String.valueOf(partionCount);
    }

    Properties props = new Properties();
    //	Setting Topic Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, topicReplicationCount);
    props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, topicPartitionCount );
    props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, liveZookeeper);
    // Setting Producer Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"bootstrap.servers", liveBroker);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    // Constructing the writer triggers topic provisioning as a side effect.
    Kafka09DataWriter<String, String> kafka09DataWriter = new Kafka09DataWriter<>(props);
    int sessionTimeoutMs = 10 * 1000;
    int connectionTimeoutMs = 8 * 1000;
    // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
    // createTopic() will only seem to work (it will return without error). The topic will exist in
    // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
    // topic.
    ZkClient zkClient = new ZkClient(
        liveZookeeper,
        sessionTimeoutMs,
        connectionTimeoutMs,
        ZKStringSerializer$.MODULE$);
    boolean isSecureKafkaCluster = false;
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(liveZookeeper), isSecureKafkaCluster);

    TopicMetadata metaData = AdminUtils.fetchTopicMetadataFromZk(topic,zkUtils);
    Assert.assertEquals(metaData.partitionsMetadata().size(), Integer.parseInt(topicPartitionCount));
  }
}
3,613
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/writer/ByPassWatcher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.kafka.writer;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

/**
 * A no-op ZooKeeper {@link Watcher} for tests that need to open a
 * {@link org.apache.zookeeper.ZooKeeper} connection (which requires a watcher)
 * but have no interest in watch events.
 */
public class ByPassWatcher implements Watcher {

  /**
   * Intentionally ignores every ZooKeeper event.
   *
   * @param event the watched event; discarded
   */
  @Override
  public void process(WatchedEvent event) {
    // No-op by design: this watcher exists only to satisfy the ZooKeeper client API.
  }
}
3,614
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/kafka/client/Kafka09ConsumerClientTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.kafka.client;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.kafka.client.Kafka09ConsumerClient.Kafka09ConsumerRecord;

/**
 * Unit test for {@link Kafka09ConsumerClient} using Kafka's {@link MockConsumer},
 * so no broker is required.
 */
public class Kafka09ConsumerClientTest {

  /**
   * Verifies that consuming a partition from offset 0 returns every record that
   * was queued on the mock consumer, wrapped as {@link Kafka09ConsumerRecord}s.
   */
  @Test
  public void testConsume() throws Exception {
    Config testConfig = ConfigFactory.parseMap(ImmutableMap.of(ConfigurationKeys.KAFKA_BROKERS, "test"));
    MockConsumer<String, String> consumer = new MockConsumer<String, String>(OffsetResetStrategy.NONE);
    consumer.assign(Arrays.asList(new TopicPartition("test_topic", 0)));

    // MockConsumer requires beginning offsets to be registered before records can be polled.
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test_topic", 0), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);

    ConsumerRecord<String, String> record0 = new ConsumerRecord<>("test_topic", 0, 0L, "key", "value0");
    ConsumerRecord<String, String> record1 = new ConsumerRecord<>("test_topic", 0, 1L, "key", "value1");
    ConsumerRecord<String, String> record2 = new ConsumerRecord<>("test_topic", 0, 2L, "key", "value2");
    consumer.addRecord(record0);
    consumer.addRecord(record1);
    consumer.addRecord(record2);

    try (Kafka09ConsumerClient<String, String> kafka09Client = new Kafka09ConsumerClient<>(testConfig, consumer)) {
      // Consume from 0 offset.
      // Note: use uppercase 'L' long-literal suffixes (was '0l, 100l', easily misread as digits).
      Set<KafkaConsumerRecord> consumedRecords =
          Sets.newHashSet(kafka09Client.consume(new KafkaPartition.Builder().withId(0).withTopicName("test_topic")
              .build(), 0L, 100L));

      Set<Kafka09ConsumerRecord<String, String>> expected =
          ImmutableSet.<Kafka09ConsumerRecord<String, String>> of(new Kafka09ConsumerRecord<>(record0),
              new Kafka09ConsumerRecord<>(record1), new Kafka09ConsumerRecord<>(record2));
      Assert.assertEquals(consumedRecords, expected);
    }
  }
}
3,615
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/test/java/org/apache/gobblin/service/StreamingKafkaSpecExecutorTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service; import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.URI; import java.util.List; import java.util.Map; import java.util.Properties; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.testng.Assert; import org.testng.annotations.AfterSuite; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeSuite; import org.testng.annotations.Test; import com.google.common.io.Closer; import com.typesafe.config.Config; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.kafka.KafkaTestBase; import org.apache.gobblin.kafka.client.Kafka09ConsumerClient; import org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys; import org.apache.gobblin.runtime.api.JobSpec; import org.apache.gobblin.runtime.api.Spec; import org.apache.gobblin.runtime.api.SpecExecutor; import org.apache.gobblin.runtime.job_catalog.NonObservingFSJobCatalog; import org.apache.gobblin.runtime.job_monitor.KafkaJobMonitor; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.writer.WriteResponse; 
/**
 * Integration test for the streaming Kafka spec executor: a {@link SimpleKafkaSpecProducer}
 * publishes spec actions (ADD/UPDATE/DELETE/CANCEL) to an embedded Kafka cluster and a
 * {@link StreamingKafkaSpecConsumer} consumes them back.
 *
 * NOTE(review): the test methods form an ordered chain via {@code dependsOnMethods} and
 * share producer/consumer/catalog state; they are not independently runnable.
 */
@Slf4j
public class StreamingKafkaSpecExecutorTest extends KafkaTestBase {
  public static final String TOPIC = StreamingKafkaSpecExecutorTest.class.getSimpleName();

  private Closer _closer;
  private Properties _properties;
  private SimpleKafkaSpecProducer _seip;
  private StreamingKafkaSpecConsumer _seic;
  private NonObservingFSJobCatalog _jobCatalog;
  private String _kafkaBrokers;
  private static final String _TEST_DIR_PATH = "/tmp/StreamingKafkaSpecExecutorTest";
  private static final String _JOBS_DIR_PATH = _TEST_DIR_PATH + "/jobs";

  // Two fixture specs: one carrying a flow execution id (for cancel-matching tests),
  // one plain spec for the basic add/update/delete chain.
  String flowSpecUriString = "/flowgroup/flowname/spec";
  Spec flowSpec = initJobSpecWithFlowExecutionId(flowSpecUriString, "12345");
  String specUriString = "/foo/bar/spec";
  Spec spec = initJobSpec(specUriString);

  @BeforeSuite
  public void beforeSuite() {
    log.info("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    startServers();
  }

  public StreamingKafkaSpecExecutorTest() throws InterruptedException, RuntimeException {
    super();
    _kafkaBrokers = "localhost:" + this.getKafkaServerPort();
    log.info("Going to use Kakfa broker: " + _kafkaBrokers);
    cleanupTestDir();
  }

  // Removes the on-disk job catalog directory so each run starts clean.
  private void cleanupTestDir() {
    File testDir = new File(_TEST_DIR_PATH);
    if (testDir.exists()) {
      try {
        FileUtils.deleteDirectory(testDir);
      } catch (IOException e) {
        throw new RuntimeException("Could not delete test directory", e);
      }
    }
  }

  /**
   * Wires up the spec producer, the FS-backed job catalog and the spec consumer
   * against the embedded Kafka cluster started in {@link #beforeSuite()}.
   */
  @BeforeClass
  public void setup() throws Exception {
    _closer = Closer.create();
    _properties = new Properties();

    // Properties for Producer
    _properties.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, TOPIC);
    _properties.setProperty("spec.kafka.dataWriterClass", "org.apache.gobblin.kafka.writer.Kafka09DataWriter");
    _properties.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers", _kafkaBrokers);
    _properties.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

    // Properties for Consumer
    _properties.setProperty(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX + "." + ConfigurationKeys.KAFKA_BROKERS, _kafkaBrokers);
    _properties.setProperty(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX + "." + Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    _properties.setProperty(SimpleKafkaSpecExecutor.SPEC_KAFKA_TOPICS_KEY, TOPIC);
    _properties.setProperty("gobblin.cluster.jobconf.fullyQualifiedPath", _JOBS_DIR_PATH);
    // Start consuming from the earliest offset so the consumer sees specs produced before it attached.
    _properties.setProperty(KafkaJobMonitor.KAFKA_JOB_MONITOR_PREFIX + "." + Kafka09ConsumerClient.CONFIG_PREFIX + Kafka09ConsumerClient.CONSUMER_CONFIG + ".auto.offset.reset", "earliest");

    Config config = ConfigUtils.propertiesToConfig(_properties);

    // SEI Producer
    _seip = _closer.register(new SimpleKafkaSpecProducer(config));

    _jobCatalog = new NonObservingFSJobCatalog(config.getConfig("gobblin.cluster"));
    _jobCatalog.startAsync().awaitRunning();

    // SEI Consumer
    _seic = _closer.register(new StreamingKafkaSpecConsumer(config, _jobCatalog));
    _seic.startAsync().awaitRunning();
  }

  /** Produces an ADD and asserts exactly one ADD event for the same URI is consumed. */
  @Test
  public void testAddSpec() throws Exception {
    WriteResponse writeResponse = (WriteResponse) _seip.addSpec(spec).get();
    log.info("WriteResponse: " + writeResponse);

    List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
    Assert.assertTrue(consumedEvent.size() == 1, "Consumption did not match production");

    Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(0);
    Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.ADD), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(specUriString), "Expected URI did not match");
    Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
  }

  /** Produces an UPDATE for the spec added in {@link #testAddSpec()} and asserts one UPDATE event. */
  @Test (dependsOnMethods = "testAddSpec")
  public void testUpdateSpec() throws Exception {
    // update is only treated as an update for existing job specs
    WriteResponse writeResponse = (WriteResponse) _seip.updateSpec(spec).get();
    log.info("WriteResponse: " + writeResponse);

    List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
    Assert.assertTrue(consumedEvent.size() == 1, "Consumption did not match production");

    Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(0);
    Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.UPDATE), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(specUriString), "Expected URI did not match");
    Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
  }

  /** Produces a DELETE for the existing spec and asserts one DELETE event. */
  @Test (dependsOnMethods = "testUpdateSpec")
  public void testDeleteSpec() throws Exception {
    // delete needs to be on a job spec that exists to get notification
    WriteResponse writeResponse = (WriteResponse) _seip.deleteSpec(new URI(specUriString)).get();
    log.info("WriteResponse: " + writeResponse);

    List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
    Assert.assertTrue(consumedEvent.size() == 1, "Consumption did not match production");

    Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(0);
    Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.DELETE), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(specUriString), "Expected URI did not match");
    Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
  }

  /**
   * Cancels a freshly re-added spec with no flow-execution-id filter; expects the
   * event sequence ADD, DELETE, CANCEL (cancel is implemented as delete + cancel).
   */
  @Test(dependsOnMethods = "testDeleteSpec")
  public void testCancelSpec() throws Exception {
    // Cancel an existing spec that was added
    _seip.addSpec(spec).get();
    WriteResponse writeResponse = (WriteResponse) _seip.cancelJob(new URI(specUriString), new Properties()).get();
    log.info("WriteResponse: " + writeResponse);

    // Wait for the cancellation to be processed
    Thread.sleep(5000);
    List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
    Assert.assertTrue(consumedEvent.size() == 3, "Consumption did not match production");

    Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(2);
    log.info(consumedSpecAction.getKey().toString());
    Assert.assertTrue(consumedEvent.get(0).getKey().equals(SpecExecutor.Verb.ADD), "Verb did not match");
    Assert.assertTrue(consumedEvent.get(1).getKey().equals(SpecExecutor.Verb.DELETE), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.CANCEL), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(specUriString), "Expected URI did not match");
    Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
  }

  /**
   * A cancel whose flow execution id does not match the spec's must be a no-op
   * (only the ADD event is seen); a cancel with no flow id must still cancel.
   */
  @Test (dependsOnMethods = "testCancelSpec")
  public void testCancelSpecNoopDefault() throws Exception {
    _seip.addSpec(flowSpec).get();
    Properties props = new Properties();
    props.setProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, "54321"); // Does not match with added jobspec, so should not cancel job
    WriteResponse writeResponse = (WriteResponse) _seip.cancelJob(new URI(flowSpecUriString), props).get();
    log.info("WriteResponse: " + writeResponse);

    // Wait for the cancellation to be processed, but it should ignore the spec as flow execution IDs do not match
    Thread.sleep(5000);
    List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
    Assert.assertTrue(consumedEvent.size() == 1, "Consumption did not match production");

    Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(0);
    Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.ADD), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(flowSpecUriString), "Expected URI did not match");
    Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");

    // Cancel with no flow id at all: should go through (DELETE then CANCEL).
    _seip.cancelJob(new URI(flowSpecUriString), new Properties()).get();
    Thread.sleep(5000);
    consumedEvent = _seic.changedSpecs().get();
    Assert.assertTrue(consumedEvent.size() == 2, "Should emit cancellation event if no flow ID provided");
    consumedSpecAction = consumedEvent.get(1);
    Assert.assertTrue(consumedEvent.get(0).getKey().equals(SpecExecutor.Verb.DELETE), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.CANCEL), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(flowSpecUriString), "Expected URI did not match");
    Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
  }

  /** A cancel whose flow execution id matches the spec's ("12345") must cancel: ADD, DELETE, CANCEL. */
  @Test(dependsOnMethods = "testCancelSpecNoopDefault")
  public void testCancelSpecWithFlowExecutionId() throws Exception {
    _seip.addSpec(flowSpec).get();
    Properties props = new Properties();
    props.setProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, "12345");
    WriteResponse writeResponse = (WriteResponse) _seip.cancelJob(new URI(flowSpecUriString), props).get();
    log.info("WriteResponse: " + writeResponse);

    // Wait for the cancellation to be processed
    Thread.sleep(5000);
    List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
    Assert.assertTrue(consumedEvent.size() == 3, "Consumption did not match production");

    Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(2);
    log.info(consumedSpecAction.getKey().toString());
    Assert.assertTrue(consumedEvent.get(0).getKey().equals(SpecExecutor.Verb.ADD), "Verb did not match");
    Assert.assertTrue(consumedEvent.get(1).getKey().equals(SpecExecutor.Verb.DELETE), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.CANCEL), "Verb did not match");
    Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(flowSpecUriString), "Expected URI did not match");
    Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
  }

  // Builds a minimal JobSpec with an empty config.
  private static JobSpec initJobSpec(String specUri) {
    Properties properties = new Properties();
    return JobSpec.builder(specUri)
        .withConfig(ConfigUtils.propertiesToConfig(properties))
        .withVersion("1")
        .withDescription("Spec Description")
        .build();
  }

  // Builds a minimal JobSpec carrying the given flow execution id in its config.
  private static JobSpec initJobSpecWithFlowExecutionId(String specUri, String flowExecutionId) {
    Properties properties = new Properties();
    properties.setProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, flowExecutionId);
    return JobSpec.builder(specUri)
        .withConfig(ConfigUtils.propertiesToConfig(properties))
        .withVersion("1")
        .withDescription("Spec Description")
        .build();
  }

  /** Tears down producer/consumer, the embedded Kafka cluster, the job catalog and the test dir. */
  @AfterSuite
  public void after() {
    try {
      _closer.close();
    } catch(Exception e) {
      log.error("Failed to close SEIC and SEIP.", e);
    }
    try {
      close();
    } catch(Exception e) {
      log.error("Failed to close Kafka server.", e);
    }
    if (_jobCatalog != null) {
      _jobCatalog.stopAsync().awaitTerminated();
    }
    cleanupTestDir();
  }
}
3,616
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/metrics/kafka/KafkaKeyValueProducerPusher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metrics.kafka;

import java.io.IOException;
import java.util.List;
import java.util.Properties;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;

import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.util.ConfigUtils;


/**
 * Establishes a connection to a Kafka cluster and push keyed messages to a specified topic.
 *
 * Messages are sent asynchronously with acks=all and in-order delivery
 * (max in-flight requests = 1); send failures are logged, not rethrown.
 *
 * @param <K> key type
 * @param <V> value type
 */
@Slf4j
public class KafkaKeyValueProducerPusher<K, V> implements Pusher<Pair<K, V>> {
  private final String topic;
  private final KafkaProducer<K, V> producer;
  private final Closer closer;

  /**
   * @param brokers comma-separated Kafka bootstrap server list
   * @param topic topic every pushed message is sent to
   * @param kafkaConfig optional producer config scope; entries override the defaults set below
   */
  public KafkaKeyValueProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();

    this.topic = topic;

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);
    //To guarantee ordered delivery, the maximum in flight requests must be set to 1.
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);

    // add the kafka scoped config. if any of the above are specified then they are overridden
    if (kafkaConfig.isPresent()) {
      props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
    }

    this.producer = createProducer(props);
  }

  /** Convenience constructor with no extra producer configuration. */
  public KafkaKeyValueProducerPusher(String brokers, String topic) {
    this(brokers, topic, Optional.absent());
  }

  /**
   * Push all keyed messages to the Kafka topic. Sends are asynchronous; a failed
   * send is only logged via the callback.
   *
   * @param messages List of keyed messages to push to Kafka.
   */
  public void pushMessages(List<Pair<K, V>> messages) {
    for (Pair<K, V> message: messages) {
      this.producer.send(new ProducerRecord<>(topic, message.getKey(), message.getValue()), (recordMetadata, e) -> {
        if (e != null) {
          log.error("Failed to send message to topic {} due to exception: ", topic, e);
        }
      });
    }
  }

  @Override
  public void close() throws IOException {
    //Call flush() before invoking close() to ensure any buffered messages are immediately sent. This is required
    //since close() only guarantees delivery of in-flight messages.
    log.info("Flushing records from producer buffer");
    this.producer.flush();
    this.closer.close();
  }

  /**
   * Create the Kafka producer.
   */
  protected KafkaProducer<K, V> createProducer(Properties props) {
    return this.closer.register(new KafkaProducer<K, V>(props));
  }
}
3,617
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/metrics/kafka/KafkaProducerPusher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metrics.kafka;

import java.io.IOException;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;

import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.util.ConfigUtils;


/**
 * Establish a connection to a Kafka cluster and push byte messages to a specified topic.
 */
@Slf4j
public class KafkaProducerPusher implements Pusher<byte[]> {
  private final String topic;
  private final KafkaProducer<String, byte[]> producer;
  private final Closer closer;

  /**
   * Builds a producer targeting {@code topic} on the given brokers. Entries in the
   * optional scoped config take precedence over the defaults configured here.
   */
  public KafkaProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;

    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.ACKS_CONFIG, "all");
    producerProps.put(ProducerConfig.RETRIES_CONFIG, 3);

    // The scoped Kafka config wins over any of the defaults configured above.
    if (kafkaConfig.isPresent()) {
      producerProps.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
    }

    this.producer = createProducer(producerProps);
  }

  /** Same as the three-argument constructor with no scoped Kafka config. */
  public KafkaProducerPusher(String brokers, String topic) {
    this(brokers, topic, Optional.absent());
  }

  /**
   * Push all byte array messages to the Kafka topic. Sends are asynchronous;
   * a send failure is reported through the callback and logged, not rethrown.
   *
   * @param messages byte array payloads to publish
   */
  public void pushMessages(List<byte[]> messages) {
    for (byte[] payload : messages) {
      ProducerRecord<String, byte[]> record = new ProducerRecord<>(this.topic, payload);
      this.producer.send(record, (recordMetadata, e) -> {
        if (e != null) {
          log.error("Failed to send message to topic {} due to exception: ", topic, e);
        }
      });
    }
  }

  @Override
  public void close() throws IOException {
    // flush() first so buffered (not yet in-flight) records are delivered too;
    // close() alone only guarantees delivery of in-flight messages.
    log.info("Flushing records from producer buffer");
    this.producer.flush();
    this.closer.close();
  }

  /**
   * Create the Kafka producer.
   */
  protected KafkaProducer<String, byte[]> createProducer(Properties props) {
    return this.closer.register(new KafkaProducer<String, byte[]>(props));
  }
}
3,618
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaSimpleStreamingSource.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.extractor.extract.kafka;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.io.Closer;
import com.typesafe.config.Config;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.source.extractor.extract.EventBasedSource;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ConfigUtils;

/**
 * A {@link EventBasedSource} implementation for a simple streaming kafka extractor.
 *
 * Creates one {@link WorkUnit} per partition of the configured singleton topic.
 *
 * @author Shrikanth Shankar
 *
 */
public class KafkaSimpleStreamingSource<S, D> extends EventBasedSource<S, RecordEnvelope<D>> {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaSimpleStreamingSource.class);

  /**
   * This is the topic name used by this source . Currently only supports singleton.
   */
  public static final String TOPIC_WHITELIST = "gobblin.streaming.kafka.topic.singleton";

  /**
   * Deserializer to be used by this source.
   */
  public static final String TOPIC_KEY_DESERIALIZER = "gobblin.streaming.kafka.topic.key.deserializer";
  public static final String TOPIC_VALUE_DESERIALIZER = "gobblin.streaming.kafka.topic.value.deserializer";

  public static final String KAFKA_CONSUMER_CONFIG_PREFIX = "gobblin.streaming.kafka.consumerConfig";

  /**
   * Private config keys used to pass data into work unit state
   */
  private static final String TOPIC_NAME = KafkaSource.TOPIC_NAME;
  private static final String PARTITION_ID = KafkaSource.PARTITION_ID;

  public static String getTopicNameFromState(State s) {
    return s.getProp(TOPIC_NAME);
  }

  public static int getPartitionIdFromState(State s) {
    return s.getPropAsInt(PARTITION_ID);
  }

  public static void setTopicNameInState(State s, String topic) {
    s.setProp(TOPIC_NAME, topic);
  }

  public static void setPartitionId(State s, int partitionId) {
    s.setProp(PARTITION_ID, partitionId);
  }

  private final Closer closer = Closer.create();

  public static final Extract.TableType DEFAULT_TABLE_TYPE = Extract.TableType.APPEND_ONLY;
  public static final String DEFAULT_NAMESPACE_NAME = "KAFKA";

  /**
   * Builds a {@link KafkaConsumer} from the given config: brokers, group id,
   * required key/value deserializers, plus any config scoped under
   * {@link #KAFKA_CONSUMER_CONFIG_PREFIX} (e.g. SSL settings).
   *
   * @throws IllegalArgumentException if either deserializer key is missing
   */
  public static Consumer getKafkaConsumer(Config config) {
    List<String> brokers = ConfigUtils.getStringList(config, ConfigurationKeys.KAFKA_BROKERS);
    Properties props = new Properties();
    props.put("bootstrap.servers", Joiner.on(",").join(brokers));
    props.put("group.id", ConfigUtils.getString(config, ConfigurationKeys.JOB_NAME_KEY, StringUtils.EMPTY));
    // Offsets are committed explicitly by the extractor, not automatically.
    props.put("enable.auto.commit", "false");
    Preconditions.checkArgument(config.hasPath(TOPIC_KEY_DESERIALIZER));
    props.put("key.deserializer", config.getString(TOPIC_KEY_DESERIALIZER));
    Preconditions.checkArgument(config.hasPath(TOPIC_VALUE_DESERIALIZER));
    props.put("value.deserializer", config.getString(TOPIC_VALUE_DESERIALIZER));

    // pass along any config scoped under source.kafka.config
    // one use case of this is to pass SSL configuration
    Config scopedConfig = ConfigUtils.getConfigOrEmpty(config, KAFKA_CONSUMER_CONFIG_PREFIX);
    props.putAll(ConfigUtils.configToProperties(scopedConfig));
    Consumer consumer = null;
    try {
      consumer = new KafkaConsumer<>(props);
    } catch (Exception e) {
      // Pass the exception as the last argument (no "{}" placeholder) so SLF4J
      // logs the full stack trace instead of formatting the throwable into the message.
      LOG.error("Exception when creating Kafka consumer", e);
      throw Throwables.propagate(e);
    }

    return consumer;
  }

  /**
   * Creates one {@link WorkUnit} per partition of the configured topic, recording
   * topic name and partition id in each work unit's state.
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    Config config = ConfigUtils.propertiesToConfig(state.getProperties());
    Consumer<String, byte[]> consumer = getKafkaConsumer(config);
    LOG.debug("Consumer is {}", consumer);

    String topic = ConfigUtils.getString(config, TOPIC_WHITELIST, StringUtils.EMPTY); // TODO: fix this to use the new API when KafkaWrapper is fixed

    List<WorkUnit> workUnits = new ArrayList<>();
    List<PartitionInfo> topicPartitions;
    topicPartitions = consumer.partitionsFor(topic);
    LOG.info("Partition count is {}", topicPartitions.size());
    for (PartitionInfo topicPartition : topicPartitions) {
      Extract extract = this.createExtract(DEFAULT_TABLE_TYPE, DEFAULT_NAMESPACE_NAME, topicPartition.topic());
      LOG.info("Partition info is {}", topicPartition);
      WorkUnit workUnit = WorkUnit.create(extract);
      setTopicNameInState(workUnit, topicPartition.topic());
      workUnit.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, topicPartition.topic());
      setPartitionId(workUnit, topicPartition.partition());
      workUnits.add(workUnit);
    }
    return workUnits;
  }

  /** Returns a streaming extractor for the partition described by {@code state}. */
  @Override
  public Extractor getExtractor(WorkUnitState state) throws IOException {
    return new KafkaSimpleStreamingExtractor<S, D>(state);
  }
}
3,619
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaSimpleStreamingExtractor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.kafka; import java.io.IOException; import java.nio.channels.ClosedChannelException; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.gson.JsonElement; import org.apache.gobblin.configuration.State; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.kafka.client.AbstractBaseKafkaConsumerClient; import org.apache.gobblin.metrics.Tag; import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry; import org.apache.gobblin.metrics.kafka.SchemaRegistryException; import org.apache.gobblin.source.extractor.CheckpointableWatermark; import org.apache.gobblin.source.extractor.ComparableWatermark; import 
org.apache.gobblin.source.extractor.DataRecordException; import org.apache.gobblin.stream.RecordEnvelope; import org.apache.gobblin.source.extractor.StreamingExtractor; import org.apache.gobblin.source.extractor.Watermark; import org.apache.gobblin.source.extractor.WatermarkSerializerHelper; import org.apache.gobblin.source.extractor.extract.EventBasedExtractor; import org.apache.gobblin.source.extractor.extract.LongWatermark; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.writer.WatermarkStorage; import lombok.ToString; /** * An implementation of {@link StreamingExtractor} which reads from Kafka and returns records . Type of record depends on deserializer set. * * @author Shrikanth Shankar * * */ public class KafkaSimpleStreamingExtractor<S, D> extends EventBasedExtractor<S, D> implements StreamingExtractor<S, D> { private static final Logger LOG = LoggerFactory.getLogger(KafkaSimpleStreamingExtractor.class); private AtomicBoolean _isStarted = new AtomicBoolean(false); @Override public void start(WatermarkStorage watermarkStorage) throws IOException { Preconditions.checkArgument(watermarkStorage != null, "Watermark Storage should not be null"); Map<String, CheckpointableWatermark> watermarkMap = watermarkStorage.getCommittedWatermarks(KafkaWatermark.class, Collections.singletonList(_partition.toString())); KafkaWatermark watermark = (KafkaWatermark) watermarkMap.get(_partition.toString()); if (watermark == null) { LOG.info("Offset is null - seeking to beginning of topic and partition for {} ", _partition.toString()); _consumer.seekToBeginning(_partition); } else { // seek needs to go one past the last committed offset LOG.info("Offset found in consumer for partition {}. 
Seeking to one past what we found : {}", _partition.toString(), watermark.getLwm().getValue() + 1); _consumer.seek(_partition, watermark.getLwm().getValue() + 1); } _isStarted.set(true); } @ToString public static class KafkaWatermark implements CheckpointableWatermark { TopicPartition _topicPartition; LongWatermark _lwm; @VisibleForTesting public KafkaWatermark(TopicPartition topicPartition, LongWatermark lwm) { _topicPartition = topicPartition; _lwm = lwm; } @Override public String getSource() { return _topicPartition.toString(); } @Override public ComparableWatermark getWatermark() { return _lwm; } @Override public short calculatePercentCompletion(Watermark lowWatermark, Watermark highWatermark) { return 0; } @Override public JsonElement toJson() { return WatermarkSerializerHelper.convertWatermarkToJson(this); } @Override public int compareTo(CheckpointableWatermark o) { Preconditions.checkArgument(o instanceof KafkaWatermark); KafkaWatermark ko = (KafkaWatermark) o; Preconditions.checkArgument(_topicPartition.equals(ko._topicPartition)); return _lwm.compareTo(ko._lwm); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (!(obj instanceof KafkaWatermark)) { return false; } return this.compareTo((CheckpointableWatermark) obj) == 0; } @Override public int hashCode() { final int prime = 31; return _topicPartition.hashCode() * prime + _lwm.hashCode(); } public TopicPartition getTopicPartition() { return _topicPartition; } public LongWatermark getLwm() { return _lwm; } } private final Consumer<S, D> _consumer; private final TopicPartition _partition; private Iterator<ConsumerRecord<S, D>> _records; AtomicLong _rowCount = new AtomicLong(0); protected final Optional<KafkaSchemaRegistry<String, S>> _schemaRegistry; protected AtomicBoolean _close = new AtomicBoolean(false); private final long fetchTimeOut; public KafkaSimpleStreamingExtractor(WorkUnitState state) { super(state); _consumer = 
KafkaSimpleStreamingSource.getKafkaConsumer(ConfigUtils.propertiesToConfig(state.getProperties())); closer.register(_consumer); _partition = new TopicPartition(KafkaSimpleStreamingSource.getTopicNameFromState(state), KafkaSimpleStreamingSource.getPartitionIdFromState(state)); _consumer.assign(Collections.singletonList(_partition)); this._schemaRegistry = state.contains(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS) ? Optional .of(KafkaSchemaRegistry.<String, S>get(state.getProperties())) : Optional.<KafkaSchemaRegistry<String, S>>absent(); this.fetchTimeOut = state.getPropAsLong(AbstractBaseKafkaConsumerClient.CONFIG_KAFKA_FETCH_TIMEOUT_VALUE, AbstractBaseKafkaConsumerClient.CONFIG_KAFKA_FETCH_TIMEOUT_VALUE_DEFAULT); } /** * Get the schema (metadata) of the extracted data records. * * @return the schema of Kafka topic being extracted * @throws IOException if there is problem getting the schema */ @Override public S getSchema() throws IOException { try { if (_schemaRegistry.isPresent()) { return _schemaRegistry.get().getLatestSchemaByTopic(this._partition.topic()); } } catch (SchemaRegistryException e) { throw new RuntimeException(e); } return ((S) this._partition.topic()); } @Override public List<Tag<?>> generateTags(State state) { List<Tag<?>> tags = super.generateTags(state); tags.add(new Tag<>("kafkaTopic", state.getProp(KafkaSimpleStreamingSource.TOPIC_WHITELIST))); return tags; } /** * Return the next record when available. Will never time out since this is a streaming source. 
*/ @Override public RecordEnvelope<D> readRecordEnvelopeImpl() throws DataRecordException, IOException { if (!_isStarted.get()) { throw new IOException("Streaming extractor has not been started."); } while ((_records == null) || (!_records.hasNext())) { synchronized (_consumer) { if (_close.get()) { throw new ClosedChannelException(); } _records = _consumer.poll(this.fetchTimeOut).iterator(); } } ConsumerRecord<S, D> record = _records.next(); _rowCount.getAndIncrement(); return new RecordEnvelope<D>(record.value(), new KafkaWatermark(_partition, new LongWatermark(record.offset()))); } @Override public long getExpectedRecordCount() { return _rowCount.get(); } @Override public void close() throws IOException { _close.set(true); _consumer.wakeup(); synchronized (_consumer) { closer.close(); } } @Deprecated @Override public long getHighWatermark() { return 0; } }
3,620
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/Kafka09JsonSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.kafka; import java.io.IOException; import java.util.List; import org.apache.commons.lang.StringUtils; import org.apache.kafka.common.serialization.Deserializer; import com.google.gson.JsonArray; import com.google.gson.JsonObject; import com.google.gson.JsonParser; import org.apache.gobblin.configuration.SourceState; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord; import org.apache.gobblin.kafka.client.Kafka09ConsumerClient; import org.apache.gobblin.kafka.serialize.GsonDeserializerBase; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.workunit.WorkUnit; /** * A {@link KafkaSource} that reads kafka record as {@link JsonObject} */ public class Kafka09JsonSource extends KafkaSource<JsonArray, JsonObject> { @Override public List<WorkUnit> getWorkunits(SourceState state) { if (!state.contains(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY)) { state.setProp(Kafka09ConsumerClient.GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY, KafkaGsonDeserializer.class.getName()); } return super.getWorkunits(state); } @Override public 
Extractor<JsonArray, JsonObject> getExtractor(WorkUnitState state) throws IOException { return new JsonExtractor(state); } static final class JsonExtractor extends KafkaExtractor<JsonArray, JsonObject> { private static final String JSON_SCHEMA = "source.kafka.json.schema"; private static final JsonParser JSON_PARSER = new JsonParser(); private final JsonArray schema; JsonExtractor(WorkUnitState state) { super(state); String schemaStr = state.getProp(JSON_SCHEMA); if (StringUtils.isEmpty(schemaStr)) { throw new RuntimeException("Missing configuration: " + JSON_SCHEMA); } this.schema = JSON_PARSER.parse(schemaStr).getAsJsonArray(); } @Override public JsonArray getSchema() throws IOException { return schema; } @Override protected JsonObject decodeRecord(ByteArrayBasedKafkaRecord kafkaConsumerRecord) throws IOException { throw new UnsupportedOperationException(); } } /** * A specific kafka {@link Deserializer} that deserializes record as JasonObject */ public static final class KafkaGsonDeserializer extends GsonDeserializerBase<JsonObject> implements Deserializer<JsonObject> { } }
3,621
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroSerializer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.kafka.serialize; import org.apache.avro.generic.GenericRecord; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.serialization.Serializer; /** * LinkedIn's implementation of Avro-schema based serialization for Kafka * TODO: Implement this for IndexedRecord not just GenericRecord * */ public class LiAvroSerializer extends LiAvroSerializerBase implements Serializer<GenericRecord> { @Override public byte[] serialize(String topic, GenericRecord data) { try { return super.serialize(topic, data); } catch (org.apache.gobblin.kafka.serialize.SerializationException e) { throw new SerializationException(e); } } }
3,622
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroDeserializer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.kafka.serialize; import org.apache.avro.Schema; import org.apache.avro.generic.GenericRecord; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.serialization.Deserializer; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry; /** * The LinkedIn Avro Deserializer (works with records serialized by the {@link LiAvroSerializer}) */ @Slf4j public class LiAvroDeserializer extends LiAvroDeserializerBase implements Deserializer<GenericRecord> { public LiAvroDeserializer(KafkaSchemaRegistry<MD5Digest, Schema> schemaRegistry) { super(schemaRegistry); } /** * * @param topic topic associated with the data * @param data serialized bytes * @return deserialized object */ @Override public GenericRecord deserialize(String topic, byte[] data) { try { return super.deserialize(topic, data); } catch (org.apache.gobblin.kafka.serialize.SerializationException e) { throw new SerializationException("Error during Deserialization", e); } } }
3,623
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka/writer/Kafka09JsonObjectWriterBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.kafka.writer; import java.util.Properties; import org.apache.gobblin.configuration.ConfigurationException; import org.apache.gobblin.kafka.serialize.GsonSerializerBase; import org.apache.gobblin.writer.AsyncDataWriter; import org.apache.kafka.common.serialization.Serializer; import com.google.gson.JsonArray; import com.google.gson.JsonObject; /** * A {@link org.apache.gobblin.writer.DataWriterBuilder} that builds a {@link org.apache.gobblin.writer.DataWriter} to * write {@link JsonObject} to kafka */ public class Kafka09JsonObjectWriterBuilder extends AbstractKafkaDataWriterBuilder<JsonArray, JsonObject> { private static final String VALUE_SERIALIZER_KEY = KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + KafkaWriterConfigurationKeys.VALUE_SERIALIZER_CONFIG; @Override protected AsyncDataWriter<JsonObject> getAsyncDataWriter(Properties props) throws ConfigurationException { props.setProperty(VALUE_SERIALIZER_KEY, KafkaGsonObjectSerializer.class.getName()); return new Kafka09DataWriter<>(props); } /** * A specific {@link Serializer} that serializes {@link JsonObject} to byte array */ public final static class KafkaGsonObjectSerializer extends 
GsonSerializerBase<JsonObject> implements Serializer<JsonObject> { } }
3,624
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka/writer/KafkaDataWriterBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.kafka.writer; import java.util.Properties; import org.apache.gobblin.configuration.ConfigurationException; import org.apache.gobblin.writer.AsyncDataWriter; /** * Builder that hands back a {@link Kafka09DataWriter} */ public class KafkaDataWriterBuilder<S, D> extends AbstractKafkaDataWriterBuilder<S, D> { @Override protected AsyncDataWriter<D> getAsyncDataWriter(Properties props) throws ConfigurationException { return new Kafka09DataWriter<>(props); } }
3,625
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka/writer/Kafka09DataWriter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.kafka.writer; import java.io.IOException; import java.util.Properties; import java.util.concurrent.Future; import org.I0Itec.zkclient.ZkClient; import org.I0Itec.zkclient.ZkConnection; import org.apache.commons.lang3.tuple.Pair; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import com.google.common.base.Throwables; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import kafka.admin.AdminUtils; import kafka.utils.ZKStringSerializer$; import kafka.utils.ZkUtils; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationException; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.writer.WriteCallback; import org.apache.gobblin.writer.WriteResponse; import org.apache.gobblin.writer.WriteResponseFuture; import org.apache.gobblin.writer.WriteResponseMapper; /** * Implementation of KafkaWriter that wraps a {@link KafkaProducer}. * This provides at-least once semantics. 
* Applications should expect data to be possibly written to Kafka even if the overall Gobblin job fails. * */ @Slf4j public class Kafka09DataWriter<K, V> implements KafkaDataWriter<K, V> { public static final WriteResponseMapper<RecordMetadata> WRITE_RESPONSE_WRAPPER = new WriteResponseMapper<RecordMetadata>() { @Override public WriteResponse wrap(final RecordMetadata recordMetadata) { return new WriteResponse<RecordMetadata>() { @Override public RecordMetadata getRawResponse() { return recordMetadata; } @Override public String getStringResponse() { return recordMetadata.toString(); } @Override public long bytesWritten() { return -1; } }; } }; private final Producer<K, V> producer; private final String topic; private final KafkaWriterCommonConfig commonConfig; public static Producer getKafkaProducer(Properties props) { Object producerObject = KafkaWriterHelper.getKafkaProducer(props); try { Producer producer = (Producer) producerObject; return producer; } catch (ClassCastException e) { log.error("Failed to instantiate Kafka producer " + producerObject.getClass().getName() + " as instance of Producer.class", e); throw Throwables.propagate(e); } } public Kafka09DataWriter(Properties props) throws ConfigurationException { this(getKafkaProducer(props), ConfigFactory.parseProperties(props)); } public Kafka09DataWriter(Producer producer, Config config) throws ConfigurationException { this.topic = config.getString(KafkaWriterConfigurationKeys.KAFKA_TOPIC); provisionTopic(topic,config); this.producer = producer; this.commonConfig = new KafkaWriterCommonConfig(config); } @Override public void close() throws IOException { log.debug("Close called"); this.producer.close(); } @Override public Future<WriteResponse> write(final V record, final WriteCallback callback) { try { Pair<K, V> keyValuePair = KafkaWriterHelper.getKeyValuePair(record, this.commonConfig); return write(keyValuePair, callback); } catch (Exception e) { throw new RuntimeException("Failed to create a Kafka write 
request", e); } } public Future<WriteResponse> write(Pair<K, V> keyValuePair, final WriteCallback callback) { try { return new WriteResponseFuture<>(this.producer .send(new ProducerRecord<>(topic, keyValuePair.getKey(), keyValuePair.getValue()), new Callback() { @Override public void onCompletion(final RecordMetadata metadata, Exception exception) { if (exception != null) { callback.onFailure(exception); } else { callback.onSuccess(WRITE_RESPONSE_WRAPPER.wrap(metadata)); } } }), WRITE_RESPONSE_WRAPPER); } catch (Exception e) { throw new RuntimeException("Failed to create a Kafka write request", e); } } @Override public void flush() throws IOException { this.producer.flush(); } private void provisionTopic(String topicName, Config config) { String zooKeeperPropKey = KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER; if (!config.hasPath(zooKeeperPropKey)) { log.debug("Topic " + topicName + " is configured without the partition and replication"); return; } String zookeeperConnect = config.getString(zooKeeperPropKey); int sessionTimeoutMs = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.ZOOKEEPER_SESSION_TIMEOUT, KafkaWriterConfigurationKeys.ZOOKEEPER_SESSION_TIMEOUT_DEFAULT); int connectionTimeoutMs = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.ZOOKEEPER_CONNECTION_TIMEOUT, KafkaWriterConfigurationKeys.ZOOKEEPER_CONNECTION_TIMEOUT_DEFAULT); // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then // createTopic() will only seem to work (it will return without error). The topic will exist in // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the // topic. 
ZkClient zkClient = new ZkClient(zookeeperConnect, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$); // Security for Kafka was added in Kafka 0.9.0.0 ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), false); int partitions = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.PARTITION_COUNT, KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT); int replication = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.REPLICATION_COUNT, KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT); Properties topicConfig = new Properties(); if (AdminUtils.topicExists(zkUtils, topicName)) { log.debug("Topic {} already exists with replication: {} and partitions: {}", topicName, replication, partitions); boolean deleteTopicIfExists = ConfigUtils.getBoolean(config, KafkaWriterConfigurationKeys.DELETE_TOPIC_IF_EXISTS, KafkaWriterConfigurationKeys.DEFAULT_DELETE_TOPIC_IF_EXISTS); if (!deleteTopicIfExists) { return; } else { log.debug("Deleting topic {}", topicName); AdminUtils.deleteTopic(zkUtils, topicName); } } AdminUtils.createTopic(zkUtils, topicName, partitions, replication, topicConfig); log.info("Created topic {} with replication: {} and partitions : {}", topicName, replication, partitions); } }
3,626
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/kafka/client/Kafka09ConsumerClient.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.kafka.client;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.stream.Collectors;

import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.metrics.KafkaMetric;

import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import javax.annotation.Nonnull;
import lombok.EqualsAndHashCode;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaOffsetRetrievalFailureException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.apache.gobblin.util.ConfigUtils;

/**
 * A {@link GobblinKafkaConsumerClient} that uses kafka 09 consumer client. Use {@link Factory#create(Config)} to create
 * new Kafka09ConsumerClients. The {@link Config} used to create clients must have required key
 * {@value #GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY}
 *
 * @param <K> Message key type
 * @param <V> Message value type
 */
@Slf4j
public class Kafka09ConsumerClient<K, V> extends AbstractBaseKafkaConsumerClient {

  // Raw Kafka 0.9 consumer property names that this client populates.
  private static final String KAFKA_09_CLIENT_BOOTSTRAP_SERVERS_KEY = "bootstrap.servers";
  private static final String KAFKA_09_CLIENT_ENABLE_AUTO_COMMIT_KEY = "enable.auto.commit";
  private static final String KAFKA_09_CLIENT_SESSION_TIMEOUT_KEY = "session.timeout.ms";
  private static final String KAFKA_09_CLIENT_KEY_DESERIALIZER_CLASS_KEY = "key.deserializer";
  private static final String KAFKA_09_CLIENT_VALUE_DESERIALIZER_CLASS_KEY = "value.deserializer";
  private static final String KAFKA_09_CLIENT_GROUP_ID = "group.id";

  // Auto-commit is disabled by default; offsets are committed explicitly via commitOffsets*().
  private static final String KAFKA_09_DEFAULT_ENABLE_AUTO_COMMIT = Boolean.toString(false);
  public static final String KAFKA_09_DEFAULT_KEY_DESERIALIZER =
      "org.apache.kafka.common.serialization.StringDeserializer";
  private static final String KAFKA_09_DEFAULT_GROUP_ID = "kafka09";

  // Gobblin-scoped config keys (CONFIG_PREFIX comes from AbstractBaseKafkaConsumerClient).
  public static final String GOBBLIN_CONFIG_KEY_DESERIALIZER_CLASS_KEY = CONFIG_PREFIX
      + KAFKA_09_CLIENT_KEY_DESERIALIZER_CLASS_KEY;
  public static final String GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY = CONFIG_PREFIX
      + KAFKA_09_CLIENT_VALUE_DESERIALIZER_CLASS_KEY;

  // Defaults applied as a fallback beneath any user-supplied "source.kafka" config.
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(KAFKA_09_CLIENT_ENABLE_AUTO_COMMIT_KEY, KAFKA_09_DEFAULT_ENABLE_AUTO_COMMIT)
          .put(KAFKA_09_CLIENT_KEY_DESERIALIZER_CLASS_KEY, KAFKA_09_DEFAULT_KEY_DESERIALIZER)
          .put(KAFKA_09_CLIENT_GROUP_ID, KAFKA_09_DEFAULT_GROUP_ID)
          .build());

  private final Consumer<K, V> consumer;

  /**
   * Builds a {@link KafkaConsumer} from the supplied {@link Config}.
   * Requires {@value #GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY} to be present.
   */
  private Kafka09ConsumerClient(Config config) {
    super(config);
    Preconditions.checkArgument(config.hasPath(GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY),
        "Missing required property " + GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY);

    Properties props = new Properties();
    props.put(KAFKA_09_CLIENT_BOOTSTRAP_SERVERS_KEY, Joiner.on(",").join(super.brokers));
    props.put(KAFKA_09_CLIENT_SESSION_TIMEOUT_KEY, super.socketTimeoutMillis);

    // grab all the config under "source.kafka" and add the defaults as fallback.
    Config baseConfig = ConfigUtils.getConfigOrEmpty(config, CONFIG_NAMESPACE).withFallback(FALLBACK);
    // get the "source.kafka.consumerConfig" config for extra config to pass along to Kafka with a fallback to the
    // shared config that start with "gobblin.kafka.sharedConfig"
    Config specificConfig = ConfigUtils.getConfigOrEmpty(baseConfig, CONSUMER_CONFIG).withFallback(
        ConfigUtils.getConfigOrEmpty(config, ConfigurationKeys.SHARED_KAFKA_CONFIG_PREFIX));
    // The specific config overrides settings in the base config
    Config scopedConfig = specificConfig.withFallback(baseConfig.withoutPath(CONSUMER_CONFIG));
    props.putAll(ConfigUtils.configToProperties(scopedConfig));

    this.consumer = new KafkaConsumer<>(props);
  }

  /**
   * Wraps an externally created {@link Consumer}; primarily useful for tests and dependency injection.
   */
  public Kafka09ConsumerClient(Config config, Consumer<K, V> consumer) {
    super(config);
    this.consumer = consumer;
  }

  /**
   * Lists all topics visible to the consumer, mapping each partition's {@link PartitionInfo}
   * to a Gobblin {@link KafkaPartition}.
   */
  @Override
  public List<KafkaTopic> getTopics() {
    return FluentIterable.from(this.consumer.listTopics().entrySet())
        .transform(new Function<Entry<String, List<PartitionInfo>>, KafkaTopic>() {
          @Override
          public KafkaTopic apply(Entry<String, List<PartitionInfo>> filteredTopicEntry) {
            return new KafkaTopic(filteredTopicEntry.getKey(),
                Lists.transform(filteredTopicEntry.getValue(), PARTITION_INFO_TO_KAFKA_PARTITION));
          }
        }).toList();
  }

  /**
   * Returns the earliest available offset for the given partition.
   * Saves the consumer's current position, seeks to the beginning to read the offset,
   * then restores the previous position so ongoing consumption is not disturbed.
   */
  @Override
  public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
    this.consumer.assign(Collections.singletonList(topicPartition));
    long previousPosition = this.consumer.position(topicPartition);
    this.consumer.seekToBeginning(topicPartition);
    long earliestOffset = this.consumer.position(topicPartition);
    this.consumer.seek(topicPartition, previousPosition);
    return earliestOffset;
  }

  /**
   * Returns the latest available offset for the given partition.
   * Same save/seek/restore pattern as {@link #getEarliestOffset(KafkaPartition)}.
   */
  @Override
  public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
    this.consumer.assign(Collections.singletonList(topicPartition));
    long previousPosition = this.consumer.position(topicPartition);
    this.consumer.seekToEnd(topicPartition);
    long latestOffset = this.consumer.position(topicPartition);
    this.consumer.seek(topicPartition, previousPosition);
    return latestOffset;
  }

  /**
   * Assigns each partition (from topicPartitions) that has a watermark entry and seeks it to
   * the watermark's offset. Partitions without a watermark entry are skipped.
   *
   * NOTE(review): each iteration calls assign() with a single-element list, which in the Kafka
   * consumer API replaces the previous assignment — so only the last partition remains assigned
   * after the loop. Confirm this is the intended behavior with callers.
   */
  @Override
  public void assignAndSeek(List<KafkaPartition> topicPartitions,
      Map<KafkaPartition, LongWatermark> topicWatermarksMap) {
    // NOTE(review): raw HashSet — HashSet<KafkaPartition> would avoid the unchecked warning.
    HashSet<KafkaPartition> topicPartitionSet = new HashSet(topicPartitions);
    topicWatermarksMap.entrySet().stream().filter(entry -> topicPartitionSet.contains(entry.getKey()))
        .forEach(entry -> {
          TopicPartition topicPartition = new TopicPartition(entry.getKey().getTopicName(), entry.getKey().getId());
          this.consumer.assign(Collections.singletonList(topicPartition));
          this.consumer.seek(topicPartition, entry.getValue().getValue());
        });
  }

  /**
   * Consumes records from a single partition starting at nextOffset.
   * Returns null (not an empty iterator) when nextOffset is already past maxOffset — callers
   * must handle the null case.
   */
  @Override
  public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
    if (nextOffset > maxOffset) {
      return null;
    }

    this.consumer.assign(Lists.newArrayList(new TopicPartition(partition.getTopicName(), partition.getId())));
    this.consumer.seek(new TopicPartition(partition.getTopicName(), partition.getId()), nextOffset);
    return consume();
  }

  /**
   * Polls once (bounded by fetchTimeoutMillis) and lazily wraps each {@link ConsumerRecord}
   * in a {@link Kafka09ConsumerRecord}. Poll failures are logged and rethrown as RuntimeException.
   */
  @Override
  public Iterator<KafkaConsumerRecord> consume() {
    try {
      ConsumerRecords<K, V> consumerRecords = consumer.poll(super.fetchTimeoutMillis);

      return Iterators.transform(consumerRecords.iterator(), input -> {
        try {
          return new Kafka09ConsumerRecord(input);
        } catch (Throwable t) {
          throw Throwables.propagate(t);
        }
      });
    } catch (Exception e) {
      log.error("Exception on polling records", e);
      throw new RuntimeException(e);
    }
  }

  /**
   * Subscribe to a kafka topic
   * TODO Add multi topic support
   * @param topic
   */
  @Override
  public void subscribe(String topic) {
    this.consumer.subscribe(Lists.newArrayList(topic), new NoOpConsumerRebalanceListener());
  }

  /**
   * Subscribe to a kafka topic with a {#GobblinConsumerRebalanceListener}
   * TODO Add multi topic support
   * @param topic
   */
  @Override
  public void subscribe(String topic, GobblinConsumerRebalanceListener listener) {
    // Adapt Kafka's rebalance callbacks to the Gobblin listener, converting TopicPartitions
    // to KafkaPartitions on the way through.
    this.consumer.subscribe(Lists.newArrayList(topic), new ConsumerRebalanceListener() {
      @Override
      public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        listener.onPartitionsRevoked(partitions.stream()
            .map(a -> new KafkaPartition.Builder().withTopicName(a.topic()).withId(a.partition()).build())
            .collect(Collectors.toList()));
      }

      @Override
      public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        listener.onPartitionsAssigned(partitions.stream()
            .map(a -> new KafkaPartition.Builder().withTopicName(a.topic()).withId(a.partition()).build())
            .collect(Collectors.toList()));
      }
    });
  }

  /**
   * Exposes the Kafka consumer's internal metrics as Codahale {@link Metric}s keyed by
   * their canonical names.
   */
  @Override
  public Map<String, Metric> getMetrics() {
    Map<MetricName, KafkaMetric> kafkaMetrics = (Map<MetricName, KafkaMetric>) this.consumer.metrics();
    Map<String, Metric> codaHaleMetricMap = new HashMap<>();

    kafkaMetrics
        .forEach((key, value) -> codaHaleMetricMap.put(canonicalMetricName(value), kafkaToCodaHaleMetric(value)));
    return codaHaleMetricMap;
  }

  /**
   * Commit offsets to Kafka asynchronously
   */
  @Override
  public void commitOffsetsAsync(Map<KafkaPartition, Long> partitionOffsets) {
    Map<TopicPartition, OffsetAndMetadata> offsets = partitionOffsets.entrySet().stream().collect(
        Collectors.toMap(e -> new TopicPartition(e.getKey().getTopicName(), e.getKey().getId()),
            e -> new OffsetAndMetadata(e.getValue())));
    consumer.commitAsync(offsets, new OffsetCommitCallback() {
      @Override
      public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        // Failures are logged but not retried here; the next commit attempt covers them.
        if (exception != null) {
          log.error("Exception while committing offsets " + offsets, exception);
          return;
        }
      }
    });
  }

  /**
   * Commit offsets to Kafka synchronously
   */
  @Override
  public void commitOffsetsSync(Map<KafkaPartition, Long> partitionOffsets) {
    Map<TopicPartition, OffsetAndMetadata> offsets = partitionOffsets.entrySet().stream().collect(
        Collectors.toMap(e -> new TopicPartition(e.getKey().getTopicName(), e.getKey().getId()),
            e -> new OffsetAndMetadata(e.getValue())));
    consumer.commitSync(offsets);
  }

  /**
   * returns the last committed offset for a KafkaPartition
   * @param partition
   * @return last committed offset or -1 for invalid KafkaPartition
   */
  @Override
  public long committed(KafkaPartition partition) {
    OffsetAndMetadata offsetAndMetadata =
        consumer.committed(new TopicPartition(partition.getTopicName(), partition.getId()));
    // -1 signals "no committed offset" to callers.
    return offsetAndMetadata != null ? offsetAndMetadata.offset() : -1l;
  }

  /**
   * Convert a {@link KafkaMetric} instance to a {@link Metric}.
   * The returned Gauge reads the KafkaMetric's current value on every call.
   * @param kafkaMetric
   * @return
   */
  private Metric kafkaToCodaHaleMetric(final KafkaMetric kafkaMetric) {
    if (log.isDebugEnabled()) {
      log.debug("Processing a metric change for {}", kafkaMetric.metricName().toString());
    }
    Gauge<Double> gauge = () -> kafkaMetric.value();
    return gauge;
  }

  // Builds the canonical name from the metric's group, tag values, and name
  // (formatting delegated to the superclass).
  private String canonicalMetricName(KafkaMetric kafkaMetric) {
    MetricName name = kafkaMetric.metricName();
    return canonicalMetricName(name.group(), name.tags().values(), name.name());
  }

  /** Closes the underlying Kafka consumer and releases its resources. */
  @Override
  public void close() throws IOException {
    this.consumer.close();
  }

  // Maps Kafka's PartitionInfo to a Gobblin KafkaPartition, carrying over leader id/host/port.
  private static final Function<PartitionInfo, KafkaPartition> PARTITION_INFO_TO_KAFKA_PARTITION =
      new Function<PartitionInfo, KafkaPartition>() {
        @Override
        public KafkaPartition apply(@Nonnull PartitionInfo partitionInfo) {
          return new KafkaPartition.Builder().withId(partitionInfo.partition()).withTopicName(partitionInfo.topic())
              .withLeaderId(partitionInfo.leader().id())
              .withLeaderHostAndPort(partitionInfo.leader().host(), partitionInfo.leader().port()).build();
        }
      };

  /**
   * A factory class to instantiate {@link Kafka09ConsumerClient}
   */
  public static class Factory implements GobblinKafkaConsumerClientFactory {
    @SuppressWarnings("rawtypes")
    @Override
    public GobblinKafkaConsumerClient create(Config config) {
      return new Kafka09ConsumerClient(config);
    }
  }

  /**
   * A record returned by {@link Kafka09ConsumerClient}
   *
   * @param <K> Message key type
   * @param <V> Message value type
   */
  @EqualsAndHashCode(callSuper = true)
  @ToString
  public static class Kafka09ConsumerRecord<K, V> extends BaseKafkaConsumerRecord implements DecodeableKafkaRecord<K, V> {
    private final ConsumerRecord<K, V> consumerRecord;

    public Kafka09ConsumerRecord(ConsumerRecord<K, V> consumerRecord) {
      // Kafka 09 consumerRecords do not provide value size.
      // Only 08 and 10 versions provide them.
      super(consumerRecord.offset(), BaseKafkaConsumerRecord.VALUE_SIZE_UNAVAILABLE, consumerRecord.topic(),
          consumerRecord.partition());
      this.consumerRecord = consumerRecord;
    }

    @Override
    public K getKey() {
      return this.consumerRecord.key();
    }

    @Override
    public V getValue() {
      return this.consumerRecord.value();
    }
  }
}
3,627
0
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-kafka-09/src/main/java/org/apache/gobblin/service/AvroJobSpecDeserializer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.service;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.kafka.common.serialization.Deserializer;

import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.runtime.job_spec.AvroJobSpec;

import lombok.extern.slf4j.Slf4j;

/**
 * A Kafka {@link Deserializer} that converts a byte array into an {@link AvroJobSpec}.
 *
 * Each message is expected to carry schema-versioning information (as written by a
 * {@link FixedSchemaVersionWriter}) followed by the Avro binary encoding of the record.
 */
@Slf4j
public class AvroJobSpecDeserializer implements Deserializer<AvroJobSpec> {
  // Reusable decoder; rebound to each incoming message in deserialize().
  private BinaryDecoder _decoder;
  private SpecificDatumReader<AvroJobSpec> _reader;
  private SchemaVersionWriter<?> _versionWriter;

  /**
   * Initializes the reusable decoder, datum reader, and version reader.
   * The configs map and isKey flag are not consulted.
   */
  @Override
  public void configure(Map<String, ?> configs, boolean isKey) {
    // Seed the decoder with an empty stream; binaryDecoder(is, _decoder) in deserialize()
    // reuses this instance against the real message stream.
    InputStream dummyInputStream = new ByteArrayInputStream(new byte[0]);
    _decoder = DecoderFactory.get().binaryDecoder(dummyInputStream, null);
    _reader = new SpecificDatumReader<>(AvroJobSpec.SCHEMA$);
    _versionWriter = new FixedSchemaVersionWriter();
  }

  /**
   * Deserializes a Kafka message payload into an {@link AvroJobSpec}.
   *
   * @param topic topic the message came from (unused)
   * @param data raw message bytes: schema-version header followed by Avro binary body
   * @return the decoded {@link AvroJobSpec}
   * @throws RuntimeException wrapping the underlying {@link IOException} on decode failure
   */
  @Override
  public AvroJobSpec deserialize(String topic, byte[] data) {
    try (InputStream is = new ByteArrayInputStream(data)) {
      // Consume (and discard) the schema-version header before handing the stream to Avro.
      _versionWriter.readSchemaVersioningInformation(new DataInputStream(is));
      Decoder decoder = DecoderFactory.get().binaryDecoder(is, _decoder);
      return _reader.read(null, decoder);
    } catch (IOException e) {
      // Preserve the cause so decode failures are diagnosable upstream
      // (the original dropped the exception entirely).
      throw new RuntimeException("Could not decode message", e);
    }
  }

  /** No resources to release. */
  @Override
  public void close() {
  }
}
3,628
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin/crypto/GPGFileEncryptorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.crypto;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.bouncycastle.openpgp.PGPException;
import org.testng.Assert;
import org.testng.annotations.Test;

/**
 * Test class for {@link GPGFileEncryptor}: round-trips content through encrypt + decrypt.
 * Test key and test passphrase are generated offline
 */
public class GPGFileEncryptorTest {
  // Symmetric-encryption password.
  public static final String PASSWORD = "test";
  // Passphrase protecting the test private key.
  public static final String PASSPHRASE = "gobblin";
  public static final String PUBLIC_KEY = "/crypto/gpg/testPublic.key";
  public static final String PRIVATE_KEY = "/crypto/gpg/testPrivate.key";
  // Hex key id of the test key pair.
  public static final String KEY_ID = "c27093CA21A87D6F";
  public static final String EXPECTED_FILE_CONTENT = "This is a key based encryption file.";
  public static final byte[] EXPECTED_FILE_CONTENT_BYTES = EXPECTED_FILE_CONTENT.getBytes(StandardCharsets.UTF_8);

  /**
   * Encrypt a test string with a symmetric key and check that it can be decrypted
   * @throws IOException
   * @throws PGPException
   */
  @Test
  public void encryptSym() throws IOException, PGPException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStream os = GPGFileEncryptor.encryptFile(baos, PASSWORD, "DES");
    os.write(EXPECTED_FILE_CONTENT_BYTES);
    os.close();
    baos.close();

    byte[] encryptedBytes = baos.toByteArray();

    try (InputStream is = GPGFileDecryptor.decryptFile(new ByteArrayInputStream(encryptedBytes), "test")) {
      byte[] decryptedBytes = IOUtils.toByteArray(is);
      // Ciphertext must differ from the plaintext; decryption must restore it exactly.
      Assert.assertNotEquals(EXPECTED_FILE_CONTENT_BYTES, encryptedBytes);
      Assert.assertEquals(EXPECTED_FILE_CONTENT_BYTES, decryptedBytes);
    }
  }

  /**
   * Encrypt a test string with an asymmetric key and check that it can be decrypted
   * @throws IOException
   * @throws PGPException
   */
  @Test
  public void encryptAsym() throws IOException, PGPException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStream os = GPGFileEncryptor.encryptFile(baos, getClass().getResourceAsStream(PUBLIC_KEY),
        Long.parseUnsignedLong(KEY_ID, 16), "CAST5");
    os.write(EXPECTED_FILE_CONTENT_BYTES);
    os.close();
    baos.close();

    byte[] encryptedBytes = baos.toByteArray();

    try (InputStream is = GPGFileDecryptor.decryptFile(new ByteArrayInputStream(encryptedBytes),
        getClass().getResourceAsStream(PRIVATE_KEY), PASSPHRASE)) {
      byte[] decryptedBytes = IOUtils.toByteArray(is);
      Assert.assertNotEquals(EXPECTED_FILE_CONTENT_BYTES, encryptedBytes);
      Assert.assertEquals(EXPECTED_FILE_CONTENT_BYTES, decryptedBytes);
    }
  }

  /**
   * Test error with bad cipher
   */
  @Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*BadCipher.*")
  public void badCipher() throws IOException, PGPException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // encryptFile is expected to throw before the stream is usable, so os is never written/closed.
    OutputStream os = GPGFileEncryptor.encryptFile(baos, getClass().getResourceAsStream(PUBLIC_KEY),
        Long.parseUnsignedLong(KEY_ID, 16), "BadCipher");
  }
}
3,629
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin/crypto/RotatingAESCodecTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.crypto;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.Map;
import java.util.Random;

import org.apache.commons.io.IOUtils;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableMap;

import javax.crypto.Cipher;
import javax.crypto.CipherInputStream;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import javax.xml.bind.DatatypeConverter;

/**
 * Tests for {@link RotatingAESCodec}: round-trips data through the codec's streams and
 * also decodes the on-the-wire format by hand to pin it down
 * (4-char key id, 3-char IV length, base64 IV, base64 AES/CBC/PKCS5 body).
 */
public class RotatingAESCodecTest {

  /** Encode a small payload, verify the wire format manually, then decode via the codec's stream. */
  @Test
  public void testStreams()
      throws IOException, NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeyException,
             InvalidAlgorithmParameterException {
    final byte[] toWrite = "hello world".getBytes();

    SimpleCredentialStore credStore = new SimpleCredentialStore();
    RotatingAESCodec encryptor = new RotatingAESCodec(credStore);

    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    OutputStream os = encryptor.encodeOutputStream(sink);
    os.write(toWrite);
    os.close();

    byte[] encryptedBytes = sink.toByteArray();
    manuallyDecodeAndVerifyBytes(toWrite, encryptedBytes, credStore);

    // Try with stream
    InputStream decoderIn = encryptor.decodeInputStream(new ByteArrayInputStream(encryptedBytes));
    byte[] decoded = IOUtils.toByteArray(decoderIn);
    Assert.assertEquals(decoded, toWrite, "Expected decoded output to match encoded output");
  }

  /** Same round-trip with ~20 MB of random data to exercise multi-buffer writes. */
  @Test
  public void testLotsOfData() throws Exception {
    long bytesToWrite = 20 * 1000 * 1000;
    byte[] buf = new byte[16384];
    long bytesWritten = 0;

    SimpleCredentialStore credStore = new SimpleCredentialStore();
    RotatingAESCodec encryptor = new RotatingAESCodec(credStore);

    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    ByteArrayOutputStream originalBytesStream = new ByteArrayOutputStream();
    OutputStream encryptedStream = encryptor.encodeOutputStream(sink);

    Random r = new Random();
    while (bytesWritten < bytesToWrite) {
      r.nextBytes(buf);
      // Keep a copy of the plaintext so we can compare after decryption.
      originalBytesStream.write(buf);
      encryptedStream.write(buf);
      bytesWritten += buf.length;
    }

    encryptedStream.close();

    byte[] originalBytes = originalBytesStream.toByteArray();
    byte[] encryptedBytes = sink.toByteArray();
    manuallyDecodeAndVerifyBytes(originalBytes, encryptedBytes, credStore);

    // Try with stream
    InputStream decoderIn = encryptor.decodeInputStream(new ByteArrayInputStream(encryptedBytes));
    byte[] decoded = IOUtils.toByteArray(decoderIn);
    Assert.assertEquals(decoded, originalBytes, "Expected decoded output to match encoded output");
  }

  // Reads the remainder of the stream and base64-decodes it (the encrypted body).
  private byte[] readAndBase64DecodeBody(InputStream in) throws IOException {
    byte[] body = IOUtils.toByteArray(in);
    body = DatatypeConverter.parseBase64Binary(new String(body, "UTF-8"));
    return body;
  }

  // Reads ivLen bytes of base64 IV and returns the decoded binary IV.
  private byte[] verifyAndExtractIv(InputStream in, Integer ivLen) throws IOException {
    int bytesRead;
    byte[] base64Iv = new byte[ivLen];
    bytesRead = in.read(base64Iv);
    Assert.assertEquals(Integer.valueOf(bytesRead), Integer.valueOf(ivLen), "Expected to read IV");
    return DatatypeConverter.parseBase64Binary(new String(base64Iv, "UTF-8"));
  }

  // Reads the 3-char decimal IV-length field and asserts it is 24 (16 IV bytes base64-encoded).
  private Integer verifyIvLen(InputStream in) throws IOException {
    int bytesRead;
    byte[] ivLenBytes = new byte[3];
    bytesRead = in.read(ivLenBytes);
    Assert.assertEquals(bytesRead, ivLenBytes.length, "Expected to be able to iv length");
    Integer ivLen = Integer.valueOf(new String(ivLenBytes, "UTF-8"));
    Assert.assertEquals(Integer.valueOf(ivLen), Integer.valueOf(24), "Always expect IV to be 24 bytes base64 encoded");
    return ivLen;
  }

  // Reads the 4-char key-id field and asserts it parses to the expected id.
  private void verifyKeyId(InputStream in, int expectedKeyId) throws IOException {
    // Verify keyId is properly padded
    byte[] keyIdBytes = new byte[4];
    int bytesRead = in.read(keyIdBytes);
    Assert.assertEquals(bytesRead, keyIdBytes.length, "Expected to be able to read key id");
    String keyId = new String(keyIdBytes, "UTF-8");
    Assert.assertEquals(Integer.valueOf(keyId), Integer.valueOf(expectedKeyId), "Expected keyId to equal 1");
  }

  // Parses the wire format by hand (key id, IV length, IV, body) and decrypts the body with a
  // plain JCE AES/CBC/PKCS5Padding cipher to confirm it matches the original plaintext.
  private void manuallyDecodeAndVerifyBytes(byte[] originalBytes, byte[] encryptedBytes,
      SimpleCredentialStore credStore)
      throws IOException, NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeyException,
             InvalidAlgorithmParameterException {
    // Manually decode
    InputStream in = new ByteArrayInputStream(encryptedBytes);
    verifyKeyId(in, 1);
    Integer ivLen = verifyIvLen(in);
    byte[] ivBinary = verifyAndExtractIv(in, ivLen);
    byte[] body = readAndBase64DecodeBody(in);

    // feed back into cipheroutput stream
    Cipher inputCipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
    IvParameterSpec ivParameterSpec = new IvParameterSpec(ivBinary);
    inputCipher.init(Cipher.DECRYPT_MODE, credStore.getKey(), ivParameterSpec);

    CipherInputStream cis = new CipherInputStream(new ByteArrayInputStream(body), inputCipher);
    byte[] decoded = IOUtils.toByteArray(cis);
    Assert.assertEquals(decoded, originalBytes, "Expected decoded output to match encoded output");
  }

  /** Single-key in-memory credential store: key id "1" maps to a random 128-bit AES key. */
  static class SimpleCredentialStore implements CredentialStore {
    private final SecretKey key;

    public SimpleCredentialStore() {
      SecureRandom r = new SecureRandom();
      byte[] keyBytes = new byte[16];
      r.nextBytes(keyBytes);
      key = new SecretKeySpec(keyBytes, "AES");
    }

    @Override
    public byte[] getEncodedKey(String id) {
      if (id.equals("1")) {
        return key.getEncoded();
      }
      return null;
    }

    @Override
    public Map<String, byte[]> getAllEncodedKeys() {
      return ImmutableMap.of("1", key.getEncoded());
    }

    public SecretKey getKey() {
      return key;
    }
  }
}
3,630
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin/crypto/KeyToStringCodecTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import org.testng.Assert; import org.testng.annotations.Test; /** * Tests for the various KeyToStringCodec implementations. */ public class KeyToStringCodecTest { @Test public void testHexKeyToStringCodec() { String hexKey = "1234"; byte[] binKey = new byte[]{18, 52}; HexKeyToStringCodec codec = new HexKeyToStringCodec(); Assert.assertEquals(codec.decodeKey(hexKey), binKey); Assert.assertEquals(codec.encodeKey(binKey), hexKey); } @Test public void testBase64KeyToStringCodec() { String b64Key = "EjQ="; byte[] binKey = new byte[]{18, 52}; Base64KeyToStringCodec codec = new Base64KeyToStringCodec(); Assert.assertEquals(codec.decodeKey(b64Key), binKey); Assert.assertEquals(codec.encodeKey(binKey), b64Key); } }
3,631
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin/crypto/JCEKSKeystoreCredentialStoreTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.crypto;

import java.io.File;
import java.io.IOException;
import java.security.KeyStoreException;
import java.util.EnumSet;

import org.testng.Assert;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;

/**
 * Tests for {@link JCEKSKeystoreCredentialStore}: missing-keystore behavior,
 * CREATE_IF_MISSING creation, key generation, and reload.
 */
public class JCEKSKeystoreCredentialStoreTest {
  private File tempFile;

  @BeforeTest
  public void generateTempPath() throws IOException {
    // Create then delete so we hold a unique path that does not yet exist on disk.
    tempFile = File.createTempFile("keystore_unit_test", null);
    tempFile.delete();
  }

  @AfterTest
  public void deleteTempPath() throws IOException {
    tempFile.delete();
  }

  @Test
  public void testGenerateKeys() throws KeyStoreException, IOException {
    final String path = tempFile.getAbsolutePath();
    final String password = "abcd";

    // Without CREATE_IF_MISSING, opening a non-existent keystore must fail.
    try {
      JCEKSKeystoreCredentialStore cs = new JCEKSKeystoreCredentialStore(path, password);
      Assert.fail("Expected exception to be thrown because keystore doesn't exist");
    } catch (IllegalArgumentException e) {
      // pass
    }

    // Create the keystore, generate 20 AES keys, then reopen and verify all 20 persisted.
    JCEKSKeystoreCredentialStore cs = new JCEKSKeystoreCredentialStore(path, password, EnumSet.of(
        JCEKSKeystoreCredentialStore.CreationOptions.CREATE_IF_MISSING));
    cs.generateAesKeys(20, 0);

    cs = new JCEKSKeystoreCredentialStore(path, password);
    Assert.assertEquals(cs.getAllEncodedKeys().size(), 20);
  }
}
3,632
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin/crypto/JsonCredentialStoreTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.crypto;

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

/**
 * Tests that {@link JsonCredentialStore} loads a JSON keystore fixture with each
 * supported key-to-string codec (hex and base64).
 */
public class JsonCredentialStoreTest {
  // Expected raw bytes for key id "0001" in the fixture keystores.
  private static final byte[] KEY1_EXPECTED_VAL =
      new byte[] { 2, 79, 74, 11, 93, -118, 15, 29, -97, -78, 64, -7, -89, 74, 63, -119 };

  // Pairs each codec's tag (used in the fixture filename) with a codec instance.
  @DataProvider(name="codecInfo")
  public static Object[][] codecInfo() {
    return new Object[][] {
      { HexKeyToStringCodec.TAG, new HexKeyToStringCodec() },
      { Base64KeyToStringCodec.TAG, new Base64KeyToStringCodec() }
    };
  }

  @Test(dataProvider = "codecInfo")
  public void canLoadKeystore(String codecType, KeyToStringCodec codec) throws IOException {
    // Fixture name embeds the codec tag, e.g. test_json_keystore.hex.json.
    Path ksPath = new Path(getClass().getResource("/crypto/test_json_keystore." + codecType + ".json").toString());
    JsonCredentialStore credentialStore = new JsonCredentialStore(ksPath, codec);

    Map<String, byte[]> allKeys = credentialStore.getAllEncodedKeys();
    // The fixture contains 29 keys; spot-check key "0001" then verify every key is a 16-byte
    // value retrievable individually.
    Assert.assertEquals(allKeys.size(), 29);
    Assert.assertEquals(credentialStore.getEncodedKey("0001"), KEY1_EXPECTED_VAL);

    for (Map.Entry<String, byte[]> key : allKeys.entrySet()) {
      Assert.assertEquals(credentialStore.getEncodedKey(key.getKey()), key.getValue());
      Assert.assertEquals(key.getValue().length, 16);
    }
  }
}
3,633
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/test/java/org/apache/gobblin/crypto/GPGFileDecryptorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.crypto;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.bouncycastle.openpgp.PGPException;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.base.Charsets;

/**
 * Test class for {@link GPGFileDecryptor}
 * Test key and test passphrase are generated offline
 */
public class GPGFileDecryptorTest {

  // Fixture locations and credentials for the small decryption tests (currently disabled).
  private static final String fileDir = "src/test/resources/crypto/gpg/";
  private static final String privateKey = "private.key";
  private static final String passwdBasedFile = "PasswordBasedEncryptionFile.txt.gpg";
  private static final String keyBasedFile = "KeyBasedEncryptionFile.txt.gpg";
  private static final String passPhrase = "test";

  private static final String expectedPasswdFileContent = "This is a password based encryption file.\n";
  private static final String expectedKeyFileContent = "This is a key based encryption file.\n";

  // Disabled: relies on on-disk fixtures under src/test/resources rather than classpath resources.
  @Test (enabled=false)
  public void keyBasedDecryptionTest() throws IOException {
    try(InputStream is = GPGFileDecryptor.decryptFile(
        FileUtils.openInputStream(new File(fileDir, keyBasedFile)),
        FileUtils.openInputStream(new File(fileDir, privateKey)), passPhrase)) {
      Assert.assertEquals(IOUtils.toString(is, Charsets.UTF_8), expectedKeyFileContent);
    }
  }

  // Disabled: same fixture dependency as keyBasedDecryptionTest.
  @Test (enabled=false)
  public void passwordBasedDecryptionTest() throws IOException {
    try(InputStream is = GPGFileDecryptor.decryptFile(
        FileUtils.openInputStream(new File(fileDir, passwdBasedFile)), passPhrase)) {
      Assert.assertEquals(IOUtils.toString(is, Charsets.UTF_8), expectedPasswdFileContent);
    }
  }

  /**
   * Decrypt a large (~1gb) password encrypted file and check that memory usage does not blow up
   * @throws IOException
   * @throws PGPException
   */
  @Test (enabled=true)
  public void decryptLargeFileSym() throws IOException, PGPException {
    // Double GC to get a stable heap baseline before streaming the large file.
    System.gc();
    System.gc();
    long startHeapSize = Runtime.getRuntime().totalMemory();

    try(InputStream is = GPGFileDecryptor.decryptFile(
        getClass().getResourceAsStream("/crypto/gpg/passwordEncrypted.gpg"), "test")) {
      int value;
      long bytesRead = 0;

      // the file contains only the character 'a'
      while ((value = is.read()) != -1) {
        bytesRead++;
        Assert.assertTrue(value == 'a');
      }

      Assert.assertEquals(bytesRead, 1041981183L);

      // Make sure no error thrown if read again after reaching EOF
      Assert.assertEquals(is.read(), -1);

      System.gc();
      System.gc();
      long endHeapSize = Runtime.getRuntime().totalMemory();

      // make sure the heap doesn't grow too much
      Assert.assertTrue(endHeapSize - startHeapSize < 200 * 1024 * 1024,
          "start heap " + startHeapSize + " end heap " + endHeapSize);
    }
  }

  /**
   * Decrypt a large (~1gb) private key encrypted file and check that memory usage does not blow up
   * @throws IOException
   * @throws PGPException
   */
  @Test (enabled=true)
  public void decryptLargeFileAsym() throws IOException, PGPException {
    System.gc();
    System.gc();
    long startHeapSize = Runtime.getRuntime().totalMemory();

    try(InputStream is = GPGFileDecryptor.decryptFile(
        getClass().getResourceAsStream("/crypto/gpg/keyEncrypted.gpg"),
        getClass().getResourceAsStream("/crypto/gpg/testPrivate.key"), "gobblin")) {
      int value;
      long bytesRead = 0;

      // the file contains only the character 'a'
      while ((value = is.read()) != -1) {
        bytesRead++;
        Assert.assertTrue(value == 'a');
      }

      Assert.assertEquals(bytesRead, 1041981183L);

      System.gc();
      System.gc();
      long endHeapSize = Runtime.getRuntime().totalMemory();

      // make sure the heap doesn't grow too much
      Assert.assertTrue(endHeapSize - startHeapSize < 200 * 1024 * 1024,
          "start heap " + startHeapSize + " end heap " + endHeapSize);
    }
  }
}
3,634
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/JsonCredentialStore.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import java.io.IOException; import java.io.InputStream; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.codehaus.jackson.JsonNode; import org.codehaus.jackson.map.ObjectMapper; import lombok.extern.slf4j.Slf4j; /** * Credential store that reads a JSON map that looks like: * { * "1": "<hex encoded key>", * "2": "<hex encoded key>" * } */ @Slf4j public class JsonCredentialStore implements CredentialStore { private static final ObjectMapper defaultMapper = new ObjectMapper(); public final static String TAG = "json"; private Map<String, byte[]> credentials; /** * Instantiate a new keystore using the file at the provided path */ public JsonCredentialStore(String path, KeyToStringCodec codec) throws IOException { this(new Path(path), codec); } /** * Instantiate a new keystore using the file at the provided path */ public JsonCredentialStore(Path path, KeyToStringCodec codec) throws IOException { credentials = new HashMap<>(); FileSystem fs = path.getFileSystem(new Configuration()); try 
(InputStream in = fs.open(path)) { ObjectMapper jsonParser = defaultMapper; JsonNode tree = jsonParser.readTree(in); if (!tree.isObject()) { throw new IllegalArgumentException("Json in " + path.toString() + " is not an object!"); } Iterator<Map.Entry<String, JsonNode>> it = tree.getFields(); while (it.hasNext()) { Map.Entry<String, JsonNode> field = it.next(); String keyId = field.getKey(); byte[] key = codec.decodeKey(field.getValue().getTextValue()); credentials.put(keyId, key); } } log.info("Initialized keystore from {} with {} keys", path.toString(), credentials.size()); } @Override public byte[] getEncodedKey(String id) { return credentials.get(id); } @Override public Map<String, byte[]> getAllEncodedKeys() { return Collections.unmodifiableMap(credentials); } }
3,635
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/HexKeyToStringCodec.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import javax.xml.bind.DatatypeConverter; /** * Reads in a hex string representing a key and returns the raw bytes. */ public class HexKeyToStringCodec implements KeyToStringCodec { public static final String TAG = "hex"; @Override public byte[] decodeKey(String src) { return DatatypeConverter.parseHexBinary(src); } @Override public String encodeKey(byte[] key) { return DatatypeConverter.printHexBinary(key); } }
3,636
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/GPGCodec.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.gobblin.codec.StreamCodec; /** * Codec class that supports GPG encryption and decryption. */ public class GPGCodec implements StreamCodec { public static final String TAG = "gpg"; private final String password; private final String cipher; private final Path keyRingPath; private final long keyId; private final Configuration conf; /** * Constructor for a {@code GPGCodec} configured for password-based encryption * @param password password for symmetric encryption * @param cipher the symmetric cipher to use for encryption. If null or empty then a default cipher is used. 
*/ public GPGCodec(String password, String cipher) { this.password = password; this.cipher = cipher; this.keyRingPath = null; this.keyId = 0; this.conf = null; } /** * Constructor for a {@code GPGCodec} configured for key-based encryption * @param keyRingPath path to the key ring * @param passphrase passphrase for retrieving the decryption key * @param keyId id for retrieving the key used for encryption * @param cipher the symmetric cipher to use for encryption. If null or empty then a default cipher is used. */ public GPGCodec(Path keyRingPath, String passphrase, long keyId, String cipher) { this.keyRingPath = keyRingPath; this.password = passphrase; this.keyId = keyId; this.cipher = cipher; this.conf = new Configuration(); } @Override public OutputStream encodeOutputStream(OutputStream origStream) throws IOException { if (this.keyRingPath != null) { try (InputStream keyRingInputStream = this.keyRingPath.getFileSystem(this.conf).open(this.keyRingPath)) { return GPGFileEncryptor.encryptFile(origStream, keyRingInputStream, this.keyId, this.cipher); } } else { return GPGFileEncryptor.encryptFile(origStream, this.password, this.cipher); } } @Override public InputStream decodeInputStream(InputStream origStream) throws IOException { if (this.keyRingPath != null) { try (InputStream keyRingInputStream = this.keyRingPath.getFileSystem(this.conf).open(keyRingPath)) { return GPGFileDecryptor.decryptFile(origStream, keyRingInputStream, this.password); } } else { return GPGFileDecryptor.decryptFile(origStream, this.password); } } @Override public String getTag() { return TAG; } }
3,637
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/GPGFileDecryptor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import java.io.IOException; import java.io.InputStream; import java.security.Security; import java.util.Iterator; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.openpgp.PGPCompressedData; import org.bouncycastle.openpgp.PGPEncryptedDataList; import org.bouncycastle.openpgp.PGPException; import org.bouncycastle.openpgp.PGPLiteralData; import org.bouncycastle.openpgp.PGPOnePassSignatureList; import org.bouncycastle.openpgp.PGPPBEEncryptedData; import org.bouncycastle.openpgp.PGPPrivateKey; import org.bouncycastle.openpgp.PGPPublicKeyEncryptedData; import org.bouncycastle.openpgp.PGPSecretKey; import org.bouncycastle.openpgp.PGPSecretKeyRingCollection; import org.bouncycastle.openpgp.PGPSignatureList; import org.bouncycastle.openpgp.PGPUtil; import org.bouncycastle.openpgp.jcajce.JcaPGPObjectFactory; import org.bouncycastle.openpgp.operator.bc.BcKeyFingerprintCalculator; import org.bouncycastle.openpgp.operator.jcajce.JcaPGPDigestCalculatorProviderBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcePBEDataDecryptorFactoryBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcePBESecretKeyDecryptorBuilder; import 
org.bouncycastle.openpgp.operator.jcajce.JcePublicKeyDataDecryptorFactoryBuilder; import lombok.experimental.UtilityClass; /** * A utility class that decrypts both password based and key based encryption files. * * Code reference - org.bouncycastle.openpgp.examples.PBEFileProcessor * - org.bouncycastle.openpgp.examples.KeyBasedFileProcessor */ @UtilityClass public class GPGFileDecryptor { /** * Taking in a file inputstream and a passPhrase, generate a decrypted file inputstream. * @param inputStream file inputstream * @param passPhrase passPhrase * @return * @throws IOException */ public InputStream decryptFile(InputStream inputStream, String passPhrase) throws IOException { PGPEncryptedDataList enc = getPGPEncryptedDataList(inputStream); PGPPBEEncryptedData pbe = (PGPPBEEncryptedData) enc.get(0); InputStream clear; try { clear = pbe.getDataStream(new JcePBEDataDecryptorFactoryBuilder( new JcaPGPDigestCalculatorProviderBuilder().setProvider(BouncyCastleProvider.PROVIDER_NAME).build()) .setProvider(BouncyCastleProvider.PROVIDER_NAME).build(passPhrase.toCharArray())); JcaPGPObjectFactory pgpFact = new JcaPGPObjectFactory(clear); return new LazyMaterializeDecryptorInputStream(pgpFact); } catch (PGPException e) { throw new IOException(e); } } /** * Taking in a file inputstream, keyring inputstream and a passPhrase, generate a decrypted file inputstream. * @param inputStream file inputstream * @param keyIn keyring inputstream. This InputStream is owned by the caller. 
* @param passPhrase passPhrase * @return an {@link InputStream} for the decrypted content * @throws IOException */ public InputStream decryptFile(InputStream inputStream, InputStream keyIn, String passPhrase) throws IOException { try { PGPEncryptedDataList enc = getPGPEncryptedDataList(inputStream); Iterator it = enc.getEncryptedDataObjects(); PGPPrivateKey sKey = null; PGPPublicKeyEncryptedData pbe = null; PGPSecretKeyRingCollection pgpSec = new PGPSecretKeyRingCollection(PGPUtil.getDecoderStream(keyIn), new BcKeyFingerprintCalculator()); while (sKey == null && it.hasNext()) { pbe = (PGPPublicKeyEncryptedData) it.next(); sKey = findSecretKey(pgpSec, pbe.getKeyID(), passPhrase); } if (sKey == null) { throw new IllegalArgumentException("secret key for message not found."); } InputStream clear = pbe.getDataStream( new JcePublicKeyDataDecryptorFactoryBuilder().setProvider(BouncyCastleProvider.PROVIDER_NAME).build(sKey)); JcaPGPObjectFactory pgpFact = new JcaPGPObjectFactory(clear); return new LazyMaterializeDecryptorInputStream(pgpFact); } catch (PGPException e) { throw new IOException(e); } } /** * Private util function that finds the private key from keyring collection based on keyId and passPhrase * @param pgpSec keyring collection * @param keyID keyID for this encryption file * @param passPhrase passPhrase for this encryption file * @throws PGPException */ private PGPPrivateKey findSecretKey(PGPSecretKeyRingCollection pgpSec, long keyID, String passPhrase) throws PGPException { PGPSecretKey pgpSecKey = pgpSec.getSecretKey(keyID); if (pgpSecKey == null) { return null; } return pgpSecKey.extractPrivateKey( new JcePBESecretKeyDecryptorBuilder() .setProvider(BouncyCastleProvider.PROVIDER_NAME).build(passPhrase.toCharArray())); } /** * Generate a PGPEncryptedDataList from an inputstream * @param inputStream file inputstream that needs to be decrypted * @throws IOException */ private PGPEncryptedDataList getPGPEncryptedDataList(InputStream inputStream) throws 
IOException { if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) { Security.addProvider(new BouncyCastleProvider()); } inputStream = PGPUtil.getDecoderStream(inputStream); JcaPGPObjectFactory pgpF = new JcaPGPObjectFactory(inputStream); PGPEncryptedDataList enc; Object pgpfObject = pgpF.nextObject(); if (pgpfObject instanceof PGPEncryptedDataList) { enc = (PGPEncryptedDataList) pgpfObject; } else { enc = (PGPEncryptedDataList) pgpF.nextObject(); } return enc; } /** * A class for reading the underlying {@link InputStream}s from the pgp object without pre-materializing all of them. * The PGP object may present the decrypted data through multiple {@link InputStream}s, but these streams are sequential * and the n+1 stream is not available until the end of the nth stream is reached, so the * {@link LazyMaterializeDecryptorInputStream} keeps a reference to the {@link JcaPGPObjectFactory} and moves to new * {@link InputStream}s as they are available */ private static class LazyMaterializeDecryptorInputStream extends InputStream { JcaPGPObjectFactory pgpFact; InputStream currentUnderlyingStream; public LazyMaterializeDecryptorInputStream(JcaPGPObjectFactory pgpFact) throws IOException { this.pgpFact = pgpFact; moveToNextInputStream(); } @Override public int read() throws IOException { if (this.currentUnderlyingStream == null) { return -1; } int value = this.currentUnderlyingStream.read(); if (value != -1) { return value; } else { moveToNextInputStream(); if (this.currentUnderlyingStream == null) { return -1; } return this.currentUnderlyingStream.read(); } } /** * Move to the next {@link InputStream} if available, otherwise set {@link #currentUnderlyingStream} to null to * indicate that there is no more data. 
* @throws IOException */ private void moveToNextInputStream() throws IOException { Object pgpfObject = this.pgpFact.nextObject(); // no more data if (pgpfObject == null) { this.currentUnderlyingStream = null; return; } if (pgpfObject instanceof PGPCompressedData) { PGPCompressedData cData = (PGPCompressedData) pgpfObject; try { this.pgpFact = new JcaPGPObjectFactory(cData.getDataStream()); } catch (PGPException e) { throw new IOException("Could not get the PGP data stream", e); } pgpfObject = this.pgpFact.nextObject(); } if (pgpfObject instanceof PGPLiteralData) { this.currentUnderlyingStream = ((PGPLiteralData) pgpfObject).getInputStream(); } else if (pgpfObject instanceof PGPOnePassSignatureList) { throw new IOException("encrypted message contains PGPOnePassSignatureList message - not literal data."); } else if (pgpfObject instanceof PGPSignatureList) { throw new IOException("encrypted message contains PGPSignatureList message - not literal data."); } else { throw new IOException("message is not a simple encrypted file - type unknown."); } } } }
3,638
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/JCEKSKeystoreCredentialStore.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.security.Key; import java.security.KeyStore; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; import java.security.UnrecoverableKeyException; import java.security.cert.CertificateException; import java.util.EnumSet; import java.util.Enumeration; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import javax.crypto.SecretKey; import javax.crypto.spec.SecretKeySpec; import lombok.extern.slf4j.Slf4j; /** * A CredentialStore that uses Java's Keystore implementation to store and retrieve keys. */ @Slf4j public class JCEKSKeystoreCredentialStore implements CredentialStore { public final static String TAG = "java"; private final KeyStore ks; private final char[] password; private final Path path; private final FileSystem fs; /** * Options that can be used while instantiating a new keystore */ public enum CreationOptions { /** * Create an empty keystore if one can't be found. 
Otherwise an exception will be thrown. */ CREATE_IF_MISSING } /** * Instantiate a new keystore at the given path protected by a password. * @param path Path to find the keystore * @param passwordStr Password the keystore is protected with * @throws IOException If the keystore cannot be loaded because the password is wrong or the file has been corrupted. * @throws IllegalArgumentException If the keystore does not exist at the given path. */ public JCEKSKeystoreCredentialStore(String path, String passwordStr) throws IOException { this(path, passwordStr, EnumSet.noneOf(CreationOptions.class)); } /** * Instantiate a new keystore at the given path protected by a password. * @param path Path to find the keystore * @param passwordStr Password the keystore is protected with * @param options Flags for keystore creation * @throws IOException If the keystore cannot be loaded because of a corrupt file or the password is wrong * @throws IllegalArgumentException If CREATE_IF_MISSING is not present in options and the keystore does not exist * at the given path. */ public JCEKSKeystoreCredentialStore(String path, String passwordStr, EnumSet<CreationOptions> options) throws IOException { this(new Path(path), passwordStr, options); } /** * Instantiate a new keystore at the given path protected by a password. * @param path Path to find the keystore * @param passwordStr Password the keystore is protected with * @param options Flags for keystore creation * @throws IOException If the keystore cannot be loaded because of a corrupt file or the password is wrong * @throws IllegalArgumentException If CREATE_IF_MISSING is not present in options and the keystore does not exist * at the given path. 
*/ public JCEKSKeystoreCredentialStore(Path path, String passwordStr, EnumSet<CreationOptions> options) throws IOException { try { this.ks = KeyStore.getInstance("JCEKS"); this.password = passwordStr.toCharArray(); this.path = path; this.fs = path.getFileSystem(new Configuration()); if (!fs.exists(path)) { if (options.contains(CreationOptions.CREATE_IF_MISSING)) { log.info("No keystore found at " + path + ", creating from scratch"); ks.load(null, password); } else { throw new IllegalArgumentException("Keystore " + path + " does not exist"); } } else { try (InputStream fis = fs.open(path)) { ks.load(fis, password); log.info("Successfully loaded keystore from " + path); } } } catch (KeyStoreException | CertificateException | NoSuchAlgorithmException e) { throw new IllegalStateException("Unexpected failure initializing keystore", e); } } @Override public byte[] getEncodedKey(String id) { try { Key k = ks.getKey(id, password); return (k == null) ? null : k.getEncoded(); } catch (KeyStoreException | NoSuchAlgorithmException | UnrecoverableKeyException e) { log.warn("Error trying to decode key " + id, e); return null; } } @Override public Map<String, byte[]> getAllEncodedKeys() { Map<String, byte[]> ret = new HashMap<>(); try { Enumeration<String> aliases = ks.aliases(); while (aliases.hasMoreElements()) { String key = aliases.nextElement(); try { if (ks.isKeyEntry(key)) { ret.put(key, getEncodedKey(key)); } } catch (KeyStoreException e) { log.warn("Error trying to decode key id " + key + ", not returning in list", e); } } } catch (KeyStoreException e) { log.warn("Error retrieving all aliases in keystore; treating as empty", e); return ret; } return ret; } /** * Generate a set of AES keys for the store. The key ids will simple be (startOffset ... startOffset + numKeys). 
* @param numKeys Number of keys to generate * @param startOffset ID to start generating keys with * @throws IOException If there is an error serializing the keystore back to disk * @throws KeyStoreException If there is an error serializing the keystore back to disk */ public void generateAesKeys(int numKeys, int startOffset) throws IOException, KeyStoreException { for (int i = 1; i <= numKeys; i++) { SecretKey key = generateKey(); ks.setEntry(String.valueOf(i + startOffset), new KeyStore.SecretKeyEntry(key), new KeyStore.PasswordProtection(password)); } saveKeystore(); } private SecretKey generateKey() { SecureRandom r = new SecureRandom(); byte[] keyBytes = new byte[16]; r.nextBytes(keyBytes); return new SecretKeySpec(keyBytes, "AES"); } private void saveKeystore() throws IOException { try (OutputStream fOs = fs.create(path, true)) { ks.store(fOs, password); } catch (KeyStoreException | NoSuchAlgorithmException | CertificateException e) { throw new IOException("Error serializing keystore", e); } } }
3,639
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/GPGFileEncryptor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.lang.reflect.Field; import java.security.SecureRandom; import java.security.Security; import java.util.Date; import java.util.Set; import org.apache.commons.lang3.StringUtils; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.openpgp.PGPEncryptedData; import org.bouncycastle.openpgp.PGPEncryptedDataGenerator; import org.bouncycastle.openpgp.PGPException; import org.bouncycastle.openpgp.PGPLiteralDataGenerator; import org.bouncycastle.openpgp.PGPPublicKey; import org.bouncycastle.openpgp.PGPPublicKeyRingCollection; import org.bouncycastle.openpgp.PGPUtil; import org.bouncycastle.openpgp.operator.bc.BcKeyFingerprintCalculator; import org.bouncycastle.openpgp.operator.jcajce.JcePBEKeyEncryptionMethodGenerator; import org.bouncycastle.openpgp.operator.jcajce.JcePGPDataEncryptorBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcePublicKeyKeyEncryptionMethodGenerator; import org.reflections.ReflectionUtils; import com.google.common.base.Preconditions; import lombok.experimental.UtilityClass; /** * A utility class that supports both 
password based and key based encryption * * Code reference - org.bouncycastle.openpgp.examples.PBEFileProcessor * - org.bouncycastle.openpgp.examples.KeyBasedFileProcessor */ @UtilityClass public class GPGFileEncryptor { private static int BUFFER_SIZE = 1024; private static String PAYLOAD_NAME = "payload.file"; private static String PROVIDER_NAME = BouncyCastleProvider.PROVIDER_NAME; /** * Taking in an input {@link OutputStream} and a passPhrase, return an {@link OutputStream} that can be used to output * encrypted output to the input {@link OutputStream}. * @param outputStream the output stream to hold the ciphertext {@link OutputStream} * @param passPhrase pass phrase * @param cipher the symmetric cipher to use for encryption. If null or empty then a default cipher is used. * @return {@link OutputStream} to write content to for encryption * @throws IOException */ public OutputStream encryptFile(OutputStream outputStream, String passPhrase, String cipher) throws IOException { try { if (Security.getProvider(PROVIDER_NAME) == null) { Security.addProvider(new BouncyCastleProvider()); } PGPEncryptedDataGenerator cPk = new PGPEncryptedDataGenerator( new JcePGPDataEncryptorBuilder(symmetricKeyAlgorithmNameToTag(cipher)) .setSecureRandom(new SecureRandom()) .setProvider(PROVIDER_NAME)); cPk.addMethod(new JcePBEKeyEncryptionMethodGenerator(passPhrase.toCharArray()).setProvider(PROVIDER_NAME)); OutputStream cOut = cPk.open(outputStream, new byte[BUFFER_SIZE]); PGPLiteralDataGenerator literalGen = new PGPLiteralDataGenerator(); OutputStream _literalOut = literalGen.open(cOut, PGPLiteralDataGenerator.BINARY, PAYLOAD_NAME, new Date(), new byte[BUFFER_SIZE]); return new ClosingWrapperOutputStream(_literalOut, cOut, outputStream); } catch (PGPException e) { throw new IOException(e); } } /** * Taking in an input {@link OutputStream}, keyring inputstream and a passPhrase, generate an encrypted {@link OutputStream}. 
* @param outputStream {@link OutputStream} that will receive the encrypted content * @param keyIn keyring inputstream. This InputStream is owned by the caller. * @param keyId key identifier * @param cipher the symmetric cipher to use for encryption. If null or empty then a default cipher is used. * @return an {@link OutputStream} to write content to for encryption * @throws IOException */ public OutputStream encryptFile(OutputStream outputStream, InputStream keyIn, long keyId, String cipher) throws IOException { try { if (Security.getProvider(PROVIDER_NAME) == null) { Security.addProvider(new BouncyCastleProvider()); } PGPEncryptedDataGenerator cPk = new PGPEncryptedDataGenerator( new JcePGPDataEncryptorBuilder(symmetricKeyAlgorithmNameToTag(cipher)) .setSecureRandom(new SecureRandom()) .setProvider(PROVIDER_NAME)); PGPPublicKey publicKey; PGPPublicKeyRingCollection keyRings = new PGPPublicKeyRingCollection(PGPUtil.getDecoderStream(keyIn), new BcKeyFingerprintCalculator()); publicKey = keyRings.getPublicKey(keyId); if (publicKey == null) { throw new IllegalArgumentException("public key for encryption not found"); } cPk.addMethod(new JcePublicKeyKeyEncryptionMethodGenerator(publicKey).setProvider(PROVIDER_NAME)); OutputStream cOut = cPk.open(outputStream, new byte[BUFFER_SIZE]); PGPLiteralDataGenerator literalGen = new PGPLiteralDataGenerator(); OutputStream _literalOut = literalGen.open(cOut, PGPLiteralDataGenerator.BINARY, PAYLOAD_NAME, new Date(), new byte[BUFFER_SIZE]); return new ClosingWrapperOutputStream(_literalOut, cOut, outputStream); } catch (PGPException e) { throw new IOException(e); } } /** * Convert a string cipher name to the integer tag used by GPG * @param cipherName the cipher name * @return integer tag for the cipher */ private static int symmetricKeyAlgorithmNameToTag(String cipherName) { // Use CAST5 if no cipher specified if (StringUtils.isEmpty(cipherName)) { return PGPEncryptedData.CAST5; } Set<Field> fields = 
ReflectionUtils.getAllFields(PGPEncryptedData.class, ReflectionUtils.withName(cipherName)); if (fields.isEmpty()) { throw new RuntimeException("Could not find tag for cipher name " + cipherName); } try { return fields.iterator().next().getInt(null); } catch (IllegalAccessException e) { throw new RuntimeException("Could not access field " + cipherName, e); } } /** * A class for keeping track of wrapped output streams and closing them when this stream is closed. * This is required because GPG wrapping of streams does not propagate the close. */ private static class ClosingWrapperOutputStream extends OutputStream { private final OutputStream[] outputStreams; private final OutputStream firstStream; /** * Creates an output stream that writes to the first {@link OutputStream} and closes all of the {@link OutputStream}s * when close() is called * @param outputStreams list of {@link OutputStream}s where the first one is written to and the rest are tracked * for closing. */ public ClosingWrapperOutputStream(OutputStream... outputStreams) { Preconditions.checkArgument(outputStreams.length >= 1); this.outputStreams = outputStreams; this.firstStream = outputStreams[0]; } @Override public void write(byte[] bytes) throws IOException { this.firstStream.write(bytes); } @Override public void write(byte[] bytes, int offset, int length) throws IOException { this.firstStream.write(bytes, offset, length); } @Override public void write(int b) throws IOException { this.firstStream.write(b); } public void flush() throws IOException { for (OutputStream os : this.outputStreams) { os.flush(); } } public void close() throws IOException { for (OutputStream os : this.outputStreams) { os.close(); } } } }
3,640
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/KeyToStringCodec.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; /** * A package that can convert a key to and from a string representation */ public interface KeyToStringCodec { /** * Given the string representation of a key, return the raw byte format. Eg given "0102", return * new byte[] { 1, 2 } */ byte[] decodeKey(String src); /** * Encode the raw byte format of a key in an encoded value. Eg take the byte array { 1, 2 } * and return the hex string "0102" */ String encodeKey(byte[] key); }
3,641
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/RotatingAESCodec.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import java.io.FilterOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.HashMap; import java.util.Map; import java.util.Random; import org.apache.commons.codec.binary.Base64; import org.apache.commons.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.CipherOutputStream; import javax.crypto.NoSuchPaddingException; import javax.crypto.SecretKey; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import javax.xml.bind.DatatypeConverter; import org.apache.gobblin.codec.Base64Codec; import org.apache.gobblin.codec.StreamCodec; /** * Implementation of an encryption algorithm that works in the following way: * * 1. A credentialStore is provisioned with a set of AES keys * 2. 
When encodeOutputStream() is called, an AES key will be picked at random and a new initialization vector (IV) * will be generated. * 3. A header will be written [keyId][ivLength][base64 encoded iv] * 4. Ciphertext will be base64 encoded and written out. We do not insert linebreaks. */ public class RotatingAESCodec implements StreamCodec { private static final Logger log = LoggerFactory.getLogger(RotatingAESCodec.class); private static final int AES_KEY_LEN = 16; private static final String TAG = "aes_rotating"; private final Random random; private final CredentialStore credentialStore; /* * Cache valid keys in two forms: * A map for retrieving a key quickly (decode case) * An array for quickly selecting a random key (encode case) */ private volatile Map<Integer, KeyRecord> keyRecords_cache; private volatile KeyRecord[] keyRecords_cache_arr; /** * Create a new encryptor * @param credentialStore Credential store where keys can be found */ public RotatingAESCodec(CredentialStore credentialStore) { this.credentialStore = credentialStore; this.random = new Random(); } @Override public OutputStream encodeOutputStream(OutputStream origStream) throws IOException { return new EncodingStreamInstance(selectRandomKey(), origStream).wrapOutputStream(); } @Override public InputStream decodeInputStream(InputStream origStream) throws IOException { return new DecodingStreamInstance(origStream).wrapInputStream(); } private synchronized KeyRecord getKey(Integer key) { fillKeyRecords(); return keyRecords_cache.get(key); } private synchronized KeyRecord selectRandomKey() { KeyRecord[] keyRecords = getKeyRecords(); if (keyRecords.length == 0) { throw new IllegalStateException("Couldn't find any valid keys in store!"); } return keyRecords[random.nextInt(keyRecords.length)]; } private synchronized KeyRecord[] getKeyRecords() { fillKeyRecords(); return keyRecords_cache_arr; } private synchronized void fillKeyRecords() { if (keyRecords_cache == null) { keyRecords_cache = new HashMap<>(); 
for (Map.Entry<String, byte[]> entry : credentialStore.getAllEncodedKeys().entrySet()) { if (entry.getValue().length != AES_KEY_LEN) { log.debug("Skipping keyId {} because it is length {}; expected {}", entry.getKey(), entry.getValue().length, AES_KEY_LEN); continue; } try { Integer keyId = Integer.parseInt(entry.getKey()); SecretKey key = new SecretKeySpec(entry.getValue(), "AES"); keyRecords_cache.put(keyId, new KeyRecord(keyId, key)); } catch (NumberFormatException e) { log.debug("Skipping keyId {} because this algorithm can only use numeric key ids", entry.getKey()); } } keyRecords_cache_arr = keyRecords_cache.values().toArray(new KeyRecord[keyRecords_cache.size()]); } } @Override public String getTag() { return TAG; } /** * Represents a set of parsed AES keys that we can choose from when encrypting. */ static class KeyRecord { private final int keyId; private final SecretKey secretKey; KeyRecord(int keyId, SecretKey secretKey) { this.keyId = keyId; this.secretKey = secretKey; } int getKeyId() { return keyId; } SecretKey getSecretKey() { return secretKey; } } /** * Helper class that keeps state around for a wrapped output stream. Each stream will have a different * selected key, IV, and cipher state. 
*/ static class EncodingStreamInstance { private final OutputStream origStream; private final KeyRecord secretKey; private Cipher cipher; private String base64Iv; private boolean headerWritten = false; EncodingStreamInstance(KeyRecord secretKey, OutputStream origStream) { this.secretKey = secretKey; this.origStream = origStream; } OutputStream wrapOutputStream() throws IOException { initCipher(); final OutputStream base64OutputStream = getBase64Stream(origStream); final CipherOutputStream encryptedStream = new CipherOutputStream(base64OutputStream, cipher); return new FilterOutputStream(origStream) { @Override public void write(int b) throws IOException { writeHeaderIfNecessary(); encryptedStream.write(b); } @Override public void write(byte[] b) throws IOException { writeHeaderIfNecessary(); encryptedStream.write(b); } @Override public void write(byte[] b, int off, int len) throws IOException { writeHeaderIfNecessary(); encryptedStream.write(b, off, len); } @Override public void close() throws IOException { encryptedStream.close(); } }; } private OutputStream getBase64Stream(OutputStream origStream) throws IOException { return new Base64Codec().encodeOutputStream(origStream); } private void initCipher() { if (origStream == null) { throw new IllegalStateException("Can't initCipher stream before encodeOutputStream() has been called!"); } try { cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); cipher.init(Cipher.ENCRYPT_MODE, secretKey.getSecretKey()); byte[] iv = cipher.getIV(); base64Iv = DatatypeConverter.printBase64Binary(iv); this.headerWritten = false; } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw new IllegalStateException("Error creating AES algorithm? 
Should always exist in JRE"); } catch (InvalidKeyException e) { throw new IllegalStateException("Key " + secretKey.getKeyId() + " is illegal - please check credential store"); } } private void writeHeaderIfNecessary() throws IOException { if (!headerWritten) { String header = String.format("%04d%03d%s", secretKey.getKeyId(), base64Iv.length(), base64Iv); origStream.write(header.getBytes(StandardCharsets.UTF_8)); this.headerWritten = true; } } } private class DecodingStreamInstance { private final InputStream origStream; private final byte[] buffer = new byte[32]; private final Cipher cipher; DecodingStreamInstance(InputStream origStream) throws IOException { this.origStream = origStream; Integer keyId = readKey(); KeyRecord key = getKey(keyId); if (key == null) { throw new IOException("Cannot load key " + String.valueOf(keyId) + " which is specified in input stream"); } try { byte[] iv = readIv(); cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); if (iv != null) { IvParameterSpec ivParameterSpec = new IvParameterSpec(iv); cipher.init(Cipher.DECRYPT_MODE, key.getSecretKey(), ivParameterSpec); } else { cipher.init(Cipher.DECRYPT_MODE, key.getSecretKey()); } } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw new IllegalStateException("Failed to load AES which should never happen", e); } catch (InvalidKeyException e) { throw new IllegalStateException("Failed to parse key from keystore", e); } catch (InvalidAlgorithmParameterException e) { throw new IllegalStateException("Failed to initialize IV", e); } } InputStream wrapInputStream() throws IOException { InputStream base64Decoder = new Base64Codec().decodeInputStream(origStream); return new CipherInputStream(base64Decoder, cipher); } // read and parse key from the bytestream private Integer readKey() throws IOException { IOUtils.readFully(origStream, buffer, 0, 4); try { return Integer.valueOf(new String(buffer, 0, 4, StandardCharsets.UTF_8)); } catch (NumberFormatException e) { throw new 
IOException("Expected to be able to parse first 4 bytes of stream as an ASCII keyId"); } } private byte[] readIv() throws IOException { IOUtils.readFully(origStream, buffer, 0, 3); Integer ivLen; try { ivLen = Integer.valueOf(new String(buffer, 0, 3, StandardCharsets.UTF_8)); } catch (NumberFormatException e) { throw new IOException("Expected to parse next 3 bytes of stream as an IV len"); } if (ivLen < 0 || ivLen > buffer.length) { throw new IOException( "Corrupted data suspected; expected IVLen to be between 0 and " + String.valueOf(buffer.length) + ", read " + String.valueOf(ivLen)); } if (ivLen == 0) { return null; } // IV is separately base64 encoded -- none of the standard base64 codec instances support decoding a slice of a // byte[] array so create a new buffer here byte[] ivBuffer = new byte[ivLen]; IOUtils.readFully(origStream, ivBuffer, 0, ivBuffer.length); return Base64.decodeBase64(ivBuffer); } } }
3,642
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/Base64KeyToStringCodec.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import java.util.Base64; /** * Encodes and decodes a string as base64. */ public class Base64KeyToStringCodec implements KeyToStringCodec { public static final String TAG = "base64"; @Override public byte[] decodeKey(String src) { return Base64.getDecoder().decode(src); } @Override public String encodeKey(byte[] key) { return Base64.getEncoder().encodeToString(key); } }
3,643
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/converter/EncryptedSerializedRecordToSerializedRecordConverterBase.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.Collections; import lombok.extern.slf4j.Slf4j; import org.apache.commons.io.IOUtils; import org.apache.gobblin.codec.StreamCodec; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.type.RecordWithMetadata; /** * A converter that converts a encrypted {@link org.apache.gobblin.type.SerializedRecordWithMetadata} to * a {@link org.apache.gobblin.type.SerializedRecordWithMetadata}. The decryption algorithm used will be * appended to the Transfer-Encoding of the new record. */ @Slf4j public abstract class EncryptedSerializedRecordToSerializedRecordConverterBase extends Converter<String, String, RecordWithMetadata<byte[]>, RecordWithMetadata<byte[]>> { private StreamCodec decryptor; @Override public Converter<String, String, RecordWithMetadata<byte[]>, RecordWithMetadata<byte[]>> init( WorkUnitState workUnit) { super.init(workUnit); decryptor = buildDecryptor(workUnit); return this; } /** * Build the StreamCodec that will be used to decrypt each byte record. Must be provided by concrete * implementations of this class. 
*/ protected abstract StreamCodec buildDecryptor(WorkUnitState config); @Override public String convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException { return ""; } @Override public Iterable<RecordWithMetadata<byte[]>> convertRecord(String outputSchema, RecordWithMetadata<byte[]> inputRecord, WorkUnitState workUnit) throws DataConversionException { try { ByteArrayInputStream inputStream = new ByteArrayInputStream(inputRecord.getRecord()); byte[] decryptedBytes; try (InputStream decryptedInputStream = decryptor.decodeInputStream(inputStream)) { decryptedBytes = IOUtils.toByteArray(decryptedInputStream); } inputRecord.getMetadata().getGlobalMetadata().addTransferEncoding(decryptor.getTag()); RecordWithMetadata<byte[]> serializedRecord = new RecordWithMetadata<byte[]>(decryptedBytes, inputRecord.getMetadata()); return Collections.singleton(serializedRecord); } catch (Exception e) { throw new DataConversionException(e); } } }
3,644
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/converter/SerializedRecordToEncryptedSerializedRecordConverterBase.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter; import java.io.ByteArrayOutputStream; import java.io.OutputStream; import java.util.Collections; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.codec.StreamCodec; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.type.RecordWithMetadata; /** * A converter that converts a {@link org.apache.gobblin.type.SerializedRecordWithMetadata} to a {@link org.apache.gobblin.type.SerializedRecordWithMetadata} * where the serialized bytes represent encrypted data. The encryption algorithm used will be appended to the * Transfer-Encoding of the new record. */ @Slf4j public abstract class SerializedRecordToEncryptedSerializedRecordConverterBase extends Converter<String, String, RecordWithMetadata<byte[]>, RecordWithMetadata<byte[]>> { private StreamCodec encryptor; @Override public Converter<String, String, RecordWithMetadata<byte[]>, RecordWithMetadata<byte[]>> init( WorkUnitState workUnit) { super.init(workUnit); encryptor = buildEncryptor(workUnit); return this; } /** * Build the StreamCodec that will be used to encrypt each byte record. Must be provided by concrete * implementations of this class. 
*/ protected abstract StreamCodec buildEncryptor(WorkUnitState config); @Override public String convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException { return ""; } @Override public Iterable<RecordWithMetadata<byte[]>> convertRecord(String outputSchema, RecordWithMetadata<byte[]> inputRecord, WorkUnitState workUnit) throws DataConversionException { try { ByteArrayOutputStream bOs = new ByteArrayOutputStream(); try (OutputStream encryptedStream = encryptor.encodeOutputStream(bOs)) { encryptedStream.write(inputRecord.getRecord()); } inputRecord.getMetadata().getGlobalMetadata().addTransferEncoding(encryptor.getTag()); RecordWithMetadata<byte[]> serializedRecord = new RecordWithMetadata<byte[]>(bOs.toByteArray(), inputRecord.getMetadata()); return Collections.singleton(serializedRecord); } catch (Exception e) { throw new DataConversionException(e); } } }
3,645
0
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/jmh/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-crypto/src/jmh/java/org/apache/gobblin/crypto/EncodingBenchmark.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.crypto; import com.google.common.collect.ImmutableMap; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; import java.security.SecureRandom; import java.util.Map; import java.util.Random; import javax.crypto.Cipher; import javax.crypto.CipherOutputStream; import javax.crypto.SecretKey; import javax.crypto.spec.SecretKeySpec; import org.apache.commons.codec.binary.Base64OutputStream; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.Setup; import org.openjdk.jmh.annotations.State; /** * Benchmarks around the RotatingAESEncoder algorithm. * * It turns out after running some of these that Base64 encoding of the output stream also incurs a large * performance cost, so there are benchmarks in here to test the efficacy of the algorithm with a few Base64 * encoder providers too. 
*/ @Fork(3) public class EncodingBenchmark { @State(value = Scope.Benchmark) public static class EncodingBenchmarkState { public byte[] OneKBytes; public SimpleCredentialStore credStore; @Setup public void setup() throws Exception { Random r = new Random(); OneKBytes = new byte[1024]; credStore = new SimpleCredentialStore(); r.nextBytes(OneKBytes); } } @Benchmark public byte[] write1KRecordsNewBase64(EncodingBenchmarkState state) throws IOException { ByteArrayOutputStream sink = new ByteArrayOutputStream(); OutputStream os = new RotatingAESCodec(state.credStore).encodeOutputStream(sink); os.write(state.OneKBytes); os.close(); return sink.toByteArray(); } @Benchmark public byte[] write1KRecordsBase64Only(EncodingBenchmarkState state) throws IOException { ByteArrayOutputStream sink = new ByteArrayOutputStream(); OutputStream os = new Base64OutputStream(sink); os.write(state.OneKBytes); os.close(); return sink.toByteArray(); } @Benchmark public byte[] write1KRecordsDirectCipherStream(EncodingBenchmarkState state) throws Exception { Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); cipher.init(Cipher.ENCRYPT_MODE, state.credStore.getKey()); ByteArrayOutputStream sink = new ByteArrayOutputStream(); OutputStream os = new CipherOutputStream(sink, cipher); os.write(state.OneKBytes); os.close(); return sink.toByteArray(); } static class SimpleCredentialStore implements CredentialStore { private final SecretKey key; private final byte[] keyEncoded; public SimpleCredentialStore() { SecureRandom r = new SecureRandom(); byte[] keyBytes = new byte[16]; r.nextBytes(keyBytes); key = new SecretKeySpec(keyBytes, "AES"); keyEncoded = key.getEncoded(); } @Override public byte[] getEncodedKey(String id) { if (id.equals("1")) { return keyEncoded; } return null; } @Override public Map<String, byte[]> getAllEncodedKeys() { return ImmutableMap.of("1", keyEncoded); } public SecretKey getKey() { return key; } } }
3,646
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterExtractorIteratorTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.ingestion.google.webmaster; import java.io.IOException; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.collections.CollectionUtils; import org.mockito.ArgumentMatcher; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.Test; import com.google.api.services.webmasters.model.ApiDimensionFilter; import org.apache.gobblin.configuration.WorkUnitState; import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.eq; class CollectionEquals implements ArgumentMatcher<Collection> { private final Collection _expected; public CollectionEquals(Collection expected) { _expected = expected; } @Override public boolean matches(Collection actual) { return CollectionUtils.isEqualCollection(actual, _expected); } } @Test(groups = {"gobblin.source.extractor.extract.google.webmaster"}) public class GoogleWebmasterExtractorIteratorTest { String siteProperty = "https://www.abc.com/"; /** * Test the GoogleWebmasterExtractorIterator to make sure that it first gets all pages based on the filters * and then for each 
page, it asks for the queries. * @throws IOException */ @Test public void testIterator() throws IOException { GoogleWebmasterDataFetcher client = Mockito.mock(GoogleWebmasterDataFetcher.class); String country = "USA"; String date = "2016-11-01"; ArrayList<GoogleWebmasterFilter.Dimension> requestedDimensions = new ArrayList<>(); ArrayList<GoogleWebmasterDataFetcher.Metric> requestedMetrics = new ArrayList<>(); ArrayDeque<ProducerJob> allJobs = new ArrayDeque<>(); String page1 = siteProperty + "a/1"; String page2 = siteProperty + "b/1"; allJobs.add(new SimpleProducerJob(page1, date, date)); allJobs.add(new SimpleProducerJob(page2, date, date)); Mockito.when(client.getAllPages(eq(date), eq(date), eq(country), eq(GoogleWebmasterClient.API_ROW_LIMIT))) .thenReturn(allJobs); //Set performSearchAnalyticsQuery Mock1 String[] a1 = {"r1-c1", "r1-c2"}; List<String[]> results1 = new ArrayList<>(); results1.add(a1); List<ApiDimensionFilter> filters1 = new ArrayList<>(); filters1.add(GoogleWebmasterFilter.countryEqFilter(country)); filters1.add(GoogleWebmasterFilter.pageFilter(GoogleWebmasterFilter.FilterOperator.EQUALS, page1)); Mockito.when(client.performSearchAnalyticsQuery(eq(date), eq(date), eq(GoogleWebmasterClient.API_ROW_LIMIT), eq(requestedDimensions), eq(requestedMetrics), argThat(new CollectionEquals(filters1)))).thenReturn(results1); //Set performSearchAnalyticsQuery Mock2 String[] a2 = {"r2-c1", "r2-c2"}; List<String[]> results2 = new ArrayList<>(); results2.add(a2); List<ApiDimensionFilter> filters2 = new ArrayList<>(); filters2.add(GoogleWebmasterFilter.countryEqFilter(country)); filters2.add(GoogleWebmasterFilter.pageFilter(GoogleWebmasterFilter.FilterOperator.EQUALS, page2)); Mockito.when(client .performSearchAnalyticsQuery(eq(date), eq(date), eq(5000), eq(requestedDimensions), eq(requestedMetrics), argThat(new CollectionEquals(filters2)))).thenReturn(results2); Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter> map = new HashMap<>(); 
map.put(GoogleWebmasterFilter.Dimension.COUNTRY, GoogleWebmasterFilter.countryEqFilter(country)); WorkUnitState defaultState = GoogleWebmasterExtractorTest.getWorkUnitState1(); defaultState.setProp(GoogleWebMasterSource.KEY_QUERIES_TUNING_BATCH_SIZE, 1); GoogleWebmasterExtractorIterator iterator = new GoogleWebmasterExtractorIterator(client, date, date, requestedDimensions, requestedMetrics, map, defaultState); List<String[]> response = new ArrayList<>(); response.add(iterator.next()); response.add(iterator.next()); Assert.assertTrue(!iterator.hasNext()); Assert.assertTrue(response.contains(a1)); Assert.assertTrue(response.contains(a2)); Mockito.verify(client, Mockito.times(1)).getAllPages(eq(date), eq(date), eq(country), eq(5000)); Mockito.verify(client, Mockito.times(1)) .performSearchAnalyticsQuery(eq(date), eq(date), eq(5000), eq(requestedDimensions), eq(requestedMetrics), argThat(new CollectionEquals(filters1))); Mockito.verify(client, Mockito.times(1)) .performSearchAnalyticsQuery(eq(date), eq(date), eq(5000), eq(requestedDimensions), eq(requestedMetrics), argThat(new CollectionEquals(filters2))); } }
3,647
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/SimpleProducerJobTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.ingestion.google.webmaster; import java.util.List; import org.testng.Assert; import org.testng.annotations.Test; @Test(groups = {"gobblin.source.extractor.extract.google.webmaster"}) public class SimpleProducerJobTest { @Test public void testNotDivisibleJobs() { ProducerJob job1 = new SimpleProducerJob("p1", "2016-11-22", "2016-11-22"); Assert.assertTrue(job1.partitionJobs().isEmpty()); ProducerJob job2 = new SimpleProducerJob("p1", "2016-11-23", "2016-11-22"); Assert.assertTrue(job2.partitionJobs().isEmpty()); } @Test public void testDivisibleJobs1() { ProducerJob job3 = new SimpleProducerJob("p1", "2016-11-22", "2016-11-23"); List<? extends ProducerJob> divides = job3.partitionJobs(); Assert.assertEquals(divides.size(), 2); Assert.assertEquals(new SimpleProducerJob("p1", "2016-11-22", "2016-11-22"), divides.get(0)); Assert.assertEquals(new SimpleProducerJob("p1", "2016-11-23", "2016-11-23"), divides.get(1)); } @Test public void testDivisibleJobs2() { ProducerJob job3 = new SimpleProducerJob("p1", "2016-11-22", "2016-11-24"); List<? 
extends ProducerJob> divides = job3.partitionJobs(); Assert.assertEquals(divides.size(), 2); Assert.assertEquals(new SimpleProducerJob("p1", "2016-11-22", "2016-11-23"), divides.get(0)); Assert.assertEquals(new SimpleProducerJob("p1", "2016-11-24", "2016-11-24"), divides.get(1)); } }
3,648
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/UrlTrieTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.Arrays;

import org.testng.Assert;
import org.testng.annotations.Test;


/**
 * Unit tests for {@link UrlTrie}: root construction for empty/null/real
 * properties, child lookup, flat and nested inserts, and duplicate counting.
 *
 * <p>Note: TestNG's {@code Assert.assertEquals} takes (actual, expected);
 * all calls below follow that order so failure messages read correctly.</p>
 */
@Test(groups = {"gobblin.source.extractor.extract.google.webmaster"})
public class UrlTrieTest {

  @Test
  public void testTrieRoot1() {
    // An empty property produces a root node without a character value.
    UrlTrie trie = new UrlTrie("", new ArrayList<String>());
    UrlTrieNode root = trie.getRoot();
    Assert.assertNull(root.getValue());
  }

  @Test
  public void testTrieRoot2() {
    // A null property behaves the same as an empty property.
    UrlTrie trie = new UrlTrie(null, new ArrayList<String>());
    UrlTrieNode root = trie.getRoot();
    Assert.assertNull(root.getValue());
  }

  @Test
  public void testTrieRoot3() {
    // With a non-empty property the root holds its trailing '/' and no pages.
    UrlTrie trie = new UrlTrie("www.linkedin.com/", new ArrayList<String>());
    UrlTrieNode root = trie.getRoot();
    Assert.assertEquals(root.getValue(), Character.valueOf('/'));
    Assert.assertEquals(root.getSize(), 0);
  }

  @Test
  public void testParent() {
    UrlTrie trie = new UrlTrie("www.linkedin.com/", Arrays.asList("www.linkedin.com/in/"));
    UrlTrieNode root = trie.getRoot();
    Assert.assertEquals(root.getSize(), 1);
    UrlTrieNode child = root.getChild("in/");
    Assert.assertEquals(child.getSize(), 1);
  }

  @Test
  public void testSiblings() {
    UrlTrie trie = new UrlTrie("https://www.linkedin.com/",
        Arrays.asList("https://www.linkedin.com/a", "https://www.linkedin.com/b"));
    UrlTrieNode root = trie.getRoot();
    // Sibling navigation (nextSibling) is not part of the current UrlTrieNode
    // API, so this test only verifies both children were inserted at the root.
    Assert.assertNotNull(root.getChild("a"));
    Assert.assertNotNull(root.getChild("b"));
  }

  @Test
  public void testTrieFlat() {
    UrlTrie trie = new UrlTrie("https://www.linkedin.com/",
        Arrays.asList("https://www.linkedin.com/jobs/", "https://www.linkedin.com/in/"));
    UrlTrieNode root = trie.getRoot();
    Assert.assertEquals(root.getValue(), Character.valueOf('/'));
    Assert.assertEquals(root.children.size(), 2);
    // The root itself was never inserted as a page, but counts both descendants.
    Assert.assertFalse(root.isExist());
    Assert.assertEquals(root.getSize(), 2);

    // Path1: intermediate characters exist but are not marked as inserted pages.
    String path1 = "jobs/";
    checkEmptyPath(trie, path1, 1);
    UrlTrieNode jobNode = trie.getChild("jobs/");
    Assert.assertEquals(jobNode.getValue(), Character.valueOf('/'));
    Assert.assertEquals(jobNode.getSize(), 1);
    Assert.assertTrue(jobNode.isExist());

    // Path2
    String path2 = "in/";
    checkEmptyPath(trie, path2, 1);
    UrlTrieNode inNode = trie.getChild("in/");
    Assert.assertEquals(inNode.getValue(), Character.valueOf('/'));
    Assert.assertEquals(inNode.getSize(), 1);
    Assert.assertTrue(inNode.isExist());
  }

  @Test
  public void testDuplicate() {
    // The property page itself is inserted twice; getSize counts every insert.
    UrlTrie trie = new UrlTrie("https://www.linkedin.com/",
        Arrays.asList("https://www.linkedin.com/", "https://www.linkedin.com/", "https://www.linkedin.com/in/"));
    UrlTrieNode root = trie.getRoot();
    Assert.assertEquals(root.getValue(), Character.valueOf('/'));
    Assert.assertEquals(root.children.size(), 1);
    Assert.assertTrue(root.isExist());
    Assert.assertEquals(root.getSize(), 3);

    // Path1
    String path1 = "in/";
    checkEmptyPath(trie, path1, 1);
    UrlTrieNode inNode = trie.getChild("in/");
    Assert.assertEquals(inNode.getValue(), Character.valueOf('/'));
    Assert.assertEquals(inNode.getSize(), 1);
    Assert.assertTrue(inNode.isExist());
  }

  @Test
  public void testTrieVertical() {
    UrlTrie trie = new UrlTrie("https://www.linkedin.com/",
        Arrays.asList("https://www.linkedin.com/", "https://www.linkedin.com/in/",
            "https://www.linkedin.com/in/chenguo"));
    UrlTrieNode root = trie.getRoot();
    Assert.assertEquals(root.getValue(), Character.valueOf('/'));
    Assert.assertEquals(root.children.size(), 1);
    Assert.assertTrue(root.isExist());
    Assert.assertEquals(root.getSize(), 3);

    // Path1
    String path1 = "in/";
    checkEmptyPath(trie, path1, 2);
    UrlTrieNode inNode = trie.getChild("in/");
    Assert.assertEquals(inNode.getValue(), Character.valueOf('/'));
    Assert.assertEquals(inNode.getSize(), 2);
    Assert.assertTrue(inNode.isExist());

    // Deep lookup from the root and relative lookup from a child must agree.
    UrlTrieNode chenguo = inNode.getChild("chenguo");
    Assert.assertEquals(root.getChild("in/chenguo"), chenguo);
    Assert.assertEquals(chenguo.getValue(), Character.valueOf('o'));
    Assert.assertEquals(chenguo.getSize(), 1);
    Assert.assertTrue(chenguo.isExist());
  }

  /**
   * Walks every proper prefix of {@code path} and verifies that each
   * intermediate node carries the expected character, has the expected
   * descendant page count, and is not itself marked as an inserted page.
   */
  private void checkEmptyPath(UrlTrie trie, String path, int pathChildrenCount) {
    for (int i = 1; i < path.length(); ++i) {
      UrlTrieNode node = trie.getChild(path.substring(0, i));
      Assert.assertEquals(node.getValue(), Character.valueOf(path.charAt(i - 1)));
      Assert.assertEquals(node.getSize(), pathChildrenCount);
      Assert.assertFalse(node.isExist());
    }
  }
}
3,649
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/UrlTriePrefixGrouperTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang3.tuple.Triple;
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.ingestion.google.webmaster.GoogleWebmasterFilter.FilterOperator;


/**
 * Unit tests for {@link UrlTriePrefixGrouper}: grouping a {@link UrlTrie}
 * into (prefix, operator, node) triples and expanding a group back to its
 * member pages via {@code groupToPages}.
 *
 * <p>TestNG's {@code Assert.assertEquals} takes (actual, expected); all
 * calls below follow that order.</p>
 */
@Test(groups = {"gobblin.source.extractor.extract.google.webmaster"})
public class UrlTriePrefixGrouperTest {
  private String _property = "www.linkedin.com/";

  /**
   * The trie is:
   *   /
   *   0
   * 1*  2*
   */
  @Test
  public void testGrouping1() {
    UrlTrie trie = new UrlTrie(_property, Arrays.asList(_property + "01", _property + "02"));
    UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie, 1);
    ArrayList<String> chars = new ArrayList<>();
    ArrayList<FilterOperator> operators = new ArrayList<>();
    while (grouper.hasNext()) {
      Triple<String, FilterOperator, UrlTrieNode> group = grouper.next();
      chars.add(group.getLeft());
      operators.add(group.getMiddle());
    }
    // Both leaves come out as CONTAINS groups; the unmarked interior nodes do not.
    Assert.assertEquals(chars.toArray(), new String[]{_property + "01", _property + "02"});
    Assert.assertEquals(operators.toArray(), new FilterOperator[]{FilterOperator.CONTAINS, FilterOperator.CONTAINS});
  }

  /**
   * The trie is:
   *   /
   *   0*
   * 1*  2*
   */
  @Test
  public void testGrouping2() {
    UrlTrie trie = new UrlTrie(_property, Arrays.asList(_property + "0", _property + "01", _property + "02"));
    UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie, 1);
    ArrayList<String> chars = new ArrayList<>();
    ArrayList<FilterOperator> operators = new ArrayList<>();
    while (grouper.hasNext()) {
      Triple<String, FilterOperator, UrlTrieNode> group = grouper.next();
      chars.add(group.getLeft());
      operators.add(group.getMiddle());
    }
    // "0" is itself an inserted page, so after its children it is emitted as EQUALS.
    Assert.assertEquals(chars.toArray(), new String[]{_property + "01", _property + "02", _property + "0"});
    Assert.assertEquals(operators.toArray(),
        new FilterOperator[]{FilterOperator.CONTAINS, FilterOperator.CONTAINS, FilterOperator.EQUALS});
  }

  /**
   * The trie is:
   *     /
   *   0 1 2
   *  3 4 5 6
   *      7
   */
  @Test
  public void testTrie2GroupingWithSize3() {
    UrlTrie trie = UrlTriePostOrderIteratorTest.getUrlTrie2(_property);
    UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie, 3);
    ArrayList<String> chars = new ArrayList<>();
    ArrayList<FilterOperator> operators = new ArrayList<>();
    Triple<String, FilterOperator, UrlTrieNode> group = null;
    while (grouper.hasNext()) {
      group = grouper.next();
      chars.add(group.getLeft());
      operators.add(group.getMiddle());
    }
    Assert.assertEquals(chars.toArray(),
        new String[]{_property + "0", _property + "1", _property + "25", _property + "26", _property + "2"});
    Assert.assertEquals(operators.toArray(),
        new FilterOperator[]{FilterOperator.CONTAINS, FilterOperator.CONTAINS, FilterOperator.CONTAINS,
            FilterOperator.CONTAINS, FilterOperator.EQUALS});

    // The grouper ends at www.linkedin.com/2 with operator EQUALS, which
    // expands to exactly that single page.
    ArrayList<String> pages = UrlTriePrefixGrouper.groupToPages(group);
    Assert.assertEquals(pages.toArray(), new String[]{_property + "2"});
  }

  @Test
  public void testGroupToPagesWithContainsOperator() {
    List<String> pages = Arrays.asList(_property + "13", _property + "14");
    UrlTrie trie = new UrlTrie(_property, pages);
    ArrayList<String> actual =
        UrlTriePrefixGrouper.groupToPages(Triple.of(_property, FilterOperator.CONTAINS, trie.getRoot()));
    Assert.assertEquals(actual.toArray(), pages.toArray());
  }

  @Test
  public void testGroupToPagesWithContainsOperator2() {
    // A duplicated page ("1" twice) must be reported only once.
    List<String> pages = Arrays.asList(_property + "13", _property + "14", _property + "1", _property + "1");
    UrlTrie trie = new UrlTrie(_property, pages);
    ArrayList<String> actual =
        UrlTriePrefixGrouper.groupToPages(Triple.of(_property, FilterOperator.CONTAINS, trie.getRoot()));
    Assert.assertEquals(actual.toArray(), new String[]{_property + "13", _property + "14", _property + "1"});
  }

  @Test
  public void testGroupToPagesWithEqualsOperator() {
    // EQUALS only expands to the node's own page, and only when that page
    // was actually inserted into the trie.
    List<String> pages = Arrays.asList(_property + "13", _property + "14");
    UrlTrie trie1 = new UrlTrie(_property, pages);
    ArrayList<String> actual1 =
        UrlTriePrefixGrouper.groupToPages(Triple.of(_property, FilterOperator.EQUALS, trie1.getRoot()));
    Assert.assertEquals(actual1.size(), 0);

    List<String> pagesWithRoot = new ArrayList<>();
    pagesWithRoot.addAll(pages);
    pagesWithRoot.add(_property);
    UrlTrie trie2 = new UrlTrie(_property, pagesWithRoot);
    ArrayList<String> actual2 =
        UrlTriePrefixGrouper.groupToPages(Triple.of(_property, FilterOperator.EQUALS, trie2.getRoot()));
    Assert.assertEquals(actual2.toArray(), new String[]{_property});
  }

  @Test
  public void testWhenTrieSizeLessThanGroupSize1() {
    // When the whole trie fits in one group, the single group is rooted at
    // the property itself with a CONTAINS operator.
    List<String> pages = Arrays.asList(_property + "13");
    UrlTrie trie1 = new UrlTrie(_property, pages);
    UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie1, 1);
    Triple<String, FilterOperator, UrlTrieNode> next = grouper.next();
    Assert.assertEquals(next.getLeft(), _property);
    Assert.assertEquals(next.getMiddle(), FilterOperator.CONTAINS);
    Assert.assertEquals(next.getRight().getValue(), Character.valueOf('/'));
    Assert.assertFalse(next.getRight().isExist());
    Assert.assertFalse(grouper.hasNext());
  }

  @Test
  public void testWhenTrieSizeLessThanGroupSize2() {
    // Same as above with a larger group size; behavior is identical.
    List<String> pages = Arrays.asList(_property + "13");
    UrlTrie trie1 = new UrlTrie(_property, pages);
    UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie1, 2);
    Triple<String, FilterOperator, UrlTrieNode> next = grouper.next();
    Assert.assertEquals(next.getLeft(), _property);
    Assert.assertEquals(next.getMiddle(), FilterOperator.CONTAINS);
    Assert.assertEquals(next.getRight().getValue(), Character.valueOf('/'));
    Assert.assertFalse(next.getRight().isExist());
    Assert.assertFalse(grouper.hasNext());
  }
}
3,650
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/ProducerJobTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.List;

import org.testng.Assert;
import org.testng.annotations.Test;


/**
 * Tests for {@link ProducerJob} JSON serialization round-tripping through
 * {@link SimpleProducerJob#deserialize(String)}.
 */
@Test(groups = {"gobblin.source.extractor.extract.google.webmaster"})
public class ProducerJobTest {

  @Test
  public void testSerializationWithJobsList() {
    // Two distinct jobs must survive a serialize/deserialize round trip in order.
    ProducerJob first = new SimpleProducerJob("p1", "2016-11-22", "2016-11-22");
    ProducerJob second = new SimpleProducerJob("p2", "2016-11-23", "2016-11-23");
    ArrayList<ProducerJob> original = new ArrayList<>();
    original.add(first);
    original.add(second);
    String serialized = ProducerJob.serialize(original);
    List<ProducerJob> roundTripped = SimpleProducerJob.deserialize(serialized);
    Assert.assertEquals(first, roundTripped.get(0));
    Assert.assertEquals(second, roundTripped.get(1));
  }

  @Test
  public void testSerializationWithEmptyList() {
    // An empty job list serializes to something that deserializes back to empty.
    String serialized = ProducerJob.serialize(new ArrayList<ProducerJob>());
    Assert.assertTrue(SimpleProducerJob.deserialize(serialized).isEmpty());
  }

  @Test
  public void testSerializationWithEmptyString() {
    // Deserializing an empty string yields an empty list rather than failing.
    Assert.assertTrue(SimpleProducerJob.deserialize("").isEmpty());
  }
}
3,651
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterDataFetcherImplTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;
import org.apache.gobblin.configuration.WorkUnitState;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;

import static org.mockito.Mockito.*;


/**
 * Tests for {@link GoogleWebmasterDataFetcherImpl} page listing and paging
 * against a mocked {@link GoogleWebmasterClient}. The fetcher pages through
 * results in 5000-row chunks (GoogleWebmasterClient.API_ROW_LIMIT), probing
 * successive start offsets until a partial chunk is returned.
 */
@Test(groups = {"gobblin.source.extractor.extract.google.webmaster"})
public class GoogleWebmasterDataFetcherImplTest {

  // Site property URL used for all fetchers in this class.
  private String _property = "https://www.myproperty.com/";

  @Test
  public void testGetAllPagesWhenRequestLessThan5000() throws Exception {
    GoogleWebmasterClient client = Mockito.mock(GoogleWebmasterClient.class);
    List<String> retVal = Arrays.asList("abc", "def");
    // Stub only offset 0: the first (and only expected) page request.
    Mockito.when(client.getPages(eq(_property), any(), any(), eq("ALL"), any(Integer.class), any(List.class),
        any(List.class), eq(0))).thenReturn(retVal);

    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(GoogleWebMasterSource.KEY_PROPERTY, _property);

    GoogleWebmasterDataFetcher dataFetcher = new GoogleWebmasterDataFetcherImpl(_property, client, workUnitState);
    // Requesting only 2 pages must not trigger any additional probing calls.
    Collection<ProducerJob> allPages = dataFetcher.getAllPages(null, null, "ALL", 2);
    List<String> pageStrings = new ArrayList<>();
    for (ProducerJob page : allPages) {
      pageStrings.add(page.getPage());
    }
    Assert.assertTrue(CollectionUtils.isEqualCollection(retVal, pageStrings));
    // Exactly one client round-trip is expected for a small request.
    Mockito.verify(client, Mockito.times(1))
        .getPages(eq(_property), any(), any(), eq("ALL"), any(Integer.class), any(List.class), any(List.class),
            eq(0));
  }

  @Test
  public void testGetAllPagesWhenDataSizeLessThan5000AndRequestAll() throws Exception {
    GoogleWebmasterClient client = Mockito.mock(GoogleWebmasterClient.class);
    List<String> allPages = new ArrayList<>();
    for (int i = 0; i < 10; ++i) {
      allPages.add(Integer.toString(i));
    }
    Mockito.when(client.getPages(eq(_property), any(), any(), eq("ALL"), any(Integer.class), any(List.class),
        any(List.class), eq(0))).thenReturn(allPages);

    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(GoogleWebMasterSource.KEY_PROPERTY, _property);

    GoogleWebmasterDataFetcher dataFetcher = new GoogleWebmasterDataFetcherImpl(_property, client, workUnitState);
    // Requesting up to the 5000 limit with only 10 pages available.
    Collection<ProducerJob> response = dataFetcher.getAllPages(null, null, "ALL", 5000);
    List<String> pageStrings = new ArrayList<>();
    for (ProducerJob page : response) {
      pageStrings.add(page.getPage());
    }
    Assert.assertTrue(CollectionUtils.isEqualCollection(pageStrings, allPages));
    // NOTE(review): two calls at offset 0 are expected here — presumably one
    // to size the result set and one to fetch it; confirm against the impl.
    Mockito.verify(client, Mockito.times(2))
        .getPages(eq(_property), any(), any(), eq("ALL"), any(Integer.class), any(List.class), any(List.class),
            eq(0));
  }

  @Test
  public void testGetPageSize1() throws Exception {
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(GoogleWebMasterSource.KEY_PROPERTY, _property);
    GoogleWebmasterClient client = Mockito.mock(GoogleWebmasterClient.class);

    // A full 5000-row chunk; element contents don't matter, only the size.
    List<String> list5000 = new ArrayList<>();
    for (int i = 0; i < 5000; ++i) {
      list5000.add(null);
    }

    // Only offset 0 stubbed: the probe at offset 5000 gets Mockito's default
    // (empty) return, so the total size resolves to 5000.
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(0))).thenReturn(list5000);
    GoogleWebmasterDataFetcherImpl dataFetcher = new GoogleWebmasterDataFetcherImpl(_property, client, workUnitState);
    Assert.assertEquals(dataFetcher.getPagesSize("start_date", "end_date", "country", null, null), 5000);

    // Stubbing offset 5000 as another full chunk pushes the size to 10000.
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(5000))).thenReturn(list5000);
    Assert.assertEquals(dataFetcher.getPagesSize("start_date", "end_date", "country", null, null), 10000);

    // And one more full chunk at offset 10000 gives 15000.
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(10000))).thenReturn(list5000);
    Assert.assertEquals(dataFetcher.getPagesSize("start_date", "end_date", "country", null, null), 15000);
  }

  @Test
  public void testGetPageSize2() throws Exception {
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(GoogleWebMasterSource.KEY_PROPERTY, _property);
    GoogleWebmasterClient client = Mockito.mock(GoogleWebmasterClient.class);

    // A partial chunk (2 < API_ROW_LIMIT) terminates probing immediately.
    List<String> list2 = new ArrayList<>();
    for (int i = 0; i < 2; ++i) {
      list2.add(null);
    }

    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(0))).thenReturn(list2);

    GoogleWebmasterDataFetcherImpl dataFetcher = new GoogleWebmasterDataFetcherImpl(_property, client, workUnitState);
    int size = dataFetcher.getPagesSize("start_date", "end_date", "country", null, null);
    Assert.assertEquals(size, 2);
  }

  @Test
  public void testGetPageSize3() throws Exception {
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(GoogleWebMasterSource.KEY_PROPERTY, _property);
    GoogleWebmasterClient client = Mockito.mock(GoogleWebmasterClient.class);

    List<String> list5000 = new ArrayList<>();
    for (int i = 0; i < 5000; ++i) {
      list5000.add(null);
    }

    // Six consecutive full chunks (offsets 0..25000) ...
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(0))).thenReturn(list5000);
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(5000))).thenReturn(list5000);
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(10000))).thenReturn(list5000);
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(15000))).thenReturn(list5000);
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(20000))).thenReturn(list5000);
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(25000))).thenReturn(list5000);

    List<String> list2 = new ArrayList<>();
    for (int i = 0; i < 2; ++i) {
      list2.add(null);
    }
    // ... followed by a partial chunk at offset 30000 ends the scan.
    Mockito.when(client.getPages(any(String.class), any(String.class), any(String.class), any(String.class),
        eq(GoogleWebmasterClient.API_ROW_LIMIT), any(), any(), eq(30000))).thenReturn(list2);

    GoogleWebmasterDataFetcherImpl dataFetcher = new GoogleWebmasterDataFetcherImpl(_property, client, workUnitState);
    int size = dataFetcher.getPagesSize("start_date", "end_date", "country", null, null);
    // 6 * 5000 + 2 = 30002 total rows.
    Assert.assertEquals(size, 30002);
  }
}
3,652
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterExtractorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.WorkUnit;


/**
 * Unit tests for {@link GoogleWebmasterExtractor} construction: verifies the
 * per-country iterators and response-to-output-schema position maps created
 * by the constructor.
 */
@Test(groups = {"gobblin.source.extractor.extract.google.webmaster"})
public class GoogleWebmasterExtractorTest {

  /**
   * Test that positionMaps and iterators are constructed correctly in the constructor.
   *
   * Two country filters (USA, ALL) crossed with two data fetchers should
   * yield 4 iterators, ordered country-major within each fetcher.
   */
  @Test
  public void testConstructor() throws IOException, DataRecordException {
    WorkUnitState wuState = getWorkUnitState1();
    wuState.setProp(GoogleWebMasterSource.KEY_REQUEST_FILTERS, "Country.USA,Country.ALL");
    List<GoogleWebmasterFilter.Dimension> dimensions =
        Arrays.asList(GoogleWebmasterFilter.Dimension.PAGE, GoogleWebmasterFilter.Dimension.COUNTRY);
    List<GoogleWebmasterDataFetcher.Metric> metrics = Arrays.asList(GoogleWebmasterDataFetcher.Metric.CLICKS);
    // Desired output column order: CLICKS, COUNTRY, PAGE.
    Map<String, Integer> positionMap = new HashMap<>();
    positionMap.put(GoogleWebmasterDataFetcher.Metric.CLICKS.toString(), 0);
    positionMap.put(GoogleWebmasterFilter.Dimension.COUNTRY.toString(), 1);
    positionMap.put(GoogleWebmasterFilter.Dimension.PAGE.toString(), 2);

    GoogleWebmasterDataFetcher dataFetcher1 = Mockito.mock(GoogleWebmasterDataFetcher.class);
    GoogleWebmasterDataFetcher dataFetcher2 = Mockito.mock(GoogleWebmasterDataFetcher.class);

    GoogleWebmasterExtractor extractor =
        new GoogleWebmasterExtractor(wuState, wuState.getWorkunit().getLowWatermark(LongWatermark.class).getValue(),
            wuState.getWorkunit().getExpectedHighWatermark(LongWatermark.class).getValue(), positionMap, dimensions,
            metrics, null, Arrays.asList(dataFetcher1, dataFetcher2));

    // 2 countries x 2 fetchers = 4 iterators, USA before ALL for each fetcher.
    List<GoogleWebmasterExtractorIterator> iterators = extractor.getIterators();
    Assert.assertEquals(iterators.size(), 4);
    Assert.assertEquals(iterators.get(0).getCountry(), "USA");
    Assert.assertEquals(iterators.get(1).getCountry(), "ALL");
    Assert.assertEquals(iterators.get(2).getCountry(), "USA");
    Assert.assertEquals(iterators.get(3).getCountry(), "ALL");

    // Each int[] maps response columns to output positions for one iterator.
    // NOTE(review): these assertEquals calls pass (expected, actual) rather
    // than TestNG's (actual, expected); harmless for equality, but failure
    // messages will read backwards.
    List<int[]> responseToOutputSchema = extractor.getPositionMaps();
    Assert.assertEquals(responseToOutputSchema.size(), 4);
    Assert.assertEquals(new int[]{2, 1, 0}, responseToOutputSchema.get(0)); //country is Country.USA
    Assert.assertEquals(new int[]{2, 0}, responseToOutputSchema.get(1)); //country is Country.ALL, so the country request will be removed.
    Assert.assertEquals(new int[]{2, 1, 0}, responseToOutputSchema.get(2));
    Assert.assertEquals(new int[]{2, 0}, responseToOutputSchema.get(3));
  }

  /**
   * Builds a WorkUnitState covering the watermark interval
   * [20160101235959, 20160102235959] for use by extractor tests.
   */
  public static WorkUnitState getWorkUnitState1() {
    WorkUnit wu = new WorkUnit(new Extract(Extract.TableType.APPEND_ONLY, "namespace", "table"));
    wu.setWatermarkInterval(
        new WatermarkInterval(new LongWatermark(20160101235959L), new LongWatermark(20160102235959L)));
    State js = new State();
    return new WorkUnitState(wu, js);
  }
}
3,653
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/TrieBasedProducerJobTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.util.List;

import org.apache.commons.lang3.tuple.Triple;
import org.testng.Assert;
import org.testng.annotations.Test;


/**
 * Unit tests for {@link TrieBasedProducerJob#partitionJobs()}.
 *
 * A trie-based job first partitions along the URL trie (splitting a prefix
 * group into its child groups plus, when the prefix page itself exists, an
 * EQUALS job for that page). Once a job can no longer be split by pages, it
 * partitions along the date range into per-day {@link SimpleProducerJob}s.
 */
@Test(groups = {"gobblin.source.extractor.extract.google.webmaster"})
public class TrieBasedProducerJobTest {
  private String _property = "www.linkedin.com/";

  @Test
  public void testPartitionJobs() throws Exception {
    // Trie shape (see UrlTriePostOrderIteratorTest.getUrlTrie2):
    //     /
    //   0 1 2
    //  3 4 5 6
    //      7
    UrlTrie trie = UrlTriePostOrderIteratorTest.getUrlTrie2(_property);
    UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie, 4);
    String startDate = "2016-11-29";
    String endDate = "2016-11-30";

    // First group: the "0" subtree (pages 03, 04, 0) as a CONTAINS job.
    Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> node0 = grouper.next();
    TrieBasedProducerJob job0 = new TrieBasedProducerJob(startDate, endDate, node0, 100);
    Assert.assertEquals(job0.getPage(), _property + "0");
    Assert.assertEquals(job0.getOperator(), GoogleWebmasterFilter.FilterOperator.CONTAINS);
    Assert.assertEquals(job0.getStartDate(), startDate);
    Assert.assertEquals(job0.getEndDate(), endDate);
    Assert.assertEquals(job0.getPagesSize(), 3);

    // Splitting by pages yields 03, 04 (CONTAINS) and 0 itself (EQUALS).
    List<? extends ProducerJob> granularJobs = job0.partitionJobs();
    Assert.assertEquals(granularJobs.size(), 3);

    ProducerJob job03 = granularJobs.get(0);
    Assert.assertEquals(job03.getPage(), _property + "03");
    Assert.assertEquals(job03.getOperator(), GoogleWebmasterFilter.FilterOperator.CONTAINS);
    Assert.assertEquals(((TrieBasedProducerJob) job03).getGroupSize(), 2);
    // A single-page job partitions further by date: one job per day.
    List<? extends ProducerJob> job03Dates = job03.partitionJobs();
    Assert.assertEquals(job03Dates.size(), 2);
    Assert.assertEquals(job03Dates.get(0), new SimpleProducerJob(_property + "03", startDate, startDate));
    Assert.assertEquals(job03Dates.get(1), new SimpleProducerJob(_property + "03", endDate, endDate));

    ProducerJob job04 = granularJobs.get(1);
    Assert.assertEquals(job04.getPage(), _property + "04");
    Assert.assertEquals(job04.getOperator(), GoogleWebmasterFilter.FilterOperator.CONTAINS);
    Assert.assertEquals(((TrieBasedProducerJob) job04).getGroupSize(), 2);
    List<? extends ProducerJob> job04Dates = job04.partitionJobs();
    Assert.assertEquals(job04Dates.size(), 2);
    Assert.assertEquals(job04Dates.get(0), new SimpleProducerJob(_property + "04", startDate, startDate));
    Assert.assertEquals(job04Dates.get(1), new SimpleProducerJob(_property + "04", endDate, endDate));

    // The prefix page "0" itself becomes an EQUALS job, then splits by date.
    ProducerJob job0Only = granularJobs.get(2);
    Assert.assertEquals(job0Only.getPage(), _property + "0");
    Assert.assertEquals(job0Only.getOperator(), GoogleWebmasterFilter.FilterOperator.EQUALS);
    Assert.assertEquals(((TrieBasedProducerJob) job0Only).getGroupSize(), 2);
    List<? extends ProducerJob> job0OnlyDates = job0Only.partitionJobs();
    Assert.assertEquals(job0OnlyDates.size(), 2);
    Assert.assertEquals(job0OnlyDates.get(0), new SimpleProducerJob(_property + "0", startDate, startDate));
    Assert.assertEquals(job0OnlyDates.get(1), new SimpleProducerJob(_property + "0", endDate, endDate));

    // Second group: "1" is a single page, so it can only split by date.
    Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> node1 = grouper.next();
    TrieBasedProducerJob job1 = new TrieBasedProducerJob(startDate, endDate, node1, grouper.getGroupSize());
    Assert.assertEquals(job1.getPage(), _property + "1");
    Assert.assertEquals(job1.getOperator(), GoogleWebmasterFilter.FilterOperator.CONTAINS);
    Assert.assertEquals(job1.getStartDate(), startDate);
    Assert.assertEquals(job1.getEndDate(), endDate);
    Assert.assertEquals(job1.getPagesSize(), 1);
    Assert.assertEquals(job1.partitionJobs().size(), 2);

    // Third group: the "2" subtree (pages 257, 25, 26, 2).
    Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> node2 = grouper.next();
    TrieBasedProducerJob job2 = new TrieBasedProducerJob(startDate, endDate, node2, grouper.getGroupSize());
    Assert.assertEquals(job2.getPage(), _property + "2");
    Assert.assertEquals(job2.getOperator(), GoogleWebmasterFilter.FilterOperator.CONTAINS);
    Assert.assertEquals(job2.getStartDate(), startDate);
    Assert.assertEquals(job2.getEndDate(), endDate);
    Assert.assertEquals(job2.getPagesSize(), 4);

    // Splits into the 25 subtree, the 26 leaf, and "2" itself as EQUALS.
    List<? extends ProducerJob> job2Partitions = job2.partitionJobs();
    Assert.assertEquals(job2Partitions.size(), 3);

    ProducerJob job5 = job2Partitions.get(0);
    Assert.assertEquals(job5.getPage(), _property + "25");
    Assert.assertEquals(job5.getOperator(), GoogleWebmasterFilter.FilterOperator.CONTAINS);
    Assert.assertEquals(job5.getStartDate(), startDate);
    Assert.assertEquals(job5.getEndDate(), endDate);
    Assert.assertEquals(job5.getPagesSize(), 2);
    Assert.assertEquals(job5.partitionJobs().size(), 2);

    ProducerJob job6 = job2Partitions.get(1);
    Assert.assertEquals(job6.getPage(), _property + "26");
    Assert.assertEquals(job6.getOperator(), GoogleWebmasterFilter.FilterOperator.CONTAINS);
    Assert.assertEquals(job6.getStartDate(), startDate);
    Assert.assertEquals(job6.getEndDate(), endDate);
    Assert.assertEquals(job6.getPagesSize(), 1);

    ProducerJob job2Only = job2Partitions.get(2);
    Assert.assertEquals(job2Only.getPage(), _property + "2");
    Assert.assertEquals(job2Only.getOperator(), GoogleWebmasterFilter.FilterOperator.EQUALS);
    Assert.assertEquals(job2Only.getStartDate(), startDate);
    Assert.assertEquals(job2Only.getEndDate(), endDate);
    Assert.assertEquals(job2Only.getPagesSize(), 1);

    // All three top-level groups have been consumed.
    Assert.assertFalse(grouper.hasNext());
  }
}
3,654
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/ingestion/google/webmaster/UrlTriePostOrderIteratorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.Arrays;

import org.apache.commons.lang3.tuple.Pair;
import org.testng.Assert;
import org.testng.annotations.Test;


/**
 * Unit tests for {@link UrlTriePostOrderIterator}.
 *
 * The iterator visits the trie in post-order. The second constructor
 * argument is a group-size threshold: judging by the expectations below, a
 * subtree whose page count is within the threshold is emitted as one node
 * (its prefix) instead of being descended into, while larger nodes are
 * emitted individually after their children.
 */
@Test(groups = {"gobblin.source.extractor.extract.google.webmaster"})
public class UrlTriePostOrderIteratorTest {
  private String _property = "www.linkedin.com/";

  @Test
  public void testEmptyTrie1WithSize1() {
    // No property and no pages: nothing to iterate.
    UrlTrie trie = new UrlTrie("", new ArrayList<String>());
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 1);
    Assert.assertFalse(iterator.hasNext());
  }

  @Test
  public void testEmptyTrie2WithSize1() {
    // A property with no inserted pages also yields an empty iteration.
    UrlTrie trie = new UrlTrie(_property, new ArrayList<String>());
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 1);
    Assert.assertFalse(iterator.hasNext());
  }

  /**
   * The trie is:
   * /
   * 0
   * 1
   * 2
   */
  @Test
  public void testVerticalTrie1TraversalWithSize1() {
    UrlTrie trie = new UrlTrie(_property, Arrays.asList(_property + "0", _property + "01", _property + "012"));
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 1);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    // Pure post-order: deepest page first, root last.
    Assert.assertEquals(new String[]{_property + "012", _property + "01", _property + "0", _property},
        chars.toArray());
  }

  /**
   * The trie is:
   * /
   * 0
   * 1
   * 2
   */
  @Test
  public void testVerticalTrie1TraversalWithSize2() {
    UrlTrie trie = new UrlTrie(_property, Arrays.asList(_property + "0", _property + "01", _property + "012"));
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 2);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    // "01" covers the 2-page subtree {01, 012}, so "012" is not emitted alone.
    Assert.assertEquals(new String[]{_property + "01", _property + "0", _property}, chars.toArray());
  }

  /**
   * The trie is:
   * /
   * 0
   * 1
   * 2
   */
  @Test
  public void testVerticalTrie1TraversalWithSize3() {
    UrlTrie trie = new UrlTrie(_property, Arrays.asList(_property + "0", _property + "01", _property + "012"));
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 3);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    //the root node is a leaf node
    Assert.assertEquals(new String[]{_property}, chars.toArray());
  }

  /**
   * The trie is:
   * /
   * 0
   * 1
   * 2
   */
  @Test
  public void testVerticalTrie1TraversalWithSize4() {
    UrlTrie trie = new UrlTrie(_property, Arrays.asList(_property + "0", _property + "01", _property + "012"));
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 4);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    //the root node is a leaf node
    Assert.assertEquals(new String[]{_property}, chars.toArray());
  }

  @Test
  public void testTrie1TraversalWithSize1() {
    UrlTrie trie = getUrlTrie1(_property);
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 1);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    Assert.assertEquals(new String[]{_property + "0", _property + "13", _property + "14", _property + "1", _property},
        chars.toArray());
  }

  @Test
  public void testTrie2TraversalWithSize1() {
    UrlTrie trie = getUrlTrie2(_property);
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 1);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    Assert.assertEquals(new String[]{
        _property + "03", _property + "04", _property + "0", _property + "1", _property + "257", _property + "25",
        _property + "26", _property + "2", _property}, chars.toArray());
  }

  @Test
  public void testTrie2TraversalWithSize2() {
    UrlTrie trie = getUrlTrie2(_property);
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 2);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    Assert.assertEquals(new String[]{//
        _property + "03", //group size 1, contains
        _property + "04", //group size 1, contains
        _property + "0", //group size 1(count is 3), equals
        _property + "1", //group size 1, contains
        _property + "25", //group size 2, contains
        _property + "26", //group size 1, contains
        _property + "2", //group size 1(count is 4), equals
        _property //group size 1(count is 9), equals
    }, chars.toArray());
  }

  @Test
  public void testTrie2TraversalWithSize3() {
    UrlTrie trie = getUrlTrie2(_property);
    UrlTriePostOrderIterator iterator = new UrlTriePostOrderIterator(trie, 3);
    ArrayList<String> chars = new ArrayList<>();
    while (iterator.hasNext()) {
      Pair<String, UrlTrieNode> next = iterator.next();
      chars.add(next.getLeft());
    }
    Assert.assertEquals(new String[]{//
        _property + "0", //group size 3, contains
        _property + "1", //group size 1, contains
        _property + "25", //group size 2, contains
        _property + "26", //group size 1, contains
        _property + "2", //group size 1(count is 4), equals
        _property //group size 1(count is 9), equals
    }, chars.toArray());
  }

  /**
   * The trie is:
   *    /
   *  0   1
   *     3 4
   */
  public static UrlTrie getUrlTrie1(String property) {
    return new UrlTrie(property, Arrays.asList(property + "1", property + "0", property + "13", property + "14"));
  }

  /**
   * The trie is:
   *     /
   *   0 1 2
   *  3 4   5 6
   *        7
   */
  public static UrlTrie getUrlTrie2(String property) {
    return new UrlTrie(property,
        Arrays.asList(property + "26", property + "257", property + "25", property + "1", property + "0",
            property + "2", property + "03", property + "04"));
  }
}
3,655
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor/filebased/GoogleDriveSourceTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.source.extractor.filebased;

import static org.mockito.Mockito.*;

import java.io.IOException;
import java.util.List;

import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.google.GoogleDriveExtractor;
import org.apache.gobblin.source.extractor.extract.google.GoogleDriveFsHelper;
import org.apache.gobblin.source.extractor.extract.google.GoogleDriveSource;


/**
 * Unit tests for {@link GoogleDriveSource}, using a mocked {@link GoogleDriveFsHelper} so no
 * network access is needed.
 */
@Test(groups = { "gobblin.source.extractor.google" })
public class GoogleDriveSourceTest {

  /**
   * Verifies that the snapshot lists every file id joined with its modification time via the
   * source's split pattern.
   */
  @SuppressWarnings("unchecked")
  public void testGetcurrentFsSnapshot() throws FileBasedHelperException {
    @SuppressWarnings("rawtypes")
    GoogleDriveSource source = new GoogleDriveSource<>();
    GoogleDriveFsHelper fsHelper = mock(GoogleDriveFsHelper.class);
    source.fsHelper = fsHelper;

    List<String> fileIds = ImmutableList.of("test1", "test2", "test3");
    when(fsHelper.ls(anyString())).thenReturn(fileIds);
    long timestamp = System.currentTimeMillis();
    when(fsHelper.getFileMTime(anyString())).thenReturn(timestamp);

    // Expected snapshot entry format: <fileId><splitPattern><mtime>.
    List<String> expected = Lists.newArrayList();
    for (String fileId : fileIds) {
      expected.add(fileId + source.splitPattern + timestamp);
    }
    // TestNG's assertEquals signature is (actual, expected); keep that order for sane messages.
    Assert.assertEquals(source.getcurrentFsSnapshot(new State()), expected);
  }

  /** Verifies that the source hands back a Google-Drive-specific extractor. */
  public void testGetExtractor() throws IOException {
    @SuppressWarnings("rawtypes")
    GoogleDriveSource source = new GoogleDriveSource<>();
    GoogleDriveFsHelper fsHelper = mock(GoogleDriveFsHelper.class);
    source.fsHelper = fsHelper;

    Extractor extractor = source.getExtractor(new WorkUnitState());
    Assert.assertTrue(extractor instanceof GoogleDriveExtractor);
  }
}
3,656
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor/extract/google/GoogleDriveFsHelperTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.source.extractor.extract.google;

import static org.mockito.Mockito.*;

import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.lang.mutable.MutableInt;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.api.client.util.DateTime;
import com.google.api.services.drive.Drive;
import com.google.api.services.drive.Drive.Files;
import com.google.api.services.drive.Drive.Files.*;
import com.google.api.services.drive.model.File;
import com.google.api.services.drive.model.FileList;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;

import static org.apache.gobblin.source.extractor.extract.google.GoogleDriveFileSystem.*;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;


/**
 * Unit tests for {@link GoogleDriveFsHelper} against a mocked {@link Drive} client.
 */
@Test(groups = { "gobblin.source.extractor.google" })
public class GoogleDriveFsHelperTest {
  private Drive client;
  private Files files;

  /**
   * Re-creates the {@link Drive} and {@link Files} mocks. Every test method must call this
   * before stubbing so it does not depend on mocks left over from a previously-run test.
   */
  private void setUp() {
    client = mock(Drive.class);
    files = mock(Files.class);
    when(client.files()).thenReturn(files);
  }

  /** Closing the helper must close any stream it handed out. */
  public void closeTest() throws IOException, FileBasedHelperException {
    State state = new State();
    setUp();
    GoogleDriveFsHelper fsHelper = new GoogleDriveFsHelper(state, client, Closer.create());

    Get getResult = mock(Get.class);
    InputStream is = mock(InputStream.class);
    when(client.files()).thenReturn(files);
    when(files.get(anyString())).thenReturn(getResult);
    when(getResult.executeMediaAsInputStream()).thenReturn(is);

    fsHelper.getFileStream("test");
    fsHelper.close();
    verify(is, times(1)).close();
  }

  /** Deleting a file must issue exactly one Drive delete request. */
  public void deleteTest() throws IOException {
    State state = new State();
    setUp();
    GoogleDriveFsHelper fsHelper = new GoogleDriveFsHelper(state, client, Closer.create());

    Delete delete = mock(Delete.class);
    String fileId = "test_file_id";
    when(files.delete(fileId)).thenReturn(delete);

    fsHelper.deleteFile(fileId);
    verify(delete, times(1)).execute();
  }

  /**
   * With page size 1 and a stubbed nextPageToken, {@code ls} must keep requesting pages until
   * the token disappears: 1 initial call + {@code paginatedCalls} follow-ups.
   */
  public void testPagination() throws IOException, FileBasedHelperException {
    setUp(); // fresh mocks; previously this test relied on mocks from earlier test methods
    State state = new State();
    state.appendToSetProp(GoogleDriveFileSystem.PAGE_SIZE, Integer.toString(1));
    GoogleDriveFsHelper fsHelper = new GoogleDriveFsHelper(state, client, Closer.create());

    List listRequest = mock(List.class);
    when(files.list()).thenReturn(listRequest);
    when(listRequest.setPageSize(anyInt())).thenReturn(listRequest);
    when(listRequest.setFields(anyString())).thenReturn(listRequest);
    when(listRequest.setQ(anyString())).thenReturn(listRequest);
    when(listRequest.setPageToken(anyString())).thenReturn(listRequest);

    int paginatedCalls = 5;
    final MutableInt i = new MutableInt(paginatedCalls);
    final File file = new File();
    file.setId("testId");
    file.setModifiedTime(new DateTime(System.currentTimeMillis()));
    when(listRequest.execute()).thenAnswer(new Answer<FileList>() {
      @Override
      public FileList answer(InvocationOnMock invocation) throws Throwable {
        FileList fileList = new FileList();
        fileList.setFiles(ImmutableList.of(file));
        // Keep returning a nextPageToken for the first `paginatedCalls` executions.
        if (i.intValue() > 0) {
          fileList.setNextPageToken("token");
          i.decrement();
        }
        return fileList;
      }
    });

    fsHelper.ls("test");
    int expectedCalls = 1 + paginatedCalls;
    verify(listRequest, times(expectedCalls)).execute();
  }

  /**
   * Builds a three-level folder hierarchy out of stubbed list responses and verifies that
   * {@code ls} on each level returns exactly the files reachable from that level.
   */
  public void testList() throws IOException, FileBasedHelperException {
    setUp(); // fresh mocks; previously this test relied on mocks from earlier test methods
    java.util.List<String> filesRoot = Lists.newArrayList("f0_1", "f0_2", "f0_3", "f0_4", "f0_5");
    java.util.List<String> filesL1 = Lists.newArrayList("f1_1", "f1_2", "f1_3", "f1_4");
    java.util.List<String> filesL2 = Lists.newArrayList("f2_1", "f2_2");
    String folderL1 = "folderL1";
    String folderL2 = "folderL2";

    FileList rootFileList = createFileList(filesRoot, folderL1);
    FileList FileListL1 = createFileList(filesL1, folderL2);
    FileList FileListL2 = createFileList(filesL2, null);

    State state = new State();
    state.appendToSetProp(GoogleDriveFileSystem.PAGE_SIZE, Integer.toString(1));
    GoogleDriveFsHelper fsHelper = new GoogleDriveFsHelper(state, client, Closer.create());

    List listRequest = mock(List.class);
    when(files.list()).thenReturn(listRequest);
    when(listRequest.setFields(anyString())).thenReturn(listRequest);
    when(listRequest.setPageSize(anyInt())).thenReturn(listRequest);

    GoogleDriveFileSystem fs = new GoogleDriveFileSystem();
    when(listRequest.execute()).thenReturn(rootFileList);

    List ListL1Request = mock(List.class);
    when(listRequest.setQ(fs.buildQuery(folderL1, null).get())).thenReturn(ListL1Request);
    when(ListL1Request.execute()).thenReturn(FileListL1);

    List ListL2Request = mock(List.class);
    when(listRequest.setQ(fs.buildQuery(folderL2, null).get())).thenReturn(ListL2Request);
    when(ListL2Request.execute()).thenReturn(FileListL2);

    java.util.List<String> actual = fsHelper.ls(folderL2);
    java.util.List<String> expected = Lists.newArrayList(filesL2);
    Assert.assertTrue(actual.containsAll(expected) && expected.containsAll(actual));

    actual = fsHelper.ls(folderL1);
    expected.addAll(filesL1);
    Assert.assertTrue(actual.containsAll(expected) && expected.containsAll(actual));

    actual = fsHelper.ls(null);
    expected.addAll(filesRoot);
    Assert.assertTrue(actual.containsAll(expected) && expected.containsAll(actual));
  }

  /**
   * Builds a stubbed {@link FileList} containing one {@link File} per id in {@code fileIds},
   * plus (when {@code folderId} is non-null) one folder entry, all stamped with the current time.
   */
  private FileList createFileList(java.util.List<String> fileIds, String folderId) {
    FileList fileList = new FileList();
    java.util.List<File> list = Lists.newArrayList();
    for (String fileId : fileIds) {
      File f = new File();
      f.setId(fileId);
      f.setModifiedTime(new DateTime(System.currentTimeMillis()));
      list.add(f);
    }
    if (folderId != null) {
      File f = new File();
      f.setMimeType(FOLDER_MIME_TYPE);
      f.setId(folderId);
      f.setModifiedTime(new DateTime(System.currentTimeMillis()));
      list.add(f);
    }
    fileList.setFiles(list);
    return fileList;
  }
}
3,657
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor/extract/google/GoogleDriveFileSystemTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.source.extractor.extract.google;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.base.Optional;


/**
 * Test for GoogleDriveFileSystemTest. Most of tests are done via @see GoogleDriveFsHelperTest
 *
 */
@Test(groups = { "gobblin.source.extractor.google" })
public class GoogleDriveFileSystemTest {

  /**
   * Exercises the four combinations of (folderId, fileName) inputs to buildQuery and checks
   * the produced Drive query string for each.
   */
  public void testQuery() throws IOException {
    GoogleDriveFileSystem driveFs = new GoogleDriveFileSystem();
    String parentId = "test_folder_id";
    String namePattern = "test_file_name";

    // Neither input -> no query at all.
    Assert.assertEquals(driveFs.buildQuery(null, null), Optional.absent());
    // Folder only.
    Assert.assertEquals(driveFs.buildQuery(parentId, null), Optional.of("'test_folder_id' in parents"));
    // File name only.
    Assert.assertEquals(driveFs.buildQuery(null, namePattern), Optional.of("name contains 'test_file_name'"));
    // Both -> clauses joined with "and".
    Assert.assertEquals(driveFs.buildQuery(parentId, namePattern),
        Optional.of("'test_folder_id' in parents and name contains 'test_file_name'"));

    driveFs.close();
  }

  /**
   * toFileId must return the path string unchanged regardless of how many slashes it contains.
   */
  public void toFileIdTest() {
    String[] fileIds = {
        "test",
        "test2/test1",
        "test3/test2/test1",
        "test4/test3/test2/test1"
    };
    for (String id : fileIds) {
      Assert.assertEquals(GoogleDriveFileSystem.toFileId(new Path(id)), id);
    }
  }
}
3,658
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/test/java/org/apache/gobblin/source/extractor/extract/google/GoogleAnalyticsUnsampledExtractorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.source.extractor.extract.google;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang.mutable.MutableInt;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.api.services.analytics.Analytics;
import com.google.api.services.analytics.Analytics.Management;
import com.google.api.services.analytics.Analytics.Management.UnsampledReports;
import com.google.api.services.analytics.Analytics.Management.UnsampledReports.Get;
import com.google.api.services.analytics.model.UnsampledReport;
import com.google.api.services.analytics.model.UnsampledReport.DriveDownloadDetails;

import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.exception.NonTransientException;
import org.apache.gobblin.source.extractor.Extractor;

import static org.apache.gobblin.source.extractor.extract.google.GoogleAnalyticsUnsampledExtractor.DOWNLOAD_TYPE_GOOGLE_DRIVE;
import static org.apache.gobblin.source.extractor.extract.google.GoogleAnalyticsUnsampledExtractor.POLL_RETRY_PREFIX;
import static org.apache.gobblin.source.extractor.extract.google.GoogleAnalyticsUnsampledExtractor.ReportCreationStatus;
import static org.apache.gobblin.util.retry.RetryerFactory.RETRY_INTERVAL_MS;
import static org.apache.gobblin.util.retry.RetryerFactory.RETRY_TIME_OUT_MS;
import static org.mockito.Mockito.*;


/**
 * Unit tests for {@link GoogleAnalyticsUnsampledExtractor}'s polling logic against a mocked
 * Analytics client. The mock returns a terminal report status only on the 10th call to
 * {@code Get.execute()} (see {@link #setup}), so each test exercises repeated polling.
 */
@Test(groups = { "gobblin.source.extractor.google" })
public class GoogleAnalyticsUnsampledExtractorTest {
  private WorkUnitState wuState;
  private Analytics gaService;
  // The mocked unsampled-report Get request; shared so tests can verify its call count.
  private Get getReq;
  private static final String EXPECTED_FILE_ID = "testFileId";

  /**
   * Polling must keep retrying until the report reaches COMPLETED and then surface the Google
   * Drive document id from the report's download details.
   */
  public void testPollForCompletion() throws IOException {
    wuState = new WorkUnitState();
    // 30s timeout with 1ms between retries lets the 10-poll mock finish quickly.
    wuState.setProp(POLL_RETRY_PREFIX + RETRY_TIME_OUT_MS, TimeUnit.SECONDS.toMillis(30L));
    wuState.setProp(POLL_RETRY_PREFIX + RETRY_INTERVAL_MS, 1L);
    GoogleAnalyticsUnsampledExtractor extractor = setup(ReportCreationStatus.COMPLETED, wuState, false);

    UnsampledReport requestedReport = new UnsampledReport()
        .setAccountId("testAccountId")
        .setWebPropertyId("testWebPropertyId")
        .setProfileId("testProfileId")
        .setId("testId");

    String actualFileId =
        extractor.pollForCompletion(wuState, gaService, requestedReport).getDriveDownloadDetails().getDocumentId();
    Assert.assertEquals(actualFileId, EXPECTED_FILE_ID);
    // The terminal status is only returned on the 10th execute(), so polling must have repeated.
    verify(getReq, atLeast(5)).execute();
  }

  /**
   * A report that ends in FAILED status must abort polling with a {@link NonTransientException}
   * (wrapped by the retry machinery, hence the double getCause()).
   */
  public void testPollForCompletionFailure() throws IOException {
    wuState = new WorkUnitState();
    wuState.setProp(POLL_RETRY_PREFIX + RETRY_TIME_OUT_MS, TimeUnit.SECONDS.toMillis(30L));
    wuState.setProp(POLL_RETRY_PREFIX + RETRY_INTERVAL_MS, 1L);
    GoogleAnalyticsUnsampledExtractor extractor = setup(ReportCreationStatus.FAILED, wuState, false);

    UnsampledReport requestedReport = new UnsampledReport()
        .setAccountId("testAccountId")
        .setWebPropertyId("testWebPropertyId")
        .setProfileId("testProfileId")
        .setId("testId");

    try {
      extractor.pollForCompletion(wuState, gaService, requestedReport);
      Assert.fail("Should have failed with failed status");
    } catch (Exception e) {
      Assert.assertTrue(e.getCause().getCause() instanceof NonTransientException);
    }
    verify(getReq, atLeast(5)).execute();
  }

  /**
   * Transient exceptions thrown by intermediate polls (throwException=true makes every
   * non-terminal execute() throw) must be retried through, still ending in success.
   */
  public void testPollForCompletionWithException() throws IOException {
    wuState = new WorkUnitState();
    wuState.setProp(POLL_RETRY_PREFIX + RETRY_TIME_OUT_MS, TimeUnit.SECONDS.toMillis(30L));
    wuState.setProp(POLL_RETRY_PREFIX + RETRY_INTERVAL_MS, 1L);
    GoogleAnalyticsUnsampledExtractor extractor = setup(ReportCreationStatus.COMPLETED, wuState, true);

    UnsampledReport requestedReport = new UnsampledReport()
        .setAccountId("testAccountId")
        .setWebPropertyId("testWebPropertyId")
        .setProfileId("testProfileId")
        .setId("testId");

    String actualFileId =
        extractor.pollForCompletion(wuState, gaService, requestedReport).getDriveDownloadDetails().getDocumentId();
    Assert.assertEquals(actualFileId, EXPECTED_FILE_ID);
    verify(getReq, atLeast(5)).execute();
  }

  /**
   * Builds the extractor under test with a fully mocked Analytics call chain
   * (management -> unsampledReports -> get -> execute).
   *
   * The stubbed execute() counts down from 10: on the 10th call it returns a report carrying
   * {@code status} and the expected Drive document id; before that it either returns an empty
   * report or, when {@code throwException} is set, throws a RuntimeException to simulate a
   * transient failure.
   */
  private GoogleAnalyticsUnsampledExtractor setup(final ReportCreationStatus status, WorkUnitState wuState,
      final boolean throwException) throws IOException {
    Extractor actualExtractor = mock(Extractor.class);
    gaService = mock(Analytics.class);
    Management mgmt = mock(Management.class);
    when(gaService.management()).thenReturn(mgmt);
    UnsampledReports req = mock(UnsampledReports.class);
    when(mgmt.unsampledReports()).thenReturn(req);
    getReq = mock(Get.class);
    when(req.get(anyString(), anyString(), anyString(), anyString())).thenReturn(getReq);

    int pollCount = 10;
    final MutableInt countDown = new MutableInt(pollCount);
    when(getReq.execute()).then(new Answer<UnsampledReport>() {
      @Override
      public UnsampledReport answer(InvocationOnMock invocation) throws Throwable {
        countDown.decrement();
        if (countDown.intValue() == 0) {
          // Terminal poll: return the report with the requested status and file id.
          UnsampledReport response = new UnsampledReport();
          DriveDownloadDetails details = new DriveDownloadDetails();
          details.setDocumentId(EXPECTED_FILE_ID);
          response.setStatus(status.name())
                  .setDownloadType(DOWNLOAD_TYPE_GOOGLE_DRIVE)
                  .setDriveDownloadDetails(details);
          return response;
        } else if (throwException) {
          throw new RuntimeException("Dummy exception.");
        }
        // Non-terminal poll: report not ready yet.
        return new UnsampledReport();
      }
    });
    return new GoogleAnalyticsUnsampledExtractor<>(wuState, actualExtractor, gaService);
  }
}
3,659
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/AsyncIteratorWithDataSink.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google;

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;

import lombok.extern.slf4j.Slf4j;


/**
 * An {@link Iterator} backed by a bounded queue that a lazily-started producer thread fills.
 * The producer (supplied by {@link #getProducerRunnable()}) is started on the first call to
 * {@link #hasNext()}; consumers block-poll the queue until either an element arrives or the
 * producer thread has terminated and the queue is drained.
 *
 * If the producer thread dies with an uncaught exception, the next {@link #hasNext()} call
 * rethrows it wrapped in a RuntimeException instead of silently ending iteration.
 */
@Slf4j
public abstract class AsyncIteratorWithDataSink<T> implements Iterator<T> {
  // Guards reads/writes of exceptionInProducerThread set from the producer's exception handler.
  private final Object lock = new Object();
  private volatile Throwable exceptionInProducerThread = null;
  private Thread _producerThread;
  // Bounded hand-off queue the producer fills; exposed to subclasses' producer runnables.
  protected LinkedBlockingDeque<T> _dataSink;
  // How long each poll blocks, in seconds.
  private final int _pollBlockingTime;
  // The element fetched ahead by hasNext() and not yet handed out by next().
  private T _next = null;

  protected AsyncIteratorWithDataSink(int queueSize, int pollBlockingTime) {
    log.info(String.format("Setting queue size: %d, poll blocking second: %d", queueSize, pollBlockingTime));
    _dataSink = new LinkedBlockingDeque<>(queueSize);
    _pollBlockingTime = pollBlockingTime;
  }

  @Override
  public boolean hasNext() {
    initialize();
    if (_next != null) {
      return true;
    }
    //if _next doesn't exist, try polling the next one.
    try {
      _next = _dataSink.poll(_pollBlockingTime, TimeUnit.SECONDS);
      while (_next == null) {
        if (_producerThread.isAlive()) {
          log.info(String.format("Producer job not done yet. Will re-poll for %s second(s)...", _pollBlockingTime));
          _next = _dataSink.poll(_pollBlockingTime, TimeUnit.SECONDS);
          continue;
        }
        // The producer may have enqueued final elements between our timed-out poll and its
        // death; drain once more before declaring the iteration over, so no data is dropped.
        _next = _dataSink.poll();
        if (_next != null) {
          return true;
        }
        synchronized (lock) {
          if (exceptionInProducerThread != null) {
            throw new RuntimeException(
                String.format("Found exception in producer thread %s", _producerThread.getName()),
                exceptionInProducerThread);
          }
        }
        log.info("Producer job done. No more data in the queue.");
        return false;
      }
      return true;
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers up the stack can still observe it.
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    }
  }

  /** Starts the producer thread exactly once, on first use. */
  private void initialize() {
    if (_producerThread == null) {
      _producerThread = new Thread(getProducerRunnable());
      _producerThread.setUncaughtExceptionHandler(getExceptionHandler());
      _producerThread.start();
    }
  }

  /** Supplies the producer task that fills {@link #_dataSink}. */
  protected abstract Runnable getProducerRunnable();

  @Override
  public T next() {
    if (hasNext()) {
      T toReturn = _next;
      _next = null;
      return toReturn;
    }
    throw new NoSuchElementException();
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  /** Records any uncaught producer failure so hasNext() can rethrow it on the consumer side. */
  private Thread.UncaughtExceptionHandler getExceptionHandler() {
    return new Thread.UncaughtExceptionHandler() {
      @Override
      public void uncaughtException(Thread t, Throwable e) {
        synchronized (lock) {
          exceptionInProducerThread = e;
        }
      }
    };
  }
}
3,660
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/GoggleIngestionConfigurationKeys.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google;

/**
 * Configuration keys for the Google ingestion module (day partitioner and async source iterator).
 *
 * NOTE(review): the class name typo ("Goggle") is kept because renaming would break existing
 * callers. Also note that {@link #DAY_PARTITIONER_KEY_PREFIX} has no trailing "." separator, so
 * the derived keys read e.g. "writer.partitioner.google_ingestionprefix" — existing job configs
 * presumably depend on these exact strings, so the values are left unchanged; confirm before
 * ever normalizing them.
 */
public class GoggleIngestionConfigurationKeys {

  /** Constants holder; not meant to be instantiated. */
  private GoggleIngestionConfigurationKeys() {
  }

  /** Common prefix for all day-partitioner keys. */
  public static final String DAY_PARTITIONER_KEY_PREFIX = "writer.partitioner.google_ingestion";

  /**
   * Optional. Default to String.Empty
   * Prepend a prefix to each partition
   */
  public static final String KEY_PARTITIONER_PREFIX = DAY_PARTITIONER_KEY_PREFIX + "prefix";

  /**
   * Optional. Default to false.
   * Determine whether to include column names into the partition path.
   */
  public static final String KEY_INCLUDE_COLUMN_NAMES = DAY_PARTITIONER_KEY_PREFIX + "column_names.include";

  /**
   * Optional. Default to "Date".
   * Configure the column name for "Date" field/column.
   */
  public static final String KEY_DATE_COLUMN_NAME = DAY_PARTITIONER_KEY_PREFIX + "date.column_name";

  /**
   * Optional. Default to "yyyy-MM-dd".
   * Configure the date string format for date value in records
   */
  public static final String KEY_DATE_FORMAT = DAY_PARTITIONER_KEY_PREFIX + "date.format";

  /**
   * Configure the size of underlying blocking queue of the asynchronized iterator - AsyncIteratorWithDataSink
   * Default to 2000.
   */
  public static final String SOURCE_ASYNC_ITERATOR_BLOCKING_QUEUE_SIZE = "source.async_iterator.blocking_queue_size";

  /**
   * Configure the poll blocking time of underlying blocking queue of the asynchronized iterator - AsyncIteratorWithDataSink
   * Default to 1 second.
   */
  public static final String SOURCE_ASYNC_ITERATOR_POLL_BLOCKING_TIME = "source.async_iterator.poll_blocking_time";
}
3,661
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/DayPartitioner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google;

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.StringUtils;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.writer.partitioner.WriterPartitioner;


/**
 * This day partitioner is responsible to for partition the output into the layout as
 * - timestamp_append
 * - yyyy
 * - MM
 * - dd
 * - part.your.data.output.avro
 *
 * In order to get the date column for partitioning, you must provide GoggleIngestionConfigurationKeys.KEY_DATE_COLUMN_NAME,
 * otherwise, the column will be default to "Date". And the date format is default to "yyyy-MM-dd",
 * you can change it by configuring the key GoggleIngestionConfigurationKeys.KEY_DATE_FORMAT.
 *
 * You can further enable adding column names to the output paths.
 * If you turn on column names option (configured by GoggleIngestionConfigurationKeys.KEY_INCLUDE_COLUMN_NAMES), the layout would become
 * - timestamp_append
 * - year=yyyy
 * - month=MM
 * - day=dd
 * - part.your.data.output.avro
 */
public class DayPartitioner implements WriterPartitioner<GenericRecord> {
  private static final String PARTITION_COLUMN_PREFIX = "type";
  private static final String PARTITION_COLUMN_YEAR = "year";
  private static final String PARTITION_COLUMN_MONTH = "month";
  private static final String PARTITION_COLUMN_DAY = "day";
  private static final String DEFAULT_DATE_COLUMN = "Date";
  private static final String DEFAULT_DATE_FORMAT = "yyyy-MM-dd";
  private static final String NAME = "YearMonthDayPartitioner";
  private static final String NAME_SPACE = "gobblin.ingestion.google";

  // Whether to embed "year="/"month="/"day=" (and "type=") into the partition values.
  private final boolean _withColumnNames;
  // Optional literal prefix for each partition path; may be null/blank (then disabled).
  private final String _prefix;
  private final boolean _withPrefix;
  // Name of the record field that carries the date string to partition on.
  private final String _dateColumn;
  private final DateTimeFormatter _dateFormatter;
  private final Schema _partitionSchema;

  // numBranches and branchId are part of the WriterPartitioner constructor contract but are
  // not used by this implementation.
  public DayPartitioner(State state, int numBranches, int branchId) {
    _withColumnNames = state.getPropAsBoolean(GoggleIngestionConfigurationKeys.KEY_INCLUDE_COLUMN_NAMES, false);
    _prefix = state.getProp(GoggleIngestionConfigurationKeys.KEY_PARTITIONER_PREFIX);
    _withPrefix = StringUtils.isNotBlank(_prefix);

    _dateColumn = state.getProp(GoggleIngestionConfigurationKeys.KEY_DATE_COLUMN_NAME, DEFAULT_DATE_COLUMN);
    _dateFormatter =
        DateTimeFormat.forPattern(state.getProp(GoggleIngestionConfigurationKeys.KEY_DATE_FORMAT, DEFAULT_DATE_FORMAT));

    // Build the partition schema: [type,] year, month, day — all declared as strings.
    // The optional "type" field is only present when a prefix is configured, so schema and
    // partitionForRecord must agree on _withPrefix.
    SchemaBuilder.FieldAssembler<Schema> assembler = SchemaBuilder.record(NAME).namespace(NAME_SPACE).fields();
    Schema stringType = Schema.create(Schema.Type.STRING);
    if (_withPrefix) {
      assembler = assembler.name(PARTITION_COLUMN_PREFIX).type(stringType).noDefault();
    }
    _partitionSchema =
        assembler.name(PARTITION_COLUMN_YEAR).type(stringType).noDefault().name(PARTITION_COLUMN_MONTH).type(stringType)
            .noDefault().name(PARTITION_COLUMN_DAY).type(stringType).noDefault().endRecord();
  }

  @Override
  public Schema partitionSchema() {
    return _partitionSchema;
  }

  /**
   * Parses the record's date column with the configured pattern and fills a partition record
   * with year/month/day (optionally prefixed and/or labeled with column names).
   * Throws if the record lacks the date column (NPE on toString) or the value does not match
   * the configured date format.
   */
  @Override
  public GenericRecord partitionForRecord(GenericRecord record) {
    GenericRecord partition = new GenericData.Record(_partitionSchema);

    String dateString = record.get(_dateColumn).toString();
    DateTime date = _dateFormatter.parseDateTime(dateString);
    if (_withPrefix) {
      if (_withColumnNames) {
        partition.put(PARTITION_COLUMN_PREFIX, PARTITION_COLUMN_PREFIX + "=" + _prefix);
      } else {
        partition.put(PARTITION_COLUMN_PREFIX, _prefix);
      }
    }

    if (_withColumnNames) {
      partition.put(PARTITION_COLUMN_YEAR, PARTITION_COLUMN_YEAR + "=" + date.getYear());
      partition.put(PARTITION_COLUMN_MONTH, PARTITION_COLUMN_MONTH + "=" + date.getMonthOfYear());
      partition.put(PARTITION_COLUMN_DAY, PARTITION_COLUMN_DAY + "=" + date.getDayOfMonth());
    } else {
      // NOTE(review): these puts store ints into string-typed schema fields while the branch
      // above stores Strings; presumably the downstream writer stringifies values — confirm
      // before tightening the schema.
      partition.put(PARTITION_COLUMN_YEAR, date.getYear());
      partition.put(PARTITION_COLUMN_MONTH, date.getMonthOfYear());
      partition.put(PARTITION_COLUMN_DAY, date.getDayOfMonth());
    }
    return partition;
  }
}
3,662
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/util/SchemaUtil.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.util;

import com.google.gson.JsonObject;

import org.apache.gobblin.converter.avro.JsonElementConversionFactory;


/**
 * Static helpers for building the JSON column descriptors used by the Gobblin
 * source-schema / avro-conversion machinery.
 */
public class SchemaUtil {

  // Utility class: static methods only, no instances.
  private SchemaUtil() {
  }

  /**
   * Builds a single column descriptor of the form
   * {@code {"columnName": ..., "isNullable": ..., "dataType": {"type": ...}}}.
   *
   * @param columnName name of the column
   * @param isNullable whether the column may hold null values
   * @param columnType conversion type whose {@code toString()} becomes the "type" value
   * @return a freshly constructed {@link JsonObject} describing the column
   */
  public static JsonObject createColumnJson(String columnName, boolean isNullable,
      JsonElementConversionFactory.Type columnType) {
    JsonObject columnJson = new JsonObject();
    columnJson.addProperty("columnName", columnName);
    columnJson.addProperty("isNullable", isNullable);

    JsonObject typeJson = new JsonObject();
    typeJson.addProperty("type", columnType.toString());
    columnJson.add("dataType", typeJson);
    return columnJson;
  }
}
3,663
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebMasterSourceDaily.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.ingestion.google.webmaster; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; import com.google.api.client.auth.oauth2.Credential; import com.google.api.services.webmasters.WebmastersScopes; import com.google.common.base.Preconditions; import com.google.gson.JsonArray; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.State; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.extract.google.GoogleCommon; import org.apache.gobblin.source.extractor.extract.google.GoogleCommonKeys; import org.apache.gobblin.source.extractor.partition.Partition; import org.apache.gobblin.source.extractor.watermark.DateWatermark; import org.apache.gobblin.source.extractor.watermark.TimestampWatermark; import static org.apache.gobblin.configuration.ConfigurationKeys.SOURCE_CONN_PRIVATE_KEY; import static org.apache.gobblin.configuration.ConfigurationKeys.SOURCE_CONN_USERNAME; import static org.apache.gobblin.configuration.ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT; import static 
org.apache.gobblin.configuration.ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL; /** * The logic of calculating the watermarks in this GoogleWebMasterSourceDaily only works with the configuration below: * * source.querybased.watermark.type=hour * source.querybased.partition.interval=24 */ @Slf4j public class GoogleWebMasterSourceDaily extends GoogleWebMasterSource { @Override GoogleWebmasterExtractor createExtractor(WorkUnitState state, Map<String, Integer> columnPositionMap, List<GoogleWebmasterFilter.Dimension> requestedDimensions, List<GoogleWebmasterDataFetcher.Metric> requestedMetrics, JsonArray schemaJson) throws IOException { Preconditions.checkArgument( state.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE).compareToIgnoreCase("Hour") == 0); Preconditions.checkArgument(state.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_PARTITION_INTERVAL) == 24); Partition partition = Partition.deserialize(state.getWorkunit()); long lowWatermark = partition.getLowWatermark(); long expectedHighWatermark = partition.getHighWatermark(); /* This change is needed because 1. The partition behavior changed due to commit 7d730fcb0263b8ca820af0366818160d638d1336 [7d730fc] by zxcware <zxcware@gmail.com> on April 3, 2017 at 11:47:41 AM PDT 2. Google Search Console API only cares about Dates, and are both side inclusive. Therefore, do the following processing. */ int dateDiff = partition.isHighWatermarkInclusive() ? 
1 : 0; long highWatermarkDate = DateWatermark.adjustWatermark(Long.toString(expectedHighWatermark), dateDiff); long updatedExpectedHighWatermark = TimestampWatermark.adjustWatermark(Long.toString(highWatermarkDate), -1); updatedExpectedHighWatermark = Math.max(lowWatermark, updatedExpectedHighWatermark); GoogleWebmasterClientImpl gscClient = new GoogleWebmasterClientImpl(getCredential(state), state.getProp(ConfigurationKeys.SOURCE_ENTITY)); return new GoogleWebmasterExtractor(gscClient, state, lowWatermark, updatedExpectedHighWatermark, columnPositionMap, requestedDimensions, requestedMetrics, schemaJson); } private static Credential getCredential(State wuState) { String scope = wuState.getProp(GoogleCommonKeys.API_SCOPES, WebmastersScopes.WEBMASTERS_READONLY); Preconditions.checkArgument(Objects.equals(WebmastersScopes.WEBMASTERS_READONLY, scope) || Objects .equals(WebmastersScopes.WEBMASTERS, scope), "The scope for WebMaster must either be WEBMASTERS_READONLY or WEBMASTERS"); String credentialFile = wuState.getProp(SOURCE_CONN_PRIVATE_KEY); List<String> scopes = Collections.singletonList(scope); // return GoogleCredential.fromStream(new FileInputStream(credentialFile)) // .createScoped(Collections.singletonList(scope)); return new GoogleCommon.CredentialBuilder(credentialFile, scopes) .fileSystemUri(wuState.getProp(GoogleCommonKeys.PRIVATE_KEY_FILESYSTEM_URI)) .proxyUrl(wuState.getProp(SOURCE_CONN_USE_PROXY_URL)).port(wuState.getProp(SOURCE_CONN_USE_PROXY_PORT)) .serviceAccountId(wuState.getProp(SOURCE_CONN_USERNAME)).build(); } }
3,664
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterDataFetcher.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.ingestion.google.webmaster; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import com.google.api.client.googleapis.batch.json.JsonBatchCallback; import com.google.api.services.webmasters.model.ApiDataRow; import com.google.api.services.webmasters.model.ApiDimensionFilter; import com.google.api.services.webmasters.model.SearchAnalyticsQueryResponse; /** * GoogleWebmasterDataFetcher implements the logic to download all query data(e.g. page, query, clicks, impressions, CTR, position) for a given date and country. * * The user of this GoogleWebmasterDataFetcher should follow these steps: * 1. Call getAllPages to get all pages based on your filters. * 2. For each page we get at last step, call performSearchAnalyticsQuery to get query data. If query data is not available, then this page won't be included in the return value. * * Note: * Due to the limitation of the Google API -- each API request can return at most 5000 rows of records, you need to take additional steps to fetch all data that the Google Search Console keeps. 
* * There is a rule to check whether Google Search Console keeps more than 5000 rows of data based on your request. The rule is that if you request for 5000 rows, but the response has less than 5000 rows; then in this case you've got all data that the Google service provides. If you request for 5000 rows, and the API returns you 5000 rows, there is a high chance that the Google service has more data matching your query, but due to the limitation of the API, only first 5000 rows returned. * */ public abstract class GoogleWebmasterDataFetcher { public abstract String getSiteProperty(); enum Metric { CLICKS, IMPRESSIONS, CTR, POSITION } /** * Results are composed of [[requestedDimension list], clicks, impressions, ctr, position] * @param rowLimit row limit for this API call * @param requestedDimensions a list of dimension requests. The dimension values can be found at the first part of the return value * @param filters filters of your request */ public abstract List<String[]> performSearchAnalyticsQuery(String startDate, String endDate, int rowLimit, List<GoogleWebmasterFilter.Dimension> requestedDimensions, List<Metric> requestedMetrics, Collection<ApiDimensionFilter> filters) throws IOException; /** * Call API in batches */ public abstract void performSearchAnalyticsQueryInBatch(List<ProducerJob> jobs, List<ArrayList<ApiDimensionFilter>> filterList, List<JsonBatchCallback<SearchAnalyticsQueryResponse>> callbackList, List<GoogleWebmasterFilter.Dimension> requestedDimensions, int rowLimit) throws IOException; /** * Return all pages given (date, country) filter * @param country country code string * @param rowLimit this is mostly for testing purpose. 
In order to get all pages, set this to the API row limit, which is 5000 */ public abstract Collection<ProducerJob> getAllPages(String startDate, String endDate, String country, int rowLimit) throws IOException; public static List<String[]> convertResponse(List<Metric> requestedMetrics, SearchAnalyticsQueryResponse response) { List<ApiDataRow> rows = response.getRows(); if (rows == null || rows.isEmpty()) { return new ArrayList<>(); } int arraySize = rows.get(0).getKeys().size() + requestedMetrics.size(); List<String[]> ret = new ArrayList<>(rows.size()); for (ApiDataRow row : rows) { List<String> keys = row.getKeys(); String[] data = new String[arraySize]; int i = 0; for (; i < keys.size(); ++i) { data[i] = keys.get(i); } for (Metric requestedMetric : requestedMetrics) { if (requestedMetric == Metric.CLICKS) { data[i] = row.getClicks().toString(); } else if (requestedMetric == Metric.IMPRESSIONS) { data[i] = row.getImpressions().toString(); } else if (requestedMetric == Metric.CTR) { data[i] = String.format("%.5f", row.getCtr()); } else if (requestedMetric == Metric.POSITION) { data[i] = String.format("%.2f", row.getPosition()); } else { throw new RuntimeException("Unknown Google Webmaster Metric Type"); } ++i; } ret.add(data); } return ret; } }
3,665
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/TrieBasedProducerJob.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.ingestion.google.webmaster; import java.util.ArrayList; import java.util.List; import org.apache.commons.lang3.tuple.Triple; public class TrieBasedProducerJob extends ProducerJob { private final String _startDate; private final String _endDate; private final int _groupSize; private final Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> _jobNode; TrieBasedProducerJob(String startDate, String endDate, Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> jobNode, int groupSize) { _startDate = startDate; _endDate = endDate; _jobNode = jobNode; _groupSize = groupSize; } @Override public String getPage() { return _jobNode.getLeft(); } @Override public String getStartDate() { return _startDate; } @Override public String getEndDate() { return _endDate; } @Override public GoogleWebmasterFilter.FilterOperator getOperator() { return _jobNode.getMiddle(); } @Override public int getPagesSize() { if (isOperatorEquals()) { return 1; } else { return _jobNode.getRight().getSize(); } } /** * The implementation here will first partition the job by pages, and then by dates. * @return */ @Override public List<? 
extends ProducerJob> partitionJobs() { UrlTrieNode root = _jobNode.getRight(); if (isOperatorEquals() || root.getSize() == 1) { //Either at an Equals-Node or a Leaf-Node, both of which actually has actual size 1. return super.partitionJobs(); } else { if (_groupSize <= 1) { throw new RuntimeException("This is impossible. When group size is 1, the operator must be equals"); } UrlTrie trie = new UrlTrie(getPage(), root); int gs = Math.min(root.getSize(), _groupSize); UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie, (int) Math.ceil(gs / 2.0)); List<TrieBasedProducerJob> jobs = new ArrayList<>(); while (grouper.hasNext()) { jobs.add(new TrieBasedProducerJob(_startDate, _endDate, grouper.next(), grouper.getGroupSize())); } return jobs; } } private boolean isOperatorEquals() { return getOperator().equals(GoogleWebmasterFilter.FilterOperator.EQUALS); } @Override public String toString() { return String.format( "TrieBasedProducerJob{_page='%s', _startDate='%s', _endDate='%s', _operator='%s', _groupSize='%s', _nodeSize='%s'}", getPage(), _startDate, _endDate, getOperator(), _groupSize, _jobNode.getRight().getSize()); } public int getGroupSize() { return _groupSize; } }
3,666
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/UrlGrouper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.util.Iterator;

import org.apache.commons.lang3.tuple.Triple;


/**
 * Packages the URL pages/nodes into groups of the given group size while traversing the UrlTrie
 * with a TrieIterator. If the current node is not a "leaf" node as defined by the TrieIterator,
 * a "fake" group of size 1 is created containing only that node.
 *
 * Iterating the groups yields a Triple:
 *
 * Triple.1 is this group's root URL — the full URL to the root node of the group.
 * Triple.2 is the FilterOperator type for this group:
 *   case 1. If the number of descendants of this group is &lt;= groupSize, the operator is
 *           FilterOperator.CONTAINS. This is a real group.
 *   case 2. Otherwise, the node is only returned if it exists itself, with operator
 *           FilterOperator.EQUALS. Such a group is actually a single value.
 * Triple.3 is the root node of this group.
 */
public interface UrlGrouper extends Iterator<Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode>> {

  /**
   * @return the maximum number of trie nodes packed into one group (the grouping threshold
   *         this grouper was configured with)
   */
  int getGroupSize();
}
3,667
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterExtractor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.ingestion.google.webmaster;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

import com.google.api.services.webmasters.model.ApiDimensionFilter;
import com.google.common.base.Splitter;
import com.google.gson.JsonArray;
import com.google.common.collect.Iterables;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.LongWatermark;


/**
 * Extractor that pulls Search Console data for one watermark range. It fans out into one
 * {@link GoogleWebmasterExtractorIterator} per (site property, filter group) combination and
 * drains them sequentially, remapping each API row into the output schema's column order.
 */
@Slf4j
@Alpha
public class GoogleWebmasterExtractor implements Extractor<String, String[]> {
  // Comma splitter for multi-valued configuration properties (site properties, filters).
  private final static Splitter splitter = Splitter.on(",").omitEmptyStrings().trimResults();
  private final JsonArray _schema;
  private final WorkUnitState _wuState;
  private final DateTimeFormatter dateFormatter = DateTimeFormat.forPattern("yyyy-MM-dd");
  // When true, the last schema column is filled with the originating site property.
  private final boolean _includeSource;
  /**
   * _current is an index that indicates which iterator is under processing.
   */
  private int _current = 0;
  private List<GoogleWebmasterExtractorIterator> _iterators = new ArrayList<>();
  /**
   * Each element keeps a mapping from API response order to output schema order.
   * The array index matches the order of API response.
   * The array values matches the order of output schema.
   */
  private List<int[]> _positionMaps = new ArrayList<>();

  private final DateTime _startDate;
  private final long _expectedHighWaterMark;
  private final DateTime _expectedHighWaterMarkDate;

  /**
   * Production constructor: builds one data fetcher per configured site property, then delegates
   * to the test constructor below.
   *
   * @param lowWatermark           inclusive low watermark, encoded as yyyyMMddHHmmss
   * @param expectedHighWaterMark  inclusive high watermark, encoded as yyyyMMddHHmmss
   */
  public GoogleWebmasterExtractor(GoogleWebmasterClient gscClient, WorkUnitState wuState, long lowWatermark,
      long expectedHighWaterMark, Map<String, Integer> columnPositionMap,
      List<GoogleWebmasterFilter.Dimension> requestedDimensions,
      List<GoogleWebmasterDataFetcher.Metric> requestedMetrics, JsonArray schemaJson)
      throws IOException {
    this(wuState, lowWatermark, expectedHighWaterMark, columnPositionMap, requestedDimensions, requestedMetrics,
        schemaJson,
        createGoogleWebmasterDataFetchers(wuState.getProp(GoogleWebMasterSource.KEY_PROPERTY), gscClient, wuState));
  }

  // One fetcher per comma-separated site property in the configuration value.
  private static List<GoogleWebmasterDataFetcher> createGoogleWebmasterDataFetchers(String properties,
      GoogleWebmasterClient gscClient, WorkUnitState wuState)
      throws IOException {
    List<GoogleWebmasterDataFetcher> fetchers = new ArrayList<>();
    Iterable<String> props = splitter.split(properties);
    for (String prop : props) {
      fetchers.add(new GoogleWebmasterDataFetcherImpl(prop, gscClient, wuState));
    }
    return fetchers;
  }

  /**
   * For test only
   */
  GoogleWebmasterExtractor(WorkUnitState wuState, long lowWatermark, long expectedHighWaterMark,
      Map<String, Integer> columnPositionMap, List<GoogleWebmasterFilter.Dimension> requestedDimensions,
      List<GoogleWebmasterDataFetcher.Metric> requestedMetrics, JsonArray schemaJson,
      List<GoogleWebmasterDataFetcher> dataFetchers) {
    // Watermarks arrive as long values in yyyyMMddHHmmss form; parse them back into dates.
    DateTimeFormatter watermarkFormatter = DateTimeFormat.forPattern("yyyyMMddHHmmss");
    _startDate = watermarkFormatter.parseDateTime(Long.toString(lowWatermark));
    _expectedHighWaterMark = expectedHighWaterMark;
    _expectedHighWaterMarkDate = watermarkFormatter.parseDateTime(Long.toString(expectedHighWaterMark));
    log.info(String.format("Creating GoogleWebmasterExtractor for [%s, %s] for job %s.", _startDate.toString(),
        _expectedHighWaterMarkDate.toString(), wuState.getProp(ConfigurationKeys.SOURCE_ENTITY)));

    _wuState = wuState;
    _schema = schemaJson;
    _includeSource = wuState.getWorkunit().getPropAsBoolean(GoogleWebMasterSource.KEY_INCLUDE_SOURCE_PROPERTY,
        GoogleWebMasterSource.DEFAULT_INCLUDE_SOURCE_PROPERTY);

    // Cartesian product: one iterator per (data fetcher, filter group).
    Iterable<Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter>> filterGroups = getFilterGroups(wuState);
    for (GoogleWebmasterDataFetcher dataFetcher : dataFetchers) {
      for (Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter> filters : filterGroups) {
        List<GoogleWebmasterFilter.Dimension> actualDimensionRequests = new ArrayList<>(requestedDimensions);
        //Need to remove the dimension from actualDimensionRequests if the filter for that dimension is ALL/Aggregated
        for (Map.Entry<GoogleWebmasterFilter.Dimension, ApiDimensionFilter> filter : filters.entrySet()) {
          if (filter.getValue() == null) {
            actualDimensionRequests.remove(filter.getKey());
          }
        }
        String startDate = dateFormatter.print(_startDate);
        String endDate = dateFormatter.print(_expectedHighWaterMarkDate);
        GoogleWebmasterExtractorIterator iterator =
            new GoogleWebmasterExtractorIterator(dataFetcher, startDate, endDate, actualDimensionRequests,
                requestedMetrics, filters, wuState);
        log.info("Created " + iterator.toString());
        // positionMapping is to address the cases when requested dimensions/metrics order
        // is different from the column order in source.schema
        int[] positionMapping = new int[actualDimensionRequests.size() + requestedMetrics.size()];
        int i = 0;
        for (; i < actualDimensionRequests.size(); ++i) {
          positionMapping[i] = columnPositionMap.get(actualDimensionRequests.get(i).toString());
        }
        for (GoogleWebmasterDataFetcher.Metric requestedMetric : requestedMetrics) {
          positionMapping[i++] = columnPositionMap.get(requestedMetric.toString());
        }
        //One positionMapping is corresponding to one iterator.
        _iterators.add(iterator);
        _positionMaps.add(positionMapping);
      }
    }
  }

  /**
   * Currently, the filter group is just one filter at a time, there is no cross-dimension filters combination.
   * TODO: May need to implement this feature in the future based on use cases.
   */
  private Iterable<Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter>> getFilterGroups(WorkUnitState wuState) {
    List<Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter>> filters = new ArrayList<>();
    for (String filter : splitter.split(wuState.getProp(GoogleWebMasterSource.KEY_REQUEST_FILTERS))) {
      // Filters are configured as "DIMENSION.VALUE" pairs, e.g. "country.usa".
      String[] parts = Iterables.toArray(Splitter.on(".").split(filter), String.class);
      String dimString = parts[0].toUpperCase();
      String valueString = parts[1].toUpperCase();

      GoogleWebmasterFilter.Dimension dimension = GoogleWebmasterFilter.Dimension.valueOf(dimString);
      Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter> map = new HashMap<>();

      if (dimension == GoogleWebmasterFilter.Dimension.COUNTRY) {
        map.put(GoogleWebmasterFilter.Dimension.COUNTRY, GoogleWebmasterFilter.countryEqFilter(valueString));
      } else {
        throw new UnsupportedOperationException("Only country filter is supported for now");
      }
      filters.add(map);
    }
    return filters;
  }

  @Override
  public String getSchema()
      throws IOException {
    return _schema.toString();
  }

  /**
   * Returns the next record in output-schema order, or null when every iterator is exhausted.
   * If the current iterator has failed, ALL iterators processed so far are re-created and
   * extraction restarts from the beginning (task retry reuses this extractor instance).
   */
  @Override
  public String[] readRecord(@Deprecated String[] reuse)
      throws DataRecordException, IOException {
    while (_current < _iterators.size()) {
      GoogleWebmasterExtractorIterator iterator = _iterators.get(_current);
      if (iterator.isFailed()) {
        log.info(String.format("Extractor failed at iterator %d: %s", _current, iterator.toString()));
        // Task retry reuses the same extractor instead of creating a new one.
        // Reinitialize processed iterators and set extractor to restart from the very beginning
        for (int i = 0; i <= _current; ++i) {
          _iterators.set(i, new GoogleWebmasterExtractorIterator(_iterators.get(i)));
        }
        log.info(String.format("Resetting _current index from %d to 0 to restart from the beginning", _current));
        _current = 0;
        iterator = _iterators.get(_current);
      }

      int[] positionMap = _positionMaps.get(_current);
      if (iterator.hasNext()) {
        String[] apiResponse = iterator.next();
        int size = _schema.size();
        String[] record = new String[size];
        // Scatter the API row into schema positions; untouched slots stay null.
        for (int i = 0; i < positionMap.length; ++i) {
          record[positionMap[i]] = apiResponse[i];
        }
        //unfilled elements should be nullable.
        if (_includeSource) {
          record[size - 1] = iterator.getProperty();
        }
        return record;
      }
      log.info(iterator.toString() + " finished successfully. ^_^");
      ++_current;
    }
    return null;
  }

  @Override
  public long getExpectedRecordCount() {
    if (_current == _iterators.size()) {
      //Any positive number will be okay.
      //Need to add this because of this commit:
      //76ae45a by ibuenros on 12/20/16 at 11:34AM Query based source will reset to low watermark
      // if previous run did not process any data for that table.
      return 1;
    }
    return 0;
  }

  @Override
  public long getHighWatermark() {
    throw new UnsupportedOperationException("This method has been deprecated!");
  }

  /**
   * Advances the actual high watermark only when every iterator completed; otherwise the next
   * run will re-fetch the same range.
   */
  @Override
  public void close()
      throws IOException {
    if (_current == _iterators.size()) {
      log.info(String.format("Successfully finished fetching data from Google Search Console from %s to %s.",
          dateFormatter.print(_startDate), dateFormatter.print(_expectedHighWaterMarkDate)));
      _wuState.setActualHighWatermark(new LongWatermark(_expectedHighWaterMark));
    } else {
      log.error(String.format("Had problems fetching data from Google Search Console from %s to %s.",
          dateFormatter.print(_startDate), dateFormatter.print(_expectedHighWaterMarkDate)));
    }
  }

  /**
   * For test only
   */
  List<GoogleWebmasterExtractorIterator> getIterators() {
    return _iterators;
  }

  /**
   * For test only
   */
  List<int[]> getPositionMaps() {
    return _positionMaps;
  }
}
3,668
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterExtractorIterator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.ingestion.google.webmaster; import java.io.IOException; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Deque; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import com.google.api.client.googleapis.batch.json.JsonBatchCallback; import com.google.api.client.googleapis.json.GoogleJsonError; import com.google.api.client.http.HttpHeaders; import com.google.api.client.repackaged.com.google.common.base.Preconditions; import com.google.api.services.webmasters.model.ApiDimensionFilter; import com.google.api.services.webmasters.model.SearchAnalyticsQueryResponse; import com.google.common.base.Optional; import com.google.common.base.Joiner; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.ingestion.google.AsyncIteratorWithDataSink; import 
org.apache.gobblin.ingestion.google.GoggleIngestionConfigurationKeys;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.limiter.RateBasedLimiter;


/**
 * This iterator holds a GoogleWebmasterDataFetcher, through which it gets all pages. And then for each page, it will
 * get all query data (Clicks, Impressions, CTR, Position). Basically, it will cache all pages got, and for each page,
 * cache the detailed query data, and then iterate through them one by one.
 */
@Slf4j
class GoogleWebmasterExtractorIterator extends AsyncIteratorWithDataSink<String[]> {
  // Throttles batch submissions to stay under the Search Console API QPS quota.
  private final RateBasedLimiter LIMITER;
  // Minutes to wait for one round of download threads to finish before giving up.
  private final int ROUND_TIME_OUT;
  // Number of jobs bundled into one batched API request.
  private final int BATCH_SIZE;
  private final int TRIE_GROUP_SIZE;
  // When true, pages are regrouped via a URL trie before querying.
  private final boolean APPLY_TRIE_ALGO;
  private final int MAX_RETRY_ROUNDS;
  // Milliseconds to sleep between retry rounds.
  private final int ROUND_COOL_DOWN;
  private final int PAGE_LIMIT;
  private final int QUERY_LIMIT;
  private final GoogleWebmasterDataFetcher _webmaster;
  private final String _startDate;
  private final String _endDate;
  private final String _country;
  private final Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter> _filterMap;
  //This is the requested dimensions sent to Google API
  private final List<GoogleWebmasterFilter.Dimension> _requestedDimensions;
  private final List<GoogleWebmasterDataFetcher.Metric> _requestedMetrics;
  private final WorkUnitState _wuState;
  // Set to true when the producer fails; exposed through isFailed().
  private boolean _failed = false;

  /**
   * Copy constructor: builds a fresh iterator sharing the same configuration as {@code iterator}.
   */
  public GoogleWebmasterExtractorIterator(GoogleWebmasterExtractorIterator iterator) {
    this(iterator._webmaster, iterator._startDate, iterator._endDate, iterator._requestedDimensions,
        iterator._requestedMetrics, iterator._filterMap, iterator._wuState);
  }

  /**
   * Reads all tuning knobs (page/query limits, retry rounds, batch size, QPS, trie grouping) from the work unit
   * state and validates them. A PAGE filter in {@code filterMap} is rejected — page enumeration is owned by this
   * class.
   */
  public GoogleWebmasterExtractorIterator(GoogleWebmasterDataFetcher webmaster, String startDate, String endDate,
      List<GoogleWebmasterFilter.Dimension> requestedDimensions,
      List<GoogleWebmasterDataFetcher.Metric> requestedMetrics,
      Map<GoogleWebmasterFilter.Dimension, ApiDimensionFilter> filterMap, WorkUnitState wuState) {
    super(wuState.getPropAsInt(GoggleIngestionConfigurationKeys.SOURCE_ASYNC_ITERATOR_BLOCKING_QUEUE_SIZE, 2000),
        wuState.getPropAsInt(GoggleIngestionConfigurationKeys.SOURCE_ASYNC_ITERATOR_POLL_BLOCKING_TIME, 1));
    _wuState = wuState;
    Preconditions.checkArgument(!filterMap.containsKey(GoogleWebmasterFilter.Dimension.PAGE),
        "Doesn't support filters for page for the time being. Will implement support later. If page filter is provided, the code won't take the responsibility of get all pages, so it will just return all queries for that page.");
    _webmaster = webmaster;
    _startDate = startDate;
    _endDate = endDate;
    _requestedDimensions = requestedDimensions;
    _requestedMetrics = requestedMetrics;
    _filterMap = filterMap;
    _country = GoogleWebmasterFilter.countryFilterToString(filterMap.get(GoogleWebmasterFilter.Dimension.COUNTRY));

    PAGE_LIMIT = wuState.getPropAsInt(GoogleWebMasterSource.KEY_REQUEST_PAGE_LIMIT, GoogleWebmasterClient.API_ROW_LIMIT);
    Preconditions.checkArgument(PAGE_LIMIT >= 1, "Page limit must be at least 1.");

    QUERY_LIMIT = wuState.getPropAsInt(GoogleWebMasterSource.KEY_REQUEST_QUERY_LIMIT, GoogleWebmasterClient.API_ROW_LIMIT);
    Preconditions.checkArgument(QUERY_LIMIT >= 1, "Query limit must be at least 1.");

    ROUND_TIME_OUT = wuState.getPropAsInt(GoogleWebMasterSource.KEY_QUERIES_TUNING_TIME_OUT, 120);
    Preconditions.checkArgument(ROUND_TIME_OUT > 0, "Time out must be positive.");

    MAX_RETRY_ROUNDS = wuState.getPropAsInt(GoogleWebMasterSource.KEY_QUERIES_TUNING_RETRIES, 40);
    Preconditions.checkArgument(MAX_RETRY_ROUNDS >= 0, "Retry rounds cannot be negative.");

    ROUND_COOL_DOWN = wuState.getPropAsInt(GoogleWebMasterSource.KEY_QUERIES_TUNING_COOL_DOWN, 250);
    Preconditions.checkArgument(ROUND_COOL_DOWN >= 0, "Initial cool down time cannot be negative.");

    // QPS limit can be found at
    // https://developers.google.com/webmaster-tools/search-console-api-original/v3/limits
    // Setting the default QPS to be 2 batches per second with a batch of size 2.
    // So the default QPS is set at 2*2=4.
    double batchesPerSecond = wuState.getPropAsDouble(GoogleWebMasterSource.KEY_QUERIES_TUNING_BATCHES_PER_SECOND, 2);
    Preconditions.checkArgument(batchesPerSecond > 0, "Requests per second must be positive.");

    BATCH_SIZE = wuState.getPropAsInt(GoogleWebMasterSource.KEY_QUERIES_TUNING_BATCH_SIZE, 2);
    Preconditions.checkArgument(BATCH_SIZE >= 1, "Batch size must be at least 1.");
    LIMITER = new RateBasedLimiter(batchesPerSecond, TimeUnit.SECONDS);

    TRIE_GROUP_SIZE = wuState.getPropAsInt(GoogleWebMasterSource.KEY_QUERIES_TUNING_GROUP_SIZE, 500);
    Preconditions.checkArgument(TRIE_GROUP_SIZE >= 1, "Group size must be at least 1.");

    APPLY_TRIE_ALGO = wuState.getPropAsBoolean(GoogleWebMasterSource.KEY_REQUEST_TUNING_ALGORITHM, false);
    if (APPLY_TRIE_ALGO) {
      Preconditions.checkArgument(PAGE_LIMIT == GoogleWebmasterClient.API_ROW_LIMIT,
          "Page limit must be set at 5000 if you want to use the advanced algorithm. This indicates that you understand what you are doing.");
    }
  }

  /**
   * Fetches the full page list for the configured date range/country and wraps it in a ResponseProducer.
   * On any failure, marks this iterator failed, sleeps (to respect API quota before a task-level retry),
   * then rethrows as RuntimeException.
   */
  @Override
  protected Runnable getProducerRunnable() {
    try {
      log.info("Start getting all pages for " + this.toString());
      Collection<ProducerJob> allJobs = _webmaster.getAllPages(_startDate, _endDate, _country, PAGE_LIMIT);
      return new ResponseProducer(allJobs);
    } catch (Exception e) {
      log.info(this.toString() + " failed while creating a ResponseProducer", e);
      _failed = true;
      sleepBeforeRetry();
      throw new RuntimeException(e);
    }
  }

  public boolean isFailed() {
    return _failed;
  }

  public String getCountry() {
    return _country;
  }

  public String getProperty() {
    return _webmaster.getSiteProperty();
  }

  @Override
  public String toString() {
    return String
        .format("GoogleWebmasterExtractorIterator{property=%s, startDate=%s, endDate=%s, country=%s}", getProperty(),
            _startDate, _endDate, _country);
  }

  /**
   * Sleeps 20 seconds before a task-level retry so the retried task does not immediately hit the API quota again.
   */
  private static void sleepBeforeRetry() {
    try {
      log.info("Sleep 20 seconds before task level retry");
      //Sleep 20 seconds before restarting, we need to set this because:
      // 1. Gobblin sleeps for 0 seconds at the first retry.
      // 2. Gobblin doesn't sleep between subsequent tasks.
      //See the quota limit at https://developers.google.com/webmaster-tools/search-console-api-original/v3/limits
      Thread.sleep(20000);
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * ResponseProducer gets the query data for allPages in an async way.
   * It utilize Executors.newCachedThreadPool to submit API request in a configurable speed.
   * API request speed can be tuned by BATCHES_PER_SECOND, ROUND_COOL_DOWN, COOL_DOWN_STEP and MAX_RETRY_ROUNDS.
   * The speed must be controlled because it cannot succeed the Google API quota, which can be found in your Google API Manager.
   * If you send the request too fast, you will get "403 Forbidden - Quota Exceeded" exception. Those pages will be handled by next round of retries.
   */
  private class ResponseProducer implements Runnable {
    // Jobs still waiting to be submitted in the current round; only polled by the producer thread.
    private Deque<ProducerJob> _jobsToProcess;

    /**
     * When the trie algorithm is enabled, regroups all pages into TrieBasedProducerJobs of at most
     * TRIE_GROUP_SIZE pages each; otherwise reuses (or copies) the incoming job collection as-is.
     */
    ResponseProducer(Collection<ProducerJob> jobs) {
      int size = jobs.size();
      if (size == 0) {
        _jobsToProcess = new ArrayDeque<>();
        return;
      }

      if (APPLY_TRIE_ALGO) {
        List<String> pages = new ArrayList<>(size);
        for (ProducerJob job : jobs) {
          pages.add(job.getPage());
        }
        UrlTrie trie = new UrlTrie(_webmaster.getSiteProperty(), pages);
        UrlTriePrefixGrouper grouper = new UrlTriePrefixGrouper(trie, TRIE_GROUP_SIZE);
        //Doesn't need to be a ConcurrentLinkedDeque, because it will only be read by one thread.
        _jobsToProcess = new ArrayDeque<>(size);
        while (grouper.hasNext()) {
          _jobsToProcess.add(new TrieBasedProducerJob(_startDate, _endDate, grouper.next(), grouper.getGroupSize()));
        }
      } else {
        if (jobs.getClass().equals(ArrayDeque.class)) {
          // Avoid copying when the caller already handed us an ArrayDeque.
          _jobsToProcess = (ArrayDeque<ProducerJob>) jobs;
        } else {
          //Doesn't need to be a ConcurrentLinkedDeque, because it will only be read by one thread.
          _jobsToProcess = new ArrayDeque<>(jobs);
        }
      }
    }

    /**
     * Drains _jobsToProcess in rounds: each round submits jobs in batches of BATCH_SIZE to a 10-thread pool,
     * waits up to ROUND_TIME_OUT minutes, and then retries whatever the workers pushed into `retries`.
     * Stops when a round produces no retries or MAX_RETRY_ROUNDS is exceeded (remaining jobs are logged as
     * hot-start JSON so a later run can resume).
     */
    @Override
    public void run() {
      int r = 0; //indicates current round.
      try {
        //check if any seed got adding back.
        while (r <= MAX_RETRY_ROUNDS) {
          int totalPages = 0;
          for (ProducerJob job : _jobsToProcess) {
            totalPages += job.getPagesSize();
          }
          if (r > 0) {
            log.info(String.format("Starting #%d round retries of size %d for %s", r, totalPages, _country));
          }

          ProgressReporter reporter = new ProgressReporter(log, totalPages);
          //retries needs to be concurrent because multiple threads will write to it.
          ConcurrentLinkedDeque<ProducerJob> retries = new ConcurrentLinkedDeque<>();
          ExecutorService es = Executors.newFixedThreadPool(10,
              ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));

          List<ProducerJob> batch = new ArrayList<>(BATCH_SIZE);
          while (!_jobsToProcess.isEmpty()) {
            //This is the only place to poll job from queue. Writing to a new queue is async.
            ProducerJob job = _jobsToProcess.poll();
            if (batch.size() < BATCH_SIZE) {
              batch.add(job);
            }
            if (batch.size() == BATCH_SIZE) {
              es.submit(getResponses(batch, retries, _dataSink, reporter));
              batch = new ArrayList<>(BATCH_SIZE);
            }
          }
          //Send the last batch
          if (!batch.isEmpty()) {
            es.submit(getResponses(batch, retries, _dataSink, reporter));
          }
          log.info(String.format("Submitted all jobs at round %d.", r));

          es.shutdown(); //stop accepting new requests
          boolean terminated = es.awaitTermination(ROUND_TIME_OUT, TimeUnit.MINUTES);
          if (!terminated) {
            es.shutdownNow();
            throw new RuntimeException(String.format(
                "Timed out while downloading query data for country-%s at round %d. Next round now has size %d.",
                _country, r, retries.size()));
          }

          if (retries.isEmpty()) {
            break; //game over
          }
          ++r;
          _jobsToProcess = retries;
          //Cool down before starting the next round of retry
          Thread.sleep(ROUND_COOL_DOWN);
        }
        if (r == MAX_RETRY_ROUNDS + 1) {
          log.error(String.format("Exceeded maximum retries. There are %d unprocessed jobs.", _jobsToProcess.size()));
          StringBuilder sb = new StringBuilder();
          sb.append("You can add as hot start jobs to continue: ").append(System.lineSeparator())
              .append(System.lineSeparator());
          sb.append(ProducerJob.serialize(_jobsToProcess));
          sb.append(System.lineSeparator());
          log.error(sb.toString());
        }
        log.info(String
            .format("ResponseProducer finishes for %s from %s to %s at retry round %d", _country, _startDate, _endDate,
                r));
      } catch (InterruptedException e) {
        // NOTE(review): the interrupt flag is not restored here before wrapping — confirm this is intentional.
        log.info(GoogleWebmasterExtractorIterator.this.toString() + " failed while executing the ResponseProducer");
        _failed = true;
        sleepBeforeRetry();
        throw new RuntimeException(e);
      }
    }

    /**
     * Call the API, then
     * OnSuccess: put each record into the responseQueue
     * OnFailure: add current job back to retries
     */
    private Runnable getResponse(final ProducerJob job, final ConcurrentLinkedDeque<ProducerJob> retries,
        final LinkedBlockingDeque<String[]> responseQueue, final ProgressReporter reporter) {
      return new Runnable() {
        @Override
        public void run() {
          try {
            final ArrayList<ApiDimensionFilter> filters = new ArrayList<>();
            filters.addAll(_filterMap.values());
            filters.add(GoogleWebmasterFilter.pageFilter(job.getOperator(), job.getPage()));

            LIMITER.acquirePermits(1);
            List<String[]> results = _webmaster
                .performSearchAnalyticsQuery(job.getStartDate(), job.getEndDate(), QUERY_LIMIT, _requestedDimensions,
                    _requestedMetrics, filters);
            onSuccess(job, results, responseQueue, retries);
            reporter.report(job.getPagesSize(), _country);
          } catch (IOException e) {
            onFailure(e.getMessage(), job, retries);
          } catch (InterruptedException e) {
            // NOTE(review): on interruption the job is only logged, not re-queued — confirm losing it is acceptable.
            log.error(String
                .format("Interrupted while trying to get queries for job %s. Current retry size is %d.", job,
                    retries.size()));
          }
        }
      };
    }

    /**
     * Call the APIs with a batch request
     * OnSuccess: put each record into the responseQueue
     * OnFailure: add current job to retries
     */
    private Runnable getResponses(final List<ProducerJob> jobs, final ConcurrentLinkedDeque<ProducerJob> retries,
        final LinkedBlockingDeque<String[]> responseQueue, final ProgressReporter reporter) {
      final int size = jobs.size();
      if (size == 1) {
        // A single job doesn't need batching; fall back to the one-shot request.
        return getResponse(jobs.get(0), retries, responseQueue, reporter);
      }
      final ResponseProducer producer = this;
      return new Runnable() {
        @Override
        public void run() {
          try {
            List<ArrayList<ApiDimensionFilter>> filterList = new ArrayList<>(size);
            List<JsonBatchCallback<SearchAnalyticsQueryResponse>> callbackList = new ArrayList<>(size);
            List<String> jobPages = new ArrayList<>();

            for (ProducerJob j : jobs) {
              jobPages.add(j.getPage());
              final ProducerJob job = j; //to capture this variable
              final String page = job.getPage();
              final ArrayList<ApiDimensionFilter> filters = new ArrayList<>();
              filters.addAll(_filterMap.values());
              filters.add(GoogleWebmasterFilter.pageFilter(job.getOperator(), page));

              filterList.add(filters);
              callbackList.add(new JsonBatchCallback<SearchAnalyticsQueryResponse>() {
                @Override
                public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException {
                  producer.onFailure(e.getMessage(), job, retries);
                  log.info(job.getPage() + " failed");
                }

                @Override
                public void onSuccess(SearchAnalyticsQueryResponse searchAnalyticsQueryResponse,
                    HttpHeaders responseHeaders) throws IOException {
                  List<String[]> results =
                      GoogleWebmasterDataFetcher.convertResponse(_requestedMetrics, searchAnalyticsQueryResponse);
                  producer.onSuccess(job, results, responseQueue, retries);
                  log.debug(job.getPage() + " succeeded");
                }
              });
            }
            log.debug("Submitting jobs: " + Arrays.toString(jobPages.toArray()));
            LIMITER.acquirePermits(1);
            _webmaster
                .performSearchAnalyticsQueryInBatch(jobs, filterList, callbackList, _requestedDimensions, QUERY_LIMIT);
            int processed = 0;
            for (ProducerJob job : jobs) {
              processed += job.getPagesSize();
            }
            reporter.report(processed, _country);
          } catch (IOException e) {
            // Batch-level failure: every job in the batch goes back for retry.
            log.warn("Batch request failed. Jobs: " + Joiner.on(",").join(jobs));
            for (ProducerJob job : jobs) {
              retries.add(job);
            }
          } catch (InterruptedException e) {
            // NOTE(review): on interruption the jobs are only logged, not re-queued — confirm losing them is acceptable.
            log.error(String.format("Interrupted while trying to get queries for jobs %s. Current retry size is %d.",
                Joiner.on(",").join(jobs), retries.size()));
          }
        }
      };
    }

    // Queues the failed job for the next retry round.
    private void onFailure(String errMsg, ProducerJob job, ConcurrentLinkedDeque<ProducerJob> retries) {
      log.debug(String.format("OnFailure: will retry job %s.%sReason:%s", job, System.lineSeparator(), errMsg));
      retries.add(job);
    }

    /**
     * If the response hit the API row limit, the job likely has more data than one request can return, so it is
     * partitioned into finer-grained jobs and re-queued instead of being emitted. Otherwise every row is pushed
     * into the response queue (blocking when the queue is full).
     */
    private void onSuccess(ProducerJob job, List<String[]> results, LinkedBlockingDeque<String[]> responseQueue,
        ConcurrentLinkedDeque<ProducerJob> pagesToRetry) {
      int size = results.size();
      if (size == GoogleWebmasterClient.API_ROW_LIMIT) {
        List<? extends ProducerJob> granularJobs = job.partitionJobs();
        if (granularJobs.isEmpty()) {
          //The job is not divisible
          //TODO: 99.99% cases we are good. But what if it happens, what can we do?
          log.warn(String.format(
              "There might be more query data for your job %s. Currently, downloading more than the Google API limit '%d' is not supported.",
              job, GoogleWebmasterClient.API_ROW_LIMIT));
        } else {
          log.info(String.format("Partition current job %s", job));
          pagesToRetry.addAll(granularJobs);
          return;
        }
      }

      log.debug(
          String.format("Finished %s. Current Queue size: %d. Record size: %d.", job, responseQueue.size(), size));
      try {
        for (String[] r : results) {
          responseQueue.put(r);
        }
      } catch (InterruptedException e) {
        log.error(e.getMessage());
        throw new RuntimeException(e);
      }
    }
  }
}

/**
 * Logs download progress at a fixed number of checkpoints (default 20, i.e. roughly every 5% of the total).
 * report() is synchronized, so it is safe to call from the producer's worker threads.
 */
class ProgressReporter {
  private volatile int checkPointCount = 0; //Current check point accumulator
  private volatile int totalProcessed = 0; //Total processed accumulator
  private final Logger _log;
  private final int _total; //Total number of jobs.
  private final int _checkPoint; //report at every check point

  ProgressReporter(Logger log, int total) {
    this(log, total, 20);
  }

  /**
   * @param total indicate the total size of the job
   * @param frequency indicate the frequency of reporting.
   * e.g. If set frequency to 20. Then, the reporter will report 20 times at every 5%.
   */
  private ProgressReporter(Logger log, int total, int frequency) {
    _log = log;
    _total = total;
    _checkPoint = (int) Math.max(1, Math.ceil(1.0 * total / frequency));
  }

  public synchronized void report(int progress, String country) {
    checkPointCount += progress;
    if (checkPointCount >= _checkPoint) {
      totalProcessed += checkPointCount;
      checkPointCount = 0;
      _log.info(String.format("Current progress: %d of %d processed for %s", totalProcessed, _total, country));
    }
  }
}
3,669
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/UrlTriePostOrderIterator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;

import org.apache.commons.lang3.tuple.Pair;

import com.google.api.client.repackaged.com.google.common.base.Preconditions;


/**
 * This is a post-order iterator that traverses the nodes on the URL trie with a stopping rule, which is, it will not
 * go deeper into the nodes whose size (defined as the number of descendant URLs and itself if itself is a URL page)
 * is less than or equal to the stopping size. In other words, those nodes with size less than or equal to the
 * stopping size will be treated as leaf nodes.
 *
 * Iteration value:
 * Pair.1 is the full path to current node.
 * Pair.2 is current node.
 */
public class UrlTriePostOrderIterator implements Iterator<Pair<String, UrlTrieNode>> {

  // Stopping size: nodes with getSize() <= _groupSize are treated as leaves.
  private final int _groupSize;
  // Accumulates the characters on the path from the trie prefix down to the parent of _currentNode.
  private final StringBuilder _currentPrefixSb;
  // Explicit traversal stack of ancestors not yet returned (post-order).
  private Deque<UrlTrieNode> _unprocessed = new ArrayDeque<>();
  private UrlTrieNode _currentNode;
  // The node most recently handed out by next(); used to detect when all children of a parent are done.
  private UrlTrieNode _lastVisited = null;
  // Node staged by hasNext() for the following next() call; null when nothing is staged.
  private UrlTrieNode _toReturn;

  public UrlTriePostOrderIterator(UrlTrie trie, int stoppingSize) {
    Preconditions.checkArgument(stoppingSize > 0);
    _currentNode = trie.getRoot();
    String prefix = trie.getPrefix();
    _currentPrefixSb = new StringBuilder();
    if (prefix != null) {
      _currentPrefixSb.append(prefix);
    }
    _groupSize = stoppingSize;
  }

  /**
   * Advances the traversal until a returnable node is staged in _toReturn (or the trie is exhausted).
   * Idempotent: repeated calls without an intervening next() do no extra work.
   */
  @Override
  public boolean hasNext() {
    if (_toReturn != null) {
      return true;
    }

    while (!_unprocessed.isEmpty() || !isStoppingNode(_currentNode)) {
      if (!isStoppingNode(_currentNode)) {
        //keep going down if not at leaf
        _unprocessed.push(_currentNode);
        _currentPrefixSb.append(_currentNode.getValue());
        Map.Entry<Character, UrlTrieNode> next = _currentNode.children.firstEntry();
        if (next == null) {
          _currentNode = null;
        } else {
          _currentNode = next.getValue();
        }
      } else {
        UrlTrieNode peekNode = _unprocessed.peek();
        if (_currentNode != null || peekNode.children.isEmpty()
            || peekNode.children.lastEntry().getValue() == _lastVisited) {
          //_currentNode is a returnable stopping node
          if (_currentNode != null) {
            _toReturn = _currentNode;
          } else {
            // All children of the stack top are done; pop it and shorten the prefix by its one character.
            _toReturn = _unprocessed.pop();
            _currentPrefixSb.setLength(_currentPrefixSb.length() - 1);
          }

          //If there is no parent, it's the last one; otherwise, move to right
          UrlTrieNode parent = _unprocessed.peek();
          if (parent == null) {
            return true; //we've got the last one.
          }
          //move to the right sibling. Set to null, if there is no right sibling.
          Map.Entry<Character, UrlTrieNode> sibling = parent.children.higherEntry(_toReturn.getValue());
          if (sibling == null) {
            _currentNode = null;
          } else {
            _currentNode = sibling.getValue();
          }
          return true;
        } else {
          //hand over to the next loop to move right
          _currentNode = peekNode;
        }
      }
    }

    //This case happens when the whole trie has fewer URLs than the group size
    if (_lastVisited == null && _currentNode.getSize() > 0) {
      //_currentNode is now at the root node, which is a leaf by the iterator's definition
      _toReturn = _currentNode;
      return true;
    }
    return false;
  }

  /**
   * A node is a stopping node, from which you cannot go deeper, if
   * 1. this node is null
   * 2. this node has descendants <= groupSize, but this node is returnable
   */
  private boolean isStoppingNode(UrlTrieNode node) {
    return node == null || node.getSize() <= _groupSize;
  }

  @Override
  public Pair<String, UrlTrieNode> next() {
    if (hasNext()) {
      _lastVisited = _toReturn;
      _toReturn = null;
      // Full path = accumulated prefix + the returned node's own character.
      return Pair.of(_currentPrefixSb.toString() + _lastVisited.getValue(), _lastVisited);
    }
    throw new NoSuchElementException();
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }
}
3,670
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterClient.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.io.IOException;
import java.util.List;

import com.google.api.client.googleapis.batch.BatchRequest;
import com.google.api.services.webmasters.Webmasters;
import com.google.api.services.webmasters.model.ApiDimensionFilter;
import com.google.api.services.webmasters.model.ApiDimensionFilterGroup;


/**
 * Provide basic accesses to Google Search Console by utilizing Google Webmaster API.
 * Details about Google Webmaster or Google Search Console API can be found at
 * https://developers.google.com/webmaster-tools/
 */
public abstract class GoogleWebmasterClient {

  // Maximum number of rows a single Search Analytics API request can return.
  public static final int API_ROW_LIMIT = 5000;

  /**
   * Return all pages given all constraints.
   * @param siteProperty your site property string
   * @param startDate date string with format "yyyy-MM-dd"
   * @param endDate date string with format "yyyy-MM-dd"
   * @param country country code string
   * @param rowLimit limit the number of rows returned by the API. The API maximum limit is 5000.
   * @param requestedDimensions requested dimensions of the API call.
   * @param filters a list of filters. Include the country filter if you need it, even though you've provided the
   *                country string.
   * @param startRow this is a 0 based index configuration to set the starting row of your API request. Even though
   *                 the API row limit is 5000, you can send a request starting from row 5000, so you will be able to
   *                 get data from row 5000 to row 9999.
   * @return Return all pages given all constraints.
   * @throws IOException if the underlying API call fails
   */
  public abstract List<String> getPages(String siteProperty, String startDate, String endDate, String country,
      int rowLimit, List<GoogleWebmasterFilter.Dimension> requestedDimensions, List<ApiDimensionFilter> filters,
      int startRow) throws IOException;

  /**
   * Perform the api call for search analytics query
   * @param siteProperty your site property string
   * @param startDate date string with format "yyyy-MM-dd"
   * @param endDate date string with format "yyyy-MM-dd"
   * @param dimensions your requested dimensions
   * @param filterGroup filters for your API request. Provide your filters in a group, find utility functions in
   *                    GoogleWebmasterFilter
   * @param rowLimit the row limit for your API response
   * @param startRow this is a 0 based index configuration to set the starting row of your API request. Even though
   *                 the API row limit is 5000, you can send a request starting from row 5000, so you will be able to
   *                 get data from row 5000 to row 9999.
   * @return return the response of Google Webmaster API
   * @throws IOException if the query object cannot be built
   */
  public abstract Webmasters.Searchanalytics.Query createSearchAnalyticsQuery(String siteProperty, String startDate,
      String endDate, List<GoogleWebmasterFilter.Dimension> dimensions, ApiDimensionFilterGroup filterGroup,
      int rowLimit, int startRow) throws IOException;

  /**
   * Create a BatchRequest so several Search Analytics queries can be sent in one HTTP round trip.
   */
  public abstract BatchRequest createBatch();
}
3,671
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/ProducerJob.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

import org.joda.time.DateTime;
import org.joda.time.Days;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;


/**
 * A unit of download work for the Search Console extractor: one page (or page group) over a date range.
 */
public abstract class ProducerJob {

  static final DateTimeFormatter dateFormatter = DateTimeFormat.forPattern("yyyy-MM-dd");
  static final GsonBuilder gsonBuilder = new GsonBuilder();

  public abstract String getPage();

  /**
   * format is "yyyy-MM-dd"
   */
  public abstract String getStartDate();

  /**
   * format is "yyyy-MM-dd"
   */
  public abstract String getEndDate();

  public abstract GoogleWebmasterFilter.FilterOperator getOperator();

  /**
   * return how many pages are included in this job
   */
  public abstract int getPagesSize();

  /**
   * Splits this job's date range roughly in half, yielding two SimpleProducerJobs covering consecutive,
   * non-overlapping sub-ranges. A single-day job cannot be split and yields an empty list.
   */
  public List<? extends ProducerJob> partitionJobs() {
    DateTime rangeStart = dateFormatter.parseDateTime(getStartDate());
    DateTime rangeEnd = dateFormatter.parseDateTime(getEndDate());
    int daySpan = Days.daysBetween(rangeStart, rangeEnd).getDays();
    if (daySpan <= 0) {
      return new ArrayList<>();
    }

    int half = daySpan / 2;
    String firstHalfEnd = dateFormatter.print(rangeStart.plusDays(half));
    String secondHalfStart = dateFormatter.print(rangeStart.plusDays(half + 1));
    return Arrays.asList(
        new SimpleProducerJob(getPage(), getStartDate(), firstHalfEnd),
        new SimpleProducerJob(getPage(), secondHalfStart, getEndDate()));
  }

  /**
   * Serializes the jobs to a JSON array (each job normalized to a SimpleProducerJob) suitable for hot-start.
   */
  public static String serialize(Collection<ProducerJob> jobs) {
    //TODO: don't need to recreate objects if it's of type SimpleProducerJob
    Collection<ProducerJob> normalized = new ArrayList<>(jobs.size());
    for (ProducerJob original : jobs) {
      normalized.add(new SimpleProducerJob(original));
    }
    return gsonBuilder.create().toJson(normalized);
  }
}
3,672
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/SimpleProducerJob.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonParser;
import com.google.gson.reflect.TypeToken;


/**
 * The simplest ProducerJob: a single page with an EQUALS operator over one date range.
 */
public class SimpleProducerJob extends ProducerJob {

  private final String _page;
  private final String _startDate;
  private final String _endDate;
  // All SimpleProducerJobs query by exact page match.
  private static final GoogleWebmasterFilter.FilterOperator _operator = GoogleWebmasterFilter.FilterOperator.EQUALS;

  SimpleProducerJob(String page, String startDate, String endDate) {
    _page = page;
    _startDate = startDate;
    _endDate = endDate;
  }

  /**
   * Copy constructor that normalizes any ProducerJob into its simple form.
   */
  public SimpleProducerJob(ProducerJob job) {
    this(job.getPage(), job.getStartDate(), job.getEndDate());
  }

  /**
   * Deserializes a JSON array of jobs; null or blank input is treated as an empty list.
   */
  public static List<ProducerJob> deserialize(String jobs) {
    String json = (jobs == null || jobs.trim().isEmpty()) ? "[]" : jobs;
    JsonArray parsed = new JsonParser().parse(json).getAsJsonArray();
    return new Gson().fromJson(parsed, new TypeToken<ArrayList<SimpleProducerJob>>() {
    }.getType());
  }

  @Override
  public String getPage() {
    return _page;
  }

  @Override
  public String getStartDate() {
    return _startDate;
  }

  @Override
  public String getEndDate() {
    return _endDate;
  }

  @Override
  public GoogleWebmasterFilter.FilterOperator getOperator() {
    return _operator;
  }

  @Override
  public int getPagesSize() {
    return 1;
  }

  @Override
  public int hashCode() {
    return Objects.hash(_page, _startDate, _endDate, _operator);
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof SimpleProducerJob)) {
      // Also covers obj == null.
      return false;
    }
    SimpleProducerJob that = (SimpleProducerJob) obj;
    return Objects.equals(_page, that._page)
        && Objects.equals(_startDate, that._startDate)
        && Objects.equals(_endDate, that._endDate)
        && Objects.equals(_operator, that._operator);
  }

  @Override
  public String toString() {
    return String.format("SimpleProducerJob{_page='%s', _startDate='%s', _endDate='%s', _operator=%s}", _page,
        _startDate, _endDate, _operator);
  }
}
3,673
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterDayPartitioner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.ingestion.google.DayPartitioner;


/**
 * Day-based writer partitioner for Google Webmaster (Search Console) data.
 * Adds no behavior of its own — it simply binds the generic {@link DayPartitioner} to this source.
 */
public class GoogleWebmasterDayPartitioner extends DayPartitioner {
  public GoogleWebmasterDayPartitioner(State state, int numBranches, int branchId) {
    super(state, numBranches, branchId);
  }
}
3,674
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterDataFetcherImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import com.google.api.client.googleapis.batch.BatchRequest;
import com.google.api.client.googleapis.batch.json.JsonBatchCallback;
import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.api.services.webmasters.model.ApiDimensionFilter;
import com.google.api.services.webmasters.model.SearchAnalyticsQueryResponse;
import com.google.common.base.Optional;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.limiter.RateBasedLimiter;

import static org.apache.gobblin.ingestion.google.webmaster.GoogleWebmasterFilter.*;


/**
 * Fetches search-analytics data from the Google Search Console (Webmasters) API for one site
 * property. The API caps every query at {@code GoogleWebmasterClient.API_ROW_LIMIT} rows, so this
 * implementation discovers the full page set by recursively partitioning page-prefix filters
 * (see {@link #getAllPages}) and throttles all requests through a {@link RateBasedLimiter}.
 */
@Slf4j
public class GoogleWebmasterDataFetcherImpl extends GoogleWebmasterDataFetcher {
  // Request throttle applied to every API call made by this fetcher.
  private final double API_REQUESTS_PER_SECOND;
  private final RateBasedLimiter LIMITER;
  private final int PAGES_COUNT_COOLDOWN_TIME; //In seconds
  private final int PAGES_GET_COOLDOWN_TIME; //In seconds
  // Maximum number of retry rounds for both getPagesSize and getPages.
  private final int GET_PAGES_RETRIES;

  private final String _siteProperty;
  private final GoogleWebmasterClient _client;
  // Pre-seeded jobs for "hot start" mode; when non-empty, page discovery is skipped entirely.
  private final List<ProducerJob> _jobs;

  GoogleWebmasterDataFetcherImpl(String siteProperty, GoogleWebmasterClient client, State wuState)
      throws IOException {
    _siteProperty = siteProperty;
    Preconditions.checkArgument(_siteProperty.endsWith("/"), "The site property must end in \"/\"");
    _client = client;
    _jobs = getHotStartJobs(wuState);
    API_REQUESTS_PER_SECOND = wuState.getPropAsDouble(GoogleWebMasterSource.KEY_PAGES_TUNING_REQUESTS_PER_SECOND, 4.5);
    PAGES_COUNT_COOLDOWN_TIME = wuState.getPropAsInt(GoogleWebMasterSource.KEY_PAGES_COUNT_TUNING_COOLDOWN_TIME, 30);
    PAGES_GET_COOLDOWN_TIME = wuState.getPropAsInt(GoogleWebMasterSource.KEY_PAGES_GET_TUNING_COOLDOWN_TIME, 5);
    LIMITER = new RateBasedLimiter(API_REQUESTS_PER_SECOND, TimeUnit.SECONDS);
    GET_PAGES_RETRIES = wuState.getPropAsInt(GoogleWebMasterSource.KEY_PAGES_TUNING_MAX_RETRIES, 120);
  }

  /**
   * Deserializes pre-set producer jobs from configuration, or returns an empty list when the
   * hot-start property is absent/empty.
   */
  private static List<ProducerJob> getHotStartJobs(State wuState) {
    String hotStartString = wuState.getProp(GoogleWebMasterSource.KEY_REQUEST_HOT_START, "");
    if (!hotStartString.isEmpty()) {
      return SimpleProducerJob.deserialize(hotStartString);
    }
    return new ArrayList<>();
  }

  /**
   * Due to the limitation of the API, we can get a maximum of 5000 rows at a time. Another
   * limitation is that results are sorted by click count descending; rows with equal click count
   * are ordered arbitrarily (see https://developers.google.com/webmaster-tools/v3/searchanalytics).
   * So we fetch all pages by partitions: whenever a partition returns 5000 rows, we split it into
   * more granular prefix partitions and query again.
   */
  @Override
  public Collection<ProducerJob> getAllPages(String startDate, String endDate, String country, int rowLimit)
      throws IOException {
    log.info("Requested row limit: " + rowLimit);
    if (!_jobs.isEmpty()) {
      // Hot-start mode: pages were supplied in configuration, no API discovery needed.
      log.info("Service got hot started.");
      return _jobs;
    }
    ApiDimensionFilter countryFilter = GoogleWebmasterFilter.countryEqFilter(country);
    List<GoogleWebmasterFilter.Dimension> requestedDimensions = new ArrayList<>();
    requestedDimensions.add(GoogleWebmasterFilter.Dimension.PAGE);
    int expectedSize = -1;
    if (rowLimit >= GoogleWebmasterClient.API_ROW_LIMIT) {
      //expected size only makes sense when the data set size is larger than GoogleWebmasterClient.API_ROW_LIMIT
      expectedSize = getPagesSize(startDate, endDate, country, requestedDimensions, Arrays.asList(countryFilter));
      log.info(String.format("Expected number of pages is %d for market-%s from %s to %s", expectedSize,
          GoogleWebmasterFilter.countryFilterToString(countryFilter), startDate, endDate));
    }
    // Start with a single CONTAINS job over the whole site; getPages expands it as needed.
    Queue<Pair<String, FilterOperator>> jobs = new ArrayDeque<>();
    jobs.add(Pair.of(_siteProperty, FilterOperator.CONTAINS));
    Collection<String> allPages = getPages(startDate, endDate, requestedDimensions, countryFilter, jobs,
        Math.min(rowLimit, GoogleWebmasterClient.API_ROW_LIMIT));
    int actualSize = allPages.size();
    log.info(String.format("A total of %d pages fetched for property %s at country-%s from %s to %s", actualSize,
        _siteProperty, country, startDate, endDate));
    if (expectedSize != -1 && actualSize != expectedSize) {
      // Best-effort check only; the discovery still returns whatever it could fetch.
      log.warn(String.format("Expected page size is %d, but only able to get %d", expectedSize, actualSize));
    }
    ArrayDeque<ProducerJob> producerJobs = new ArrayDeque<>(actualSize);
    for (String page : allPages) {
      producerJobs.add(new SimpleProducerJob(page, startDate, endDate));
    }
    return producerJobs;
  }

  /**
   * @return the size of all pages data set
   *
   * Probes the total page count by issuing page queries at increasing start rows (in batches of
   * roughly API_REQUESTS_PER_SECOND tasks) until a probe comes back empty or the count stops
   * landing exactly on an API_ROW_LIMIT boundary.
   */
  int getPagesSize(final String startDate, final String endDate, final String country,
      final List<Dimension> requestedDimensions, final List<ApiDimensionFilter> apiDimensionFilters) {
    final ExecutorService es = Executors.newCachedThreadPool(
        ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
    int startRow = 0;
    long groupSize = Math.max(1, Math.round(API_REQUESTS_PER_SECOND));
    List<Future<Integer>> results = new ArrayList<>((int) groupSize);
    int max = -1;
    while (true) {
      for (int i = 0; i < groupSize; ++i) {
        final int start = startRow;
        startRow += GoogleWebmasterClient.API_ROW_LIMIT;
        Future<Integer> submit = es.submit(() -> {
          log.info(String.format("Getting page size from %s...", start));
          String interruptedMsg = String.format(
              "Interrupted while trying to get the size of all pages for %s. Current start row is %d.", country,
              start);
          int r = 0;
          while (r <= GET_PAGES_RETRIES) {
            ++r;
            try {
              LIMITER.acquirePermits(1);
            } catch (InterruptedException e) {
              // NOTE(review): interruption makes this probe return -1, which Collections.max may
              // then surface as the running maximum — confirm this sentinel is intended.
              log.error("RateBasedLimiter: " + interruptedMsg, e);
              return -1;
            }
            try {
              List<String> pages = _client.getPages(_siteProperty, startDate, endDate, country,
                  GoogleWebmasterClient.API_ROW_LIMIT, requestedDimensions, apiDimensionFilters, start);
              if (pages.size() == 0) {
                // No rows at this offset: the data set ends before 'start'.
                return 0;
              }
              int totalPages = pages.size() + start;
              log.info(String.format("At least %s pages exist. Continuing...", totalPages));
              return totalPages;
            } catch (IOException e) {
              // Typically quota/transient errors; back off and retry the same offset.
              log.info(String.format("Getting page size from %s failed due to %s. Retrying...", start,
                  e.getMessage()));
              coolDown(r, PAGES_COUNT_COOLDOWN_TIME);
            }
          }
          throw new RuntimeException(String.format(
              "Getting all pages reaches the maximum number of retires %d. Date range: %s ~ %s. Country: %s.",
              GET_PAGES_RETRIES, startDate, endDate, country));
        });
        results.add(submit);
      }
      List<Integer> pagesCount = new ArrayList<>();
      for (Future<Integer> result : results) {
        try {
          pagesCount.add(result.get());
        } catch (InterruptedException | ExecutionException e) {
          throw new RuntimeException(e);
        }
      }
      if (pagesCount.stream().allMatch(x -> x == 0)) {
        // Every probe in this batch was past the end of the data set; 'max' is the final count.
        return max;
      }
      max = Math.max(max, Collections.max(pagesCount));
      if (max % GoogleWebmasterClient.API_ROW_LIMIT != 0) {
        // A partial last page means we have seen the true end of the data set.
        return max;
      }
      results.clear();
    }
  }

  /**
   * Sleeps for an escalating interval: the base interval grows by one extra interval for every
   * 5 completed retries. (The local is named milliSeconds but holds seconds until the *= 1000.)
   */
  private void coolDown(int r, int secondsInterval) {
    int milliSeconds = secondsInterval + (r / 5) * secondsInterval;
    milliSeconds *= 1000;
    log.info(String.format("Sleeping for %s seconds", milliSeconds / 1000));
    try {
      Thread.sleep(milliSeconds);
    } catch (InterruptedException e1) {
      throw new RuntimeException(e1);
    }
  }

  /**
   * Get all pages in an async mode.
   *
   * Processes the queue of (prefix, operator) jobs round by round on a 10-thread pool. Jobs that
   * fail or return a full page (API_ROW_LIMIT rows) are re-queued — possibly expanded into finer
   * prefixes — into 'nextRound' by {@link #submitJob}. Gives up after GET_PAGES_RETRIES rounds.
   */
  private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
      ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess, int rowLimit) {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);
    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
    int r = 0;
    while (r <= GET_PAGES_RETRIES) {
      ++r;
      log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
      ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
      ExecutorService es = Executors.newFixedThreadPool(10,
          ExecutorsUtils.newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
      while (!toProcess.isEmpty()) {
        submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages, nextRound, rowLimit);
      }
      //wait for jobs to finish and start next round if necessary.
      try {
        es.shutdown();
        boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
        if (!terminated) {
          es.shutdownNow();
          log.warn("Timed out while getting all pages for country-{} at round {}. Next round now has size {}.",
              country, r, nextRound.size());
        }
      } catch (InterruptedException e) {
        throw new RuntimeException(e);
      }
      if (nextRound.isEmpty()) {
        break;
      }
      toProcess = nextRound;
      coolDown(r, PAGES_GET_COOLDOWN_TIME);
    }
    if (r == GET_PAGES_RETRIES + 1) {
      // Loop exhausted without draining the queue.
      throw new RuntimeException(
          String.format("Getting all pages reaches the maximum number of retires %d. Date range: %s ~ %s. Country: %s.",
              GET_PAGES_RETRIES, startDate, endDate, country));
    }
    return allPages;
  }

  /**
   * Submits one (prefix, operator) query to the executor. On success with fewer than
   * API_ROW_LIMIT rows the pages are collected; on a full page the prefix is expanded into
   * sub-prefixes for the next round; on IOException the same job is retried next round.
   */
  private void submitJob(final Pair<String, FilterOperator> job, final ApiDimensionFilter countryFilter,
      final String startDate, final String endDate, final List<Dimension> dimensions, ExecutorService es,
      final ConcurrentLinkedDeque<String> allPages,
      final ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound, final int rowLimit) {
    es.submit(new Runnable() {
      @Override
      public void run() {
        try {
          LIMITER.acquirePermits(1);
        } catch (InterruptedException e) {
          throw new RuntimeException("RateBasedLimiter got interrupted.", e);
        }
        String countryString = countryFilterToString(countryFilter);
        List<ApiDimensionFilter> filters = new LinkedList<>();
        filters.add(countryFilter);
        String prefix = job.getLeft();
        FilterOperator operator = job.getRight();
        String jobString = String.format("job(prefix: %s, operator: %s)", prefix, operator);
        filters.add(GoogleWebmasterFilter.pageFilter(operator, prefix));
        List<String> pages;
        try {
          pages = _client.getPages(_siteProperty, startDate, endDate, countryString, rowLimit, dimensions, filters, 0);
          log.debug(
              String.format("%d pages fetched for %s market-%s from %s to %s.", pages.size(), jobString, countryString,
                  startDate, endDate));
        } catch (IOException e) {
          log.debug(String.format("%s failed due to %s. Retrying...", jobString, e.getMessage()));
          nextRound.add(job);
          return;
        }
        //If the number of pages is at the LIMIT, it must be a "CONTAINS" job.
        //We need to create sub-tasks, and check current page with "EQUALS"
        if (pages.size() == GoogleWebmasterClient.API_ROW_LIMIT) {
          log.info(String.format("Expanding the prefix '%s'", prefix));
          nextRound.add(Pair.of(prefix, FilterOperator.EQUALS));
          for (String expanded : getUrlPartitions(prefix)) {
            nextRound.add(Pair.of(expanded, FilterOperator.CONTAINS));
          }
        } else {
          //Otherwise, we've done with current job.
          allPages.addAll(pages);
        }
      }
    });
  }

  /**
   * Expands a page prefix into finer sub-prefixes by appending each plausible next URL character.
   *
   * This doesn't cover all cases but more than 99.9% captured.
   *
   * According to the standard (RFC-3986), here are possible characters:
   * unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
   * reserved = gen-delims / sub-delims
   * gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
   * sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
   *
   * Not included:
   * reserved = gen-delims / sub-delims
   * gen-delims = "[" / "]"
   * sub-delims = "(" / ")" / "," / ";"
   */
  private ArrayList<String> getUrlPartitions(String prefix) {
    ArrayList<String> expanded = new ArrayList<>();
    //The page prefix is case insensitive, A-Z is not necessary.
    for (char c = 'a'; c <= 'z'; ++c) {
      expanded.add(prefix + c);
    }
    for (int num = 0; num <= 9; ++num) {
      expanded.add(prefix + num);
    }
    expanded.add(prefix + "-");
    expanded.add(prefix + ".");
    expanded.add(prefix + "_"); //most important
    expanded.add(prefix + "~");
    expanded.add(prefix + "/"); //most important
    expanded.add(prefix + "%"); //most important
    expanded.add(prefix + ":");
    expanded.add(prefix + "?");
    expanded.add(prefix + "#");
    expanded.add(prefix + "@");
    expanded.add(prefix + "!");
    expanded.add(prefix + "$");
    expanded.add(prefix + "&");
    expanded.add(prefix + "+");
    expanded.add(prefix + "*");
    expanded.add(prefix + "'");
    expanded.add(prefix + "=");
    return expanded;
  }

  /**
   * Runs a single synchronous search-analytics query and converts the response rows into
   * string arrays via the base class's convertResponse.
   */
  @Override
  public List<String[]> performSearchAnalyticsQuery(String startDate, String endDate, int rowLimit,
      List<Dimension> requestedDimensions, List<Metric> requestedMetrics, Collection<ApiDimensionFilter> filters)
      throws IOException {
    SearchAnalyticsQueryResponse response =
        _client.createSearchAnalyticsQuery(_siteProperty, startDate, endDate, requestedDimensions,
            GoogleWebmasterFilter.andGroupFilters(filters), rowLimit, 0).execute();
    return convertResponse(requestedMetrics, response);
  }

  /**
   * Queues one query per job into a single Google batch request and executes it; results are
   * delivered through the positionally matching callbacks. jobs, filterList and callbackList
   * must be index-aligned.
   */
  @Override
  public void performSearchAnalyticsQueryInBatch(List<ProducerJob> jobs,
      List<ArrayList<ApiDimensionFilter>> filterList, List<JsonBatchCallback<SearchAnalyticsQueryResponse>> callbackList,
      List<Dimension> requestedDimensions, int rowLimit) throws IOException {
    BatchRequest batchRequest = _client.createBatch();
    for (int i = 0; i < jobs.size(); ++i) {
      ProducerJob job = jobs.get(i);
      ArrayList<ApiDimensionFilter> filters = filterList.get(i);
      JsonBatchCallback<SearchAnalyticsQueryResponse> callback = callbackList.get(i);
      _client.createSearchAnalyticsQuery(_siteProperty, job.getStartDate(), job.getEndDate(), requestedDimensions,
          GoogleWebmasterFilter.andGroupFilters(filters), rowLimit, 0).queue(batchRequest, callback);
    }
    batchRequest.execute();
  }

  @Override
  public String getSiteProperty() {
    return _siteProperty;
  }
}
3,675
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/UrlTriePrefixGrouper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.NoSuchElementException;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.lang3.tuple.Triple;


/**
 * Walks a {@link UrlTrie} in post order and emits groups of at most {@code groupSize} pages.
 * A node whose subtree fits in the group size is emitted as a CONTAINS group (covering the whole
 * subtree); a larger node that itself is a real page is emitted as an EQUALS group for just that
 * page. Larger non-page nodes are skipped — their descendants have already been emitted.
 */
public class UrlTriePrefixGrouper implements UrlGrouper {
  private final int _groupSize;
  private final UrlTrie _trie;
  private final Iterator<Pair<String, UrlTrieNode>> _iterator;
  // Group computed by hasNext() and not yet handed out by next(); null when none is buffered.
  private Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> _pending;

  public UrlTriePrefixGrouper(UrlTrie trie, int groupSize) {
    _trie = trie;
    _groupSize = groupSize;
    _iterator = new UrlTriePostOrderIterator(trie, groupSize);
  }

  @Override
  public boolean hasNext() {
    if (_pending != null) {
      return true;
    }
    while (_iterator.hasNext()) {
      Pair<String, UrlTrieNode> candidate = _iterator.next();
      UrlTrieNode node = candidate.getRight();
      if (node.getSize() <= _groupSize) {
        // The whole subtree fits into one group.
        _pending = Triple.of(candidate.getLeft(), GoogleWebmasterFilter.FilterOperator.CONTAINS, node);
        return true;
      }
      if (node.isExist()) {
        // Subtree too large, but this exact URL is a page of its own.
        _pending = Triple.of(candidate.getLeft(), GoogleWebmasterFilter.FilterOperator.EQUALS, node);
        return true;
      }
    }
    return false;
  }

  @Override
  public Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> result = _pending;
    _pending = null;
    return result;
  }

  public UrlTrie getTrie() {
    return _trie;
  }

  /**
   * Get the detailed pages under this group: the single page for an EQUALS group, or every
   * existing page in the subtree for a CONTAINS group.
   */
  public static ArrayList<String> groupToPages(
      Triple<String, GoogleWebmasterFilter.FilterOperator, UrlTrieNode> group) {
    ArrayList<String> pages = new ArrayList<>();
    GoogleWebmasterFilter.FilterOperator operator = group.getMiddle();
    UrlTrieNode node = group.getRight();
    if (GoogleWebmasterFilter.FilterOperator.EQUALS.equals(operator)) {
      if (node.isExist()) {
        pages.add(group.getLeft());
      }
    } else if (GoogleWebmasterFilter.FilterOperator.CONTAINS.equals(operator)) {
      // Re-walk the subtree rooted at this group and collect each real page.
      Iterator<Pair<String, UrlTrieNode>> nodes =
          new UrlTriePostOrderIterator(new UrlTrie(group.getLeft(), node), 1);
      while (nodes.hasNext()) {
        Pair<String, UrlTrieNode> entry = nodes.next();
        if (entry.getRight().isExist()) {
          pages.add(entry.getLeft());
        }
      }
    }
    return pages;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  @Override
  public int getGroupSize() {
    return _groupSize;
  }
}
3,676
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/UrlTrieNode.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.util.TreeMap;


/**
 * A node of a character-level trie over URL path strings. Each node stores one character; a path
 * is inserted one character per level, and a node is flagged "exist" when an actual page ends at
 * that node. Improvements over the previous revision: removed dead commented-out nextSibling()
 * code and made {@code _value} final (it is assigned only in the constructor).
 */
public class UrlTrieNode {
  // Immediate, first-level children keyed by the next path character; TreeMap keeps a
  // deterministic (sorted) traversal order for iterators built on top of this trie.
  public TreeMap<Character, UrlTrieNode> children = new TreeMap<>();
  private final Character _value;
  // True when an actual page terminates exactly at this node.
  private boolean _exist = false;
  // The count/size of all actual pages stored at this node or anywhere below it
  // (children, grand-children, etc.).
  private int _size = 0;

  public UrlTrieNode(Character value) {
    _value = value;
  }

  /**
   * Inserts {@code path} below this node, creating intermediate nodes as needed.
   * Every node along the way (including this one) has its size incremented once,
   * and the terminal node is marked as an existing page.
   */
  public void add(String path) {
    UrlTrieNode parent = this;
    parent.increaseCount();
    for (int i = 0; i < path.length(); ++i) {
      Character c = path.charAt(i);
      UrlTrieNode child = parent.children.get(c);
      if (child == null) {
        child = new UrlTrieNode(c);
        parent.children.put(c, child);
      }
      child.increaseCount();
      parent = child;
    }
    parent._exist = true;
  }

  /**
   * Descends along {@code path} one character at a time.
   *
   * @return the node reached (this node itself for an empty path), or null if the path
   *         leaves the trie.
   */
  public UrlTrieNode getChild(String path) {
    UrlTrieNode parent = this;
    for (int i = 0; i < path.length(); ++i) {
      Character c = path.charAt(i);
      UrlTrieNode child = parent.children.get(c);
      if (child == null) {
        return null;
      }
      parent = child;
    }
    return parent;
  }

  public Character getValue() {
    return _value;
  }

  public boolean isExist() {
    return _exist;
  }

  public int getSize() {
    return _size;
  }

  public void increaseCount() {
    ++_size;
  }

  @Override
  public String toString() {
    return "UrlTrieNode{" + "_value=" + _value + ", _exist=" + _exist + ", _size=" + _size + '}';
  }
}
3,677
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterFilter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;

import com.google.api.services.webmasters.model.ApiDimensionFilter;
import com.google.api.services.webmasters.model.ApiDimensionFilterGroup;


/**
 * Helpers for building Google Search Console (Webmasters) API dimension filters and for
 * validating ISO 3166-1 alpha-3 country codes. Improvements over the previous revision:
 * {@code countryCodes} is now final (assigned once in the static initializer) and case
 * normalization uses {@link Locale#ROOT} so it is independent of the JVM default locale
 * (e.g. the Turkish dotless-i casing rules).
 */
public class GoogleWebmasterFilter {
  //Reference http://www.nationsonline.org/oneworld/country_code_list.htm for a full list of "ISO 3166-1 alpha-3 country code"
  private static final HashSet<String> countryCodes;

  static {
    String[] countries = Locale.getISOCountries();
    countryCodes = new HashSet<>(countries.length);
    for (String country : countries) {
      Locale locale = new Locale("", country);
      countryCodes.add(locale.getISO3Country());
    }
  }

  enum Dimension {
    DATE, PAGE, COUNTRY, QUERY, DEVICE, SEARCH_TYPE, SEARCH_APPEARANCE
  }

  enum FilterOperator {
    EQUALS, CONTAINS, NOTCONTAINS
  }

  private static ApiDimensionFilter build(String dimension, String operator, String expression) {
    return new ApiDimensionFilter().setDimension(dimension).setOperator(operator).setExpression(expression);
  }

  /** Builds a PAGE-dimension filter. The operator string is case insensitive on the API side. */
  static ApiDimensionFilter pageFilter(FilterOperator op, String expression) {
    //Operator string is case insensitive
    return build(Dimension.PAGE.toString(), op.toString(), expression);
  }

  /**
   * Builds a COUNTRY equals-filter, or returns null for "ALL" (meaning no country restriction).
   *
   * @throws RuntimeException if the country code is not a valid ISO 3166-1 alpha-3 code.
   */
  static ApiDimensionFilter countryEqFilter(String country) {
    String countryCode = validateCountryCode(country);
    if (countryCode.equals("ALL")) {
      return null;
    }
    return build(Dimension.COUNTRY.toString(), FilterOperator.EQUALS.toString().toLowerCase(), countryCode);
  }

  /** @return the country expression of the filter, or "ALL" when the filter is null. */
  static String countryFilterToString(ApiDimensionFilter countryFilter) {
    String country;
    if (countryFilter == null) {
      country = "ALL";
    } else {
      country = countryFilter.getExpression();
    }
    return country;
  }

  /** Combines filters with AND semantics; returns null for a null/empty collection. */
  static ApiDimensionFilterGroup andGroupFilters(Collection<ApiDimensionFilter> filters) {
    if (filters == null || filters.isEmpty()) {
      return null;
    }
    List<ApiDimensionFilter> filtersList;
    if (filters instanceof List) {
      filtersList = (List<ApiDimensionFilter>) filters;
    } else {
      filtersList = new ArrayList<>(filters);
    }
    return new ApiDimensionFilterGroup().setFilters(filtersList).setGroupType("and");
  }

  /**
   * Upper-cases and validates a country code.
   *
   * @return the upper-cased code ("ALL" or a valid ISO 3166-1 alpha-3 code)
   * @throws RuntimeException for any unknown code
   */
  static String validateCountryCode(String countryCode) {
    // Locale.ROOT keeps the result stable regardless of the JVM's default locale.
    String upper = countryCode.toUpperCase(Locale.ROOT);
    if (upper.equals("ALL") || countryCodes.contains(upper)) {
      return upper;
    }
    throw new RuntimeException(String.format(
        "Unknown country code '%s' in configuration file. Please provide a valid ISO 3166-1 alpha-3 country code. Use 'ALL' if you want to download data without a country filter.",
        countryCode));
  }
}
3,678
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebmasterClientImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import com.google.api.client.auth.oauth2.Credential;
import com.google.api.client.googleapis.batch.BatchRequest;
import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.api.services.webmasters.Webmasters;
import com.google.api.services.webmasters.model.ApiDataRow;
import com.google.api.services.webmasters.model.ApiDimensionFilter;
import com.google.api.services.webmasters.model.ApiDimensionFilterGroup;
import com.google.api.services.webmasters.model.SearchAnalyticsQueryRequest;
import com.google.api.services.webmasters.model.SearchAnalyticsQueryResponse;

import org.apache.gobblin.source.extractor.extract.google.GoogleCommon;


/**
 * {@link GoogleWebmasterClient} backed by the official Webmasters (Search Console) service.
 * Wraps query construction and page extraction around a single authenticated service instance.
 */
public class GoogleWebmasterClientImpl extends GoogleWebmasterClient {
  private final Webmasters.Searchanalytics _analytics;
  private final Webmasters _service;

  public GoogleWebmasterClientImpl(Credential credential, String appName) throws IOException {
    //transport: new NetHttpTransport() or GoogleNetHttpTransport.newTrustedTransport()
    //jsonFactory: new JacksonFactory() or JacksonFactory.getDefaultInstance()
    _service = new Webmasters.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(), credential)
        .setApplicationName(appName).build();
    _analytics = _service.searchanalytics();
  }

  @Override
  public BatchRequest createBatch() {
    return _service.batch();
  }

  @Override
  public List<String> getPages(String siteProperty, String startDate, String endDate, String country, int rowLimit,
      List<GoogleWebmasterFilter.Dimension> requestedDimensions, List<ApiDimensionFilter> filters, int startRow)
      throws IOException {
    checkRowLimit(rowLimit);
    Preconditions.checkArgument(requestedDimensions.contains(GoogleWebmasterFilter.Dimension.PAGE));

    SearchAnalyticsQueryResponse response =
        createSearchAnalyticsQuery(siteProperty, startDate, endDate, requestedDimensions,
            GoogleWebmasterFilter.andGroupFilters(filters), rowLimit, startRow).execute();

    List<String> pages = new ArrayList<>(rowLimit);
    List<ApiDataRow> rows = response.getRows();
    if (rows == null) {
      // The API omits the rows field entirely when nothing matches.
      return pages;
    }
    // Each row's keys are ordered like the requested dimensions; pull out the PAGE position.
    int pagePosition = requestedDimensions.indexOf(GoogleWebmasterFilter.Dimension.PAGE);
    for (ApiDataRow row : rows) {
      pages.add(row.getKeys().get(pagePosition));
    }
    return pages;
  }

  @Override
  public Webmasters.Searchanalytics.Query createSearchAnalyticsQuery(String siteProperty, String startDate,
      String endDate, List<GoogleWebmasterFilter.Dimension> dimensions, ApiDimensionFilterGroup filterGroup,
      int rowLimit, int startRow) throws IOException {
    // The API expects lower-cased dimension names.
    List<String> dimensionStrings = new ArrayList<>();
    for (GoogleWebmasterFilter.Dimension dimension : dimensions) {
      dimensionStrings.add(dimension.toString().toLowerCase());
    }
    SearchAnalyticsQueryRequest request = new SearchAnalyticsQueryRequest()
        .setStartDate(startDate)
        .setEndDate(endDate)
        .setRowLimit(rowLimit)
        .setDimensions(dimensionStrings)
        .setStartRow(startRow);
    if (filterGroup != null) {
      request.setDimensionFilterGroups(Arrays.asList(filterGroup));
    }
    return _analytics.query(siteProperty, request);
  }

  private static void checkRowLimit(int rowLimit) {
    Preconditions.checkArgument(rowLimit > 0 && rowLimit <= API_ROW_LIMIT,
        "Row limit for Google Search Console API must be within range (0, 5000]");
  }
}
3,679
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/UrlTrie.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.ingestion.google.webmaster;

import java.util.Collection;

import org.apache.commons.lang3.tuple.Pair;


/**
 * A character-level trie over URL strings sharing a common root page. The root page's last
 * character becomes the trie root node; the rest of the root page is kept as {@code _prefix}.
 * Improvement over the previous revision: {@code getPrefixAndDefaultRoot} is now static — it is
 * a pure helper invoked from the constructors before any instance state exists.
 */
public class UrlTrie {
  private final UrlTrieNode _root;
  private final String _prefix;

  /**
   * @param rootPage use the longest common prefix as your _root page.
   * e.g. if your pages are "www.linkedin.com/in/", "www.linkedin.com/jobs/", "www.linkedin.com/groups/"
   * The longest common prefix is "www.linkedin.com/", and it will be your _root page.
   * And the last "/" will be used as a TrieRoot.
   * @param pages pages to insert; each must start with the root page
   */
  public UrlTrie(String rootPage, Collection<String> pages) {
    Pair<String, UrlTrieNode> defaults = getPrefixAndDefaultRoot(rootPage);
    _prefix = defaults.getLeft();
    _root = defaults.getRight();
    for (String page : pages) {
      add(page);
    }
  }

  /**
   * Builds a trie around an existing root node. The prefix differs from the rootPage in that the
   * rootPage carries one extra trailing char, which corresponds to the supplied root node.
   */
  public UrlTrie(String rootPage, UrlTrieNode root) {
    // Only the prefix is taken from the helper; the caller supplies the (already populated) root.
    Pair<String, UrlTrieNode> defaults = getPrefixAndDefaultRoot(rootPage);
    _prefix = defaults.getLeft();
    _root = root;
  }

  /**
   * Splits a root page into (prefix = all but the last char, fresh root node = last char).
   * A null/empty root page yields a null prefix and a valueless root node.
   */
  private static Pair<String, UrlTrieNode> getPrefixAndDefaultRoot(String rootPage) {
    if (rootPage == null || rootPage.isEmpty()) {
      return Pair.of(null, new UrlTrieNode(null));
    } else {
      String prefix = rootPage.substring(0, rootPage.length() - 1);
      Character lastChar = rootPage.charAt(rootPage.length() - 1);
      return Pair.of(prefix, new UrlTrieNode(lastChar));
    }
  }

  /**
   * Inserts a page into the trie.
   *
   * @throws IllegalArgumentException if the page does not start with the trie's prefix
   */
  public void add(String page) {
    if (_prefix == null || _prefix.isEmpty()) {
      _root.add(page);
    } else {
      if (!page.startsWith(_prefix)) {
        throw new IllegalArgumentException(
            String.format("Found a page '%s' not starting with the root page '%s'", page, _prefix));
      }
      _root.add(page.substring(_prefix.length() + 1)); //1 comes from the last char in root.
    }
  }

  /** Descends from the root along {@code path}; null if the path is not in the trie. */
  public UrlTrieNode getChild(String path) {
    return _root.getChild(path);
  }

  public UrlTrieNode getRoot() {
    return _root;
  }

  public String getPrefix() {
    return _prefix;
  }
}
3,680
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/ingestion/google/webmaster/GoogleWebMasterSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.ingestion.google.webmaster; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import com.google.gson.JsonParser; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.converter.avro.JsonElementConversionFactory; import org.apache.gobblin.ingestion.google.util.SchemaUtil; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.extract.QueryBasedSource; import org.apache.gobblin.source.workunit.WorkUnit; /** * Google Webmaster API enables you to download data from Google Search Console for search analytics of the verified sites. See more here https://developers.google.com/webmaster-tools/. Configure the Google Webmaster Source for starting a daily job to download search analytics data. This gobblin job partitions the whole task into sub-tasks for each day. 
Each sub-task is handled by a GoogleWebmasterExtractor for that date, and each GoogleWebmasterExtractor
 * holds a queue of GoogleWebmasterExtractorIterators, each of which does the query task for each
 * filter (currently, only the country filter is supported) on that date.
 *
 * The minimum unit of querying range is date. Change the range by configuring
 * "source.querybased.start.value" and "source.querybased.end.value". Note that the analytics data for
 * Google Search Console has a delay of 3 days, so cap your configuration of
 * "source.querybased.append.max.watermark.limit" by "CURRENTDATE-3". See the documentation details of
 * each configuration in the GoogleWebMasterSource fields.
 */
@Alpha
abstract class GoogleWebMasterSource extends QueryBasedSource<String, String[]> {
  public static final String SOURCE_GOOGLE_WEBMASTER_PREFIX = "source.google_webmasters.";
  /**
   * Required.
   * The property site URL whose Google Search Console analytics data you want to download.
   */
  public static final String KEY_PROPERTY = SOURCE_GOOGLE_WEBMASTER_PREFIX + "property_urls";
  /**
   * Optional. Defaults to false.
   * Determines whether to add the source property as the last column of your configured schema.
   */
  public static final String KEY_INCLUDE_SOURCE_PROPERTY = SOURCE_GOOGLE_WEBMASTER_PREFIX + "source_property.include";
  /**
   * Optional. Defaults to "Source".
   * Determines the column name for the additional source-property origin column, if included.
   */
  public static final String KEY_SOURCE_PROPERTY_COLUMN_NAME = SOURCE_GOOGLE_WEBMASTER_PREFIX + "source_property.column_name";
  /**
   * The filters that will be passed to all your API requests.
   * Filter format is [GoogleWebmasterFilter.Dimension].[DimensionValue].
   * Currently the filter operator is "EQUALS" and only the Country dimension is supported. This
   * feature will be extended according to more use cases in the future.
   */
  public static final String KEY_REQUEST_FILTERS = SOURCE_GOOGLE_WEBMASTER_PREFIX + "request.filters";
  /**
   * Required.
   * Allowed dimensions can be found in the enum GoogleWebmasterFilter.Dimension.
   */
  public static final String KEY_REQUEST_DIMENSIONS = SOURCE_GOOGLE_WEBMASTER_PREFIX + "request.dimensions";
  /**
   * Required.
   * Allowed metrics can be found in the enum GoogleWebmasterDataFetcher.Metric.
   */
  public static final String KEY_REQUEST_METRICS = SOURCE_GOOGLE_WEBMASTER_PREFIX + "request.metrics";
  /**
   * Optional. Defaults to 5000, which is the maximum allowed.
   * The response row limit when you ask for pages. Set it to 5000 when you want to get all pages,
   * which might be larger than 5000.
   */
  public static final String KEY_REQUEST_PAGE_LIMIT = SOURCE_GOOGLE_WEBMASTER_PREFIX + "request.page_limit";
  /**
   * Optional. Defaults to the empty string.
   * Hot-starts this service with pre-set pages. Once this is set, the service will ignore
   * KEY_REQUEST_PAGE_LIMIT and won't fetch all pages, but will use the pre-set pages instead.
   */
  public static final String KEY_REQUEST_HOT_START = SOURCE_GOOGLE_WEBMASTER_PREFIX + "request.hot_start";
  /**
   * Optional. Defaults to 5000, which is the maximum allowed.
   * The response row limit when you ask for queries.
   */
  public static final String KEY_REQUEST_QUERY_LIMIT = SOURCE_GOOGLE_WEBMASTER_PREFIX + "request.query_limit";

  public static final String TUNING = SOURCE_GOOGLE_WEBMASTER_PREFIX + "request.tuning.";

  // ===============================================
  // ========= GET QUERIES TUNING BEGIN ==========
  // ===============================================
  public static final String QUERIES_TUNING = TUNING + "get_queries.";
  /**
   * Optional. Defaults to 120 minutes.
   * Sets the timeout in minutes for each round while getting queries.
   */
  public static final String KEY_QUERIES_TUNING_TIME_OUT = QUERIES_TUNING + "time_out";
  /**
   * Optional. Defaults to 40.
   * Tunes the maximum rounds of retries while getting queries.
   * NOTE(review): "max_reties" is a typo for "max_retries", but the literal is kept as-is because
   * existing job configurations depend on this exact key.
   */
  public static final String KEY_QUERIES_TUNING_RETRIES = QUERIES_TUNING + "max_reties";
  /**
   * Optional. Defaults to 250 milliseconds.
   * Tunes the cool-down time between each round of retry.
   */
  public static final String KEY_QUERIES_TUNING_COOL_DOWN = QUERIES_TUNING + "cool_down_time";
  /**
   * Optional. Defaults to 2.25 batches per second.
   * Tunes the speed of API requests.
   */
  public static final String KEY_QUERIES_TUNING_BATCHES_PER_SECOND = QUERIES_TUNING + "batches_per_second";
  /**
   * Optional. Defaults to 2.
   * Tunes the size of a batch. Batch API calls together to reduce the number of HTTP connections.
   * Note: a set of n requests batched together counts toward your usage limit as n requests, not as
   * one request. The batch request is taken apart into a set of requests before processing.
   * Read more at https://developers.google.com/webmaster-tools/v3/how-tos/batch
   */
  public static final String KEY_QUERIES_TUNING_BATCH_SIZE = QUERIES_TUNING + "batch_size";
  /**
   * Optional. Defaults to 500.
   * Sets the group size for UrlTriePrefixGrouper.
   */
  public static final String KEY_QUERIES_TUNING_GROUP_SIZE = QUERIES_TUNING + "trie_group_size";
  /**
   * Optional. Defaults to false.
   * Chooses whether to apply the trie-based algorithm while getting all queries.
   *
   * If set to true, you also need to set page_limit to 5000, indicating that you want all pages,
   * because the trie-based algorithm won't give expected results if you only need a subset of all
   * pages.
   */
  public static final String KEY_REQUEST_TUNING_ALGORITHM = QUERIES_TUNING + "apply_trie";
  // =============================================
  // ========= GET QUERIES TUNING END ==========
  // =============================================

  // =============================================
  // ========= GET PAGES TUNING BEGIN ==========
  // =============================================
  public static final String PAGES_TUNING = TUNING + "get_pages.";
  /**
   * Optional. Defaults to 5.0.
   * Tunes the speed of API requests while getting all pages.
   */
  public static final String KEY_PAGES_TUNING_REQUESTS_PER_SECOND = PAGES_TUNING + "requests_per_second";
  /**
   * Optional. Defaults to 120.
   * Tunes the number of maximum retries while getting all pages. Consider the following affecting
   * factors while setting this number:
   * 1. the length of the shared prefix path may be very long
   * 2. the Quota Exceeded exception
   */
  public static final String KEY_PAGES_TUNING_MAX_RETRIES = PAGES_TUNING + "max_retries";
  /**
   * Optional. Defaults to 30 seconds.
   * Sets the cool-down time in seconds while getting the page count.
   */
  public static final String KEY_PAGES_COUNT_TUNING_COOLDOWN_TIME = PAGES_TUNING + "size.cooldown";
  /**
   * Optional. Defaults to 5 seconds.
   * Sets the cool-down time in seconds while getting all pages.
   */
  public static final String KEY_PAGES_GET_TUNING_COOLDOWN_TIME = PAGES_TUNING + "get.cooldown";
  // =============================================
  // ========= GET PAGES TUNING END ============
  // =============================================

  // Splits comma-separated configuration values, dropping empty entries and trimming whitespace.
  private final static Splitter splitter = Splitter.on(",").omitEmptyStrings().trimResults();
  public static final boolean DEFAULT_INCLUDE_SOURCE_PROPERTY = false;
  public static final String DEFAULT_SOURCE_PROPERTY_COLUMN_NAME = "Source";

  /**
   * Builds the column-position map from the configured source schema, optionally appends the
   * source-property column, validates the requested filters/dimensions/metrics against the schema,
   * and delegates extractor construction to the subclass.
   *
   * @param state the work-unit state carrying the job configuration
   * @return the extractor created by {@link #createExtractor}
   * @throws IOException if the subclass fails to create the extractor
   */
  @Override
  public Extractor<String, String[]> getExtractor(WorkUnitState state)
      throws IOException {
    List<GoogleWebmasterFilter.Dimension> requestedDimensions = getRequestedDimensions(state);
    List<GoogleWebmasterDataFetcher.Metric> requestedMetrics = getRequestedMetrics(state);

    WorkUnit workunit = state.getWorkunit();
    String schema = workunit.getProp(ConfigurationKeys.SOURCE_SCHEMA);
    JsonArray schemaJson = new JsonParser().parse(schema).getAsJsonArray();
    // Map each upper-cased column name in the configured schema to its position index.
    Map<String, Integer> columnPositionMap = new HashMap<>();
    for (int i = 0; i < schemaJson.size(); ++i) {
      JsonElement jsonElement = schemaJson.get(i);
      String columnName = jsonElement.getAsJsonObject().get("columnName").getAsString().toUpperCase();
      columnPositionMap.put(columnName, i);
    }

    // Optionally append the source-property origin column as the last schema column.
    if (workunit.getPropAsBoolean(GoogleWebMasterSource.KEY_INCLUDE_SOURCE_PROPERTY, DEFAULT_INCLUDE_SOURCE_PROPERTY)) {
      String columnName = workunit.getProp(KEY_SOURCE_PROPERTY_COLUMN_NAME, DEFAULT_SOURCE_PROPERTY_COLUMN_NAME);
      schemaJson.add(SchemaUtil.createColumnJson(columnName, false, JsonElementConversionFactory.Type.STRING));
    }

    validateFilters(state.getProp(GoogleWebMasterSource.KEY_REQUEST_FILTERS));
    validateRequests(columnPositionMap, requestedDimensions, requestedMetrics);
    return createExtractor(state, columnPositionMap, requestedDimensions, requestedMetrics, schemaJson);
  }

  /**
   * Subclass hook that builds the concrete extractor once the schema and requests have been
   * validated.
   */
  abstract GoogleWebmasterExtractor createExtractor(WorkUnitState state, Map<String, Integer> columnPositionMap,
      List<GoogleWebmasterFilter.Dimension> requestedDimensions,
      List<GoogleWebmasterDataFetcher.Metric> requestedMetrics, JsonArray schemaJson)
      throws IOException;

  /**
   * Validates the country codes of any COUNTRY.* filters. Other filter dimensions pass through
   * unchecked.
   */
  private void validateFilters(String filters) {
    String countryPrefix = "COUNTRY.";
    for (String filter : splitter.split(filters)) {
      if (filter.toUpperCase().startsWith(countryPrefix)) {
        GoogleWebmasterFilter.validateCountryCode(filter.substring(countryPrefix.length()));
      }
    }
  }

  /**
   * Ensures every requested dimension and metric has a matching column in the configured schema.
   */
  private void validateRequests(Map<String, Integer> columnPositionMap,
      List<GoogleWebmasterFilter.Dimension> requestedDimensions,
      List<GoogleWebmasterDataFetcher.Metric> requestedMetrics) {
    for (GoogleWebmasterFilter.Dimension dimension : requestedDimensions) {
      Preconditions.checkState(columnPositionMap.containsKey(dimension.toString()),
          "Your requested dimension must exist in the source.schema.");
    }
    for (GoogleWebmasterDataFetcher.Metric metric : requestedMetrics) {
      Preconditions.checkState(columnPositionMap.containsKey(metric.toString()),
          "Your requested metric must exist in the source.schema.");
    }
  }

  /**
   * Parses the comma-separated KEY_REQUEST_DIMENSIONS property into Dimension enum values
   * (case-insensitive).
   */
  private List<GoogleWebmasterFilter.Dimension> getRequestedDimensions(WorkUnitState wuState) {
    List<GoogleWebmasterFilter.Dimension> dimensions = new ArrayList<>();
    String dimensionsString = wuState.getProp(GoogleWebMasterSource.KEY_REQUEST_DIMENSIONS);
    for (String dim : splitter.split(dimensionsString)) {
      dimensions.add(GoogleWebmasterFilter.Dimension.valueOf(dim.toUpperCase()));
    }
    return dimensions;
  }

  /**
   * Parses the comma-separated KEY_REQUEST_METRICS property into Metric enum values
   * (case-insensitive).
   */
  private List<GoogleWebmasterDataFetcher.Metric> getRequestedMetrics(WorkUnitState wuState) {
    List<GoogleWebmasterDataFetcher.Metric> metrics = new ArrayList<>();
    String metricsString = wuState.getProp(GoogleWebMasterSource.KEY_REQUEST_METRICS);
    for (String metric : splitter.split(metricsString)) {
      metrics.add(GoogleWebmasterDataFetcher.Metric.valueOf(metric.toUpperCase()));
    }
    return metrics;
  }
}
3,681
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleDriveFsHelper.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.google; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.List; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.api.services.drive.Drive; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.io.Closer; import org.apache.gobblin.configuration.State; import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException; import org.apache.gobblin.source.extractor.filebased.TimestampAwareFileBasedHelper; /** * File system helper for Google drive. 
*/ public class GoogleDriveFsHelper implements TimestampAwareFileBasedHelper { private static final Logger LOG = LoggerFactory.getLogger(GoogleDriveFsHelper.class); static final String BUFFER_SIZE_BYTE = GoogleCommonKeys.GOOGLE_SOURCE_PREFIX + "buffer_size_bytes"; private final FileSystem fileSystem; private final Closer closer; private final Optional<Integer> bufferSizeByte; public GoogleDriveFsHelper(State state, Drive client) { this(state, client, Closer.create()); } @VisibleForTesting GoogleDriveFsHelper(State state, Drive client, Closer closer) { this.closer = closer; this.fileSystem = this.closer.register(new GoogleDriveFileSystem(client)); if (state.contains(BUFFER_SIZE_BYTE)) { this.bufferSizeByte = Optional.of(state.getPropAsInt(BUFFER_SIZE_BYTE)); } else { this.bufferSizeByte = Optional.absent(); } } @Override public long getFileSize(String fileId) throws FileBasedHelperException { Preconditions.checkNotNull(fileId, "fileId is required"); Path p = new Path(fileId); try { FileStatus status = fileSystem.getFileStatus(p); return status.getLen(); } catch (IOException e) { throw new FileBasedHelperException("Failed to get metadata on " + fileId, e); } } @Override public void connect() throws FileBasedHelperException { //None of other methods would not work without connect(). To make it simple, cstr already made it connected //so that it does not need to worry about having initialized or not and deal with race condition on initializing. } @Override public void close() throws IOException { closer.close(); } /** * List files under folder ID recursively. Folder won't be included in the result. If there's no files under folder ID, it returns empty list. * If folder ID is not defined, it will provide files under root directory. 
* {@inheritDoc} * @see org.apache.gobblin.source.extractor.filebased.FileBasedHelper#ls(java.lang.String) */ @Override public List<String> ls(String folderId) throws FileBasedHelperException { List<String> result = new ArrayList<>(); if (StringUtils.isEmpty(folderId)) { folderId = "/"; } Path p = new Path(folderId); FileStatus[] statusList = null; try { statusList = fileSystem.listStatus(p); } catch (FileNotFoundException e) { return result; } catch (IOException e) { throw new FileBasedHelperException("Falied to list status on path " + p + ", folderID: " + folderId, e); } for (FileStatus status : statusList) { if (status.isDirectory()) { result.addAll(ls(GoogleDriveFileSystem.toFileId(status.getPath()))); } else { result.add(GoogleDriveFileSystem.toFileId(status.getPath())); } } return result; } @Override public InputStream getFileStream(String fileId) throws FileBasedHelperException { Preconditions.checkNotNull(fileId, "fileId is required"); Path p = new Path(fileId); try { if (bufferSizeByte.isPresent()) { return fileSystem.open(p, bufferSizeByte.get()); } return fileSystem.open(p); } catch (IOException e) { throw new FileBasedHelperException("Failed to open files stream on path: " + p + " , fileId: " + fileId, e); } } /** * Permanently delete the file from Google drive (skipping trash) * @param path * @throws IOException */ public void deleteFile(String fileId) throws IOException { Preconditions.checkNotNull(fileId, "fileId is required"); Path p = new Path(fileId); if (LOG.isDebugEnabled()) { LOG.debug("Deleting path: " + p + " , fileId: " + fileId); } fileSystem.delete(p, true); } @Override public long getFileMTime(String fileId) throws FileBasedHelperException { Preconditions.checkNotNull(fileId, "fileId is required"); Path p = new Path(fileId); try { FileStatus status = fileSystem.getFileStatus(p); return status.getModificationTime(); } catch (IOException e) { throw new FileBasedHelperException("Failed to retrieve getModificationTime on path: " + p + " , 
fileId: " + fileId, e); } } }
3,682
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleDriveSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.google; import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.api.client.auth.oauth2.Credential; import com.google.api.client.repackaged.com.google.common.base.Preconditions; import com.google.api.services.drive.Drive; import com.google.common.io.Closer; import static org.apache.gobblin.configuration.ConfigurationKeys.*; import static org.apache.gobblin.source.extractor.extract.google.GoogleCommonKeys.*; import org.apache.gobblin.configuration.SourceState; import org.apache.gobblin.configuration.State; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException; import org.apache.gobblin.source.extractor.filebased.FileBasedSource; /** * Source for Google drive using GoogleDriveFsHelper. 
 * @param <S> schema type of extracted records
 * @param <D> data record type of extracted records
 */
public class GoogleDriveSource<S, D> extends FileBasedSource<S, D> {
  private static final Logger LOG = LoggerFactory.getLogger(GoogleDriveSource.class);

  public static final String GOOGLE_DRIVE_PREFIX = GOOGLE_SOURCE_PREFIX + "drive.";
  // NOTE(review): this constant appears unreferenced within this class; presumably used by
  // related classes or kept for configuration compatibility -- confirm before removing.
  public static final String BUFFER_BYTE_SIZE = "buffer_byte_size";

  // Tracks the file-system helper so shutdown() can release it.
  private final Closer closer = Closer.create();

  /**
   * As the Google Drive extractor needs a file system helper, this initializes the file system
   * helper before constructing the extractor.
   * {@inheritDoc}
   * @see org.apache.gobblin.source.Source#getExtractor(org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Extractor<S, D> getExtractor(WorkUnitState state) throws IOException {
    Preconditions.checkNotNull(state, "WorkUnitState should not be null");
    LOG.info("WorkUnitState from getExtractor: " + state);
    try {
      //GoogleDriveExtractor needs GoogleDriveFsHelper
      initFileSystemHelper(state);
    } catch (FileBasedHelperException e) {
      throw new IOException(e);
    }

    Preconditions.checkNotNull(fsHelper, "File system helper should not be null");
    return new GoogleDriveExtractor<>(state, fsHelper);
  }

  /**
   * Initializes the file system helper at most once for this instance (synchronized, idempotent).
   * Builds the Google credential and Drive client from the job configuration.
   * {@inheritDoc}
   * @see org.apache.gobblin.source.extractor.filebased.FileBasedSource#initFileSystemHelper(org.apache.gobblin.configuration.State)
   */
  @Override
  public synchronized void initFileSystemHelper(State state) throws FileBasedHelperException {
    if (fsHelper == null) {
      Credential credential = new GoogleCommon.CredentialBuilder(state.getProp(SOURCE_CONN_PRIVATE_KEY), state.getPropAsList(API_SCOPES))
          .fileSystemUri(state.getProp(PRIVATE_KEY_FILESYSTEM_URI))
          .proxyUrl(state.getProp(SOURCE_CONN_USE_PROXY_URL))
          .port(state.getProp(SOURCE_CONN_USE_PROXY_PORT))
          .serviceAccountId(state.getProp(SOURCE_CONN_USERNAME))
          .build();

      Drive driveClient = new Drive.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(), credential)
          .setApplicationName(Preconditions.checkNotNull(state.getProp(APPLICATION_NAME), "ApplicationName is required"))
          .build();
      // Register the helper so shutdown() closes it.
      this.fsHelper = closer.register(new GoogleDriveFsHelper(state, driveClient));
    }
  }

  /**
   * Provides the list-of-files snapshot, where the snapshot consists of file IDs joined with their
   * modified time. The folder ID is optional: a missing folder ID means the search starts from the
   * root folder, and all files in the current folder and sub-folders are included.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.source.extractor.filebased.FileBasedSource#getcurrentFsSnapshot(org.apache.gobblin.configuration.State)
   */
  @Override
  public List<String> getcurrentFsSnapshot(State state) {
    List<String> results = new ArrayList<>();
    String folderId = state.getProp(SOURCE_FILEBASED_DATA_DIRECTORY, "");

    try {
      LOG.info("Running ls with folderId: " + folderId);
      List<String> fileIds = this.fsHelper.ls(folderId);
      for (String fileId : fileIds) {
        // Snapshot entry format: <fileId><splitPattern><modification time>
        results.add(fileId + splitPattern + this.fsHelper.getFileMTime(fileId));
      }
    } catch (FileBasedHelperException e) {
      throw new RuntimeException("Failed to retrieve list of file IDs for folderID: " + folderId, e);
    }
    return results;
  }

  /**
   * Releases the registered file system helper (and anything else on the closer).
   */
  @Override
  public void shutdown(SourceState state) {
    try {
      closer.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
3,683
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleAnalyticsUnsampledSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.google; import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.extract.QueryBasedSource; /** * Source for Google analytics unsampled report. * * For unsampled report, the minimum unit of querying range is date. To increase # of partition * use hour on "source.querybased.watermark.type" and have "source.querybased.partition.interval" as 23 * This will make each partition to have only one day in each query. * (Note that, by design, # of max partition is limited by "source.max.number.of.partitions"). 
 *
 * @param <S> schema type of extracted records
 * @param <D> data record type of extracted records
 */
public class GoogleAnalyticsUnsampledSource<S, D> extends QueryBasedSource<S, D> {
  // NOTE(review): LOG appears unused in this class; presumably kept for subclasses -- confirm.
  private static final Logger LOG = LoggerFactory.getLogger(GoogleAnalyticsUnsampledSource.class);

  // Configuration key prefixes for the Google Analytics unsampled report request.
  protected static final String GOOGLE_ANALYTICS_SOURCE_PREFIX = "source.google_analytics.";
  protected static final String GA_REPORT_PREFIX = GOOGLE_ANALYTICS_SOURCE_PREFIX + "report.";
  // Report definition keys: what to measure and how to slice/filter it.
  protected static final String METRICS = GA_REPORT_PREFIX + "metrics";
  protected static final String DIMENSIONS = GA_REPORT_PREFIX + "dimensions";
  protected static final String SEGMENTS = GA_REPORT_PREFIX + "segments";
  protected static final String FILTERS = GA_REPORT_PREFIX + "filters";
  // Identifier keys for the GA account / web property / view the report is pulled from.
  protected static final String ACCOUNT_ID = GA_REPORT_PREFIX + "account_id";
  protected static final String WEB_PROPERTY_ID = GA_REPORT_PREFIX + "web_property_id";
  protected static final String VIEW_ID = GA_REPORT_PREFIX + "view_id";
  // Date format for the report's date-range boundaries.
  protected static final String DATE_FORMAT = "yyyy-MM-dd";

  /**
   * Creates the unsampled-report extractor for the given work unit.
   *
   * @param state the work-unit state carrying the report configuration
   * @throws IOException if the extractor cannot be created
   */
  @Override
  public Extractor<S, D> getExtractor(WorkUnitState state) throws IOException {
    return new GoogleAnalyticsUnsampledExtractor<>(state);
  }
}
3,684
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleDriveFileSystem.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.google; import java.io.BufferedInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.util.ArrayList; import java.util.List; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.api.client.auth.oauth2.Credential; import com.google.api.client.googleapis.json.GoogleJsonError; import com.google.api.client.googleapis.json.GoogleJsonResponseException; import com.google.api.services.drive.Drive; import com.google.api.services.drive.model.File; import com.google.api.services.drive.model.FileList; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.io.Closer; import static 
org.apache.gobblin.configuration.ConfigurationKeys.*; import static org.apache.gobblin.source.extractor.extract.google.GoogleCommonKeys.*; import org.apache.gobblin.configuration.State; import org.apache.gobblin.util.HadoopUtils; import org.apache.gobblin.util.io.SeekableFSInputStream; /** * A {@link FileSystem} implementation that provides the {@link FileSystem} interface for an Google Drive server. * <ul> * <li>Note that {@link GoogleDriveFileSystem} currently only supports list, get, delete use cases. * <li>Google drive has two different identifier. File ID and File name -- where folder is just different mime-type of File. * As File name can be duplicate under same folder, all path that GoogleDriveFileSystem takes assumes that it's a File ID. * <li>It is the caller's responsibility to call {@link #close()} on this {@link FileSystem} to disconnect the session. * * {@link GoogleDriveFileSystem} does not cache instance and {@link FileSystem#get(Configuration)} creates a new {@link GoogleDriveFileSystem} everytime * instead of cached copy. 
* </ul> */ public class GoogleDriveFileSystem extends FileSystem { private static final Logger LOG = LoggerFactory.getLogger(GoogleDriveFileSystem.class); static final String PAGE_SIZE = GOOGLE_SOURCE_PREFIX + "fs_helper.page_size"; //for paginated API static final String FOLDER_MIME_TYPE = "application/vnd.google-apps.folder"; static final int DEFAULT_PAGE_SIZE = 50; private Drive client; private final Closer closer; private int pageSize = DEFAULT_PAGE_SIZE; public GoogleDriveFileSystem(Drive client) { this(); this.client = client; } public GoogleDriveFileSystem(Drive client, int pageSize) { this(client); Preconditions.checkArgument(pageSize > 0, "pageSize should be positive number"); this.pageSize = pageSize; } public GoogleDriveFileSystem() { super(); this.closer = Closer.create(); } @Override public synchronized void initialize(URI uri, Configuration conf) throws IOException { if (this.client == null) { super.initialize(uri, conf); State state = HadoopUtils.getStateFromConf(conf); Credential credential = new GoogleCommon.CredentialBuilder(state.getProp(SOURCE_CONN_PRIVATE_KEY), state.getPropAsList(API_SCOPES)) .fileSystemUri(state.getProp(PRIVATE_KEY_FILESYSTEM_URI)) .proxyUrl(state.getProp(SOURCE_CONN_USE_PROXY_URL)) .port(state.getProp(SOURCE_CONN_USE_PROXY_PORT)) .serviceAccountId(state.getProp(SOURCE_CONN_USERNAME)) .build(); this.client = new Drive.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(), credential) .setApplicationName(Preconditions.checkNotNull(state.getProp(APPLICATION_NAME), "ApplicationName is required")) .build(); this.pageSize = state.getPropAsInt(PAGE_SIZE, DEFAULT_PAGE_SIZE); } } @Override public FSDataInputStream open(Path path, int bufferSize) throws IOException { return closer.register(new FSDataInputStream( new SeekableFSInputStream( new BufferedInputStream( client.files().get(toFileId(path)).executeMediaAsInputStream(), bufferSize)))); } @Override public FSDataInputStream open(Path path) throws IOException { return 
closer.register(new FSDataInputStream( new SeekableFSInputStream( new BufferedInputStream( client.files().get(toFileId(path)).executeMediaAsInputStream())))); } @Override public boolean delete(Path path, boolean recursive) throws IOException { Preconditions.checkArgument(recursive, "Non-recursive is not supported."); String fileId = toFileId(path); LOG.debug("Deleting file: " + fileId); try { client.files().delete(fileId).execute(); } catch (GoogleJsonResponseException e) { GoogleJsonError error = e.getDetails(); if (404 == error.getCode()) { //Non-existing file id return false; } throw e; } return true; } @Override public FileStatus[] listStatus(Path path) throws FileNotFoundException, IOException { String folderId = toFileId(path); List<File> fileMetadata = lsFileMetadata(folderId, null); if (fileMetadata.isEmpty()) { throw new FileNotFoundException(); } FileStatus[] statusArr = new FileStatus[fileMetadata.size()]; int idx = 0; for (File metadata: fileMetadata) { FileStatus status = toFileStatus(metadata); statusArr[idx++] = status; } return statusArr; } private List<File> lsFileMetadata(String folderId, String fileId) throws IOException { String pageToken = null; List<File> result = new ArrayList<>(); Optional<String> query = buildQuery(folderId, fileId); do { Drive.Files.List request = client.files() .list() .setFields("files/id,files/mimeType,files/modifiedTime,files/size,files/permissions") .setPageSize(pageSize); if (query.isPresent()) { request = request.setQ(query.get()); } if (pageToken != null) { request = request.setPageToken(pageToken); } LOG.info("Google drive List request: " + request); if (LOG.isDebugEnabled()) { LOG.debug("Google drive List request: " + request); } FileList fileList = null; try { fileList = request.execute(); } catch (GoogleJsonResponseException e) { GoogleJsonError error = e.getDetails(); if (404 == error.getCode()) { throw new FileNotFoundException("File not found. 
Request: " + request); } throw e; } pageToken = fileList.getNextPageToken(); List<File> files = fileList.getFiles(); if (files == null || files.isEmpty()) { return result; } result.addAll(files); } while (pageToken != null); return result; } /** * Build query for Google drive. * @see https://developers.google.com/drive/v3/web/search-parameters * * @param folderId * @param fileName * @return Query */ @VisibleForTesting Optional<String> buildQuery(String folderId, String fileName) { if (StringUtils.isEmpty(folderId) && StringUtils.isEmpty(fileName)) { return Optional.absent(); } StringBuilder query = new StringBuilder(); if (StringUtils.isNotEmpty(folderId)) { query.append("'").append(folderId).append("'") .append(" in parents"); } if (StringUtils.isNotEmpty(fileName)) { if (query.length() > 0) { query.append(" and "); } query.append("name contains ") .append("'").append(fileName).append("'"); } return Optional.of(query.toString()); } /** * org.apache.hadoop.fs.Path assumes that there separator in file system naming and "/" is the separator. * When org.apache.hadoop.fs.Path sees "/" in path String, it splits into parent and name. As fileID is a random * String determined by Google and it can contain "/" itself, this method check if parent and name is separated and * restore "/" back to file ID. 
* * @param p * @return */ public static String toFileId(Path p) { if (p.isRoot()) { return ""; } final String format = "%s" + Path.SEPARATOR + "%s"; if (p.getParent() != null && StringUtils.isEmpty(p.getParent().getName())) { return p.getName(); } return String.format(format, toFileId(p.getParent()), p.getName()); } @Override public void close() throws IOException { super.close(); closer.close(); } @Override public FileStatus getFileStatus(Path p) throws IOException { Preconditions.checkNotNull(p); String fileId = toFileId(p); File metadata = client.files().get(fileId) .setFields("id,mimeType,modifiedTime,size,permissions") .execute(); return toFileStatus(metadata); } private FileStatus toFileStatus(File metadata) { return new FileStatus(metadata.getSize() == null ? 0L : metadata.getSize(), FOLDER_MIME_TYPE.equals(metadata.getMimeType()), -1, -1, metadata.getModifiedTime().getValue(), new Path(metadata.getId())); } //Below are unsupported methods @Override public void setWorkingDirectory(Path new_dir) { throw new UnsupportedOperationException(); } @Override public Path getWorkingDirectory() { throw new UnsupportedOperationException(); } @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { throw new UnsupportedOperationException(); } @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { throw new UnsupportedOperationException(); } @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { throw new UnsupportedOperationException(); } @Override public boolean rename(Path src, Path dst) throws IOException { throw new UnsupportedOperationException(); } @Override public URI getUri() { throw new UnsupportedOperationException(); } }
3,685
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleCommon.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.google; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; import java.net.Proxy; import java.net.URI; import java.nio.file.Files; import java.nio.file.attribute.PosixFilePermissions; import java.security.GeneralSecurityException; import java.util.Collection; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.api.client.auth.oauth2.Credential; import com.google.api.client.googleapis.GoogleUtils; import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; import com.google.api.client.http.HttpTransport; import com.google.api.client.http.javanet.NetHttpTransport; import com.google.api.client.json.JsonFactory; import com.google.api.client.json.gson.GsonFactory; import com.google.common.base.Optional; import 
com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; /** * Utility class that has static methods for Google services. * */ public class GoogleCommon { private static final Logger LOG = LoggerFactory.getLogger(GoogleCommon.class); private static final JsonFactory JSON_FACTORY = GsonFactory.getDefaultInstance(); private static final String JSON_FILE_EXTENSION = ".json"; private static final FsPermission USER_READ_PERMISSION_ONLY = new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE); public static class CredentialBuilder { private final String privateKeyPath; private final Collection<String> serviceAccountScopes; private String fileSystemUri; private String serviceAccountId; private String proxyUrl; //Port as String type, so that client can easily pass null instead of checking the existence of it. //( e.g: state.getProp(key) vs state.contains(key) + state.getPropAsInt(key) ) private String portStr; public CredentialBuilder(String privateKeyPath, Collection<String> serviceAccountScopes) { Preconditions.checkArgument(!StringUtils.isEmpty(privateKeyPath), "privateKeyPath is required."); Preconditions.checkArgument(serviceAccountScopes != null && !serviceAccountScopes.isEmpty(), "serviceAccountScopes is required."); this.privateKeyPath = privateKeyPath; this.serviceAccountScopes = ImmutableList.copyOf(serviceAccountScopes); } public CredentialBuilder fileSystemUri(String fileSystemUri) { this.fileSystemUri = fileSystemUri; return this; } public CredentialBuilder serviceAccountId(String serviceAccountId) { this.serviceAccountId = serviceAccountId; return this; } public CredentialBuilder proxyUrl(String proxyUrl) { this.proxyUrl = proxyUrl; return this; } public CredentialBuilder port(int port) { this.portStr = Integer.toString(port); return this; } public CredentialBuilder port(String portStr) { this.portStr = portStr; return this; } public Credential build() { try { HttpTransport transport = newTransport(proxyUrl, portStr); if 
(privateKeyPath.trim().toLowerCase().endsWith(JSON_FILE_EXTENSION)) { LOG.info("Getting Google service account credential from JSON"); return buildCredentialFromJson(privateKeyPath, Optional.fromNullable(fileSystemUri), transport, serviceAccountScopes); } else { LOG.info("Getting Google service account credential from P12"); return buildCredentialFromP12(privateKeyPath, Optional.fromNullable(fileSystemUri), Optional.fromNullable(serviceAccountId), transport, serviceAccountScopes); } } catch (IOException | GeneralSecurityException e) { throw new RuntimeException("Failed to create credential", e); } } } /** * As Google API only accepts java.io.File for private key, and this method copies private key into local file system. * Once Google credential is instantiated, it deletes copied private key file. * * @param privateKeyPath * @param fsUri * @param id * @param transport * @param serviceAccountScopes * @return Credential * @throws IOException * @throws GeneralSecurityException */ private static Credential buildCredentialFromP12(String privateKeyPath, Optional<String> fsUri, Optional<String> id, HttpTransport transport, Collection<String> serviceAccountScopes) throws IOException, GeneralSecurityException { Preconditions.checkArgument(id.isPresent(), "user id is required."); FileSystem fs = getFileSystem(fsUri); Path keyPath = getPrivateKey(fs, privateKeyPath); final File localCopied = copyToLocal(fs, keyPath); localCopied.deleteOnExit(); try { return new GoogleCredential.Builder() .setTransport(transport) .setJsonFactory(JSON_FACTORY) .setServiceAccountId(id.get()) .setServiceAccountPrivateKeyFromP12File(localCopied) .setServiceAccountScopes(serviceAccountScopes) .build(); } finally { boolean isDeleted = localCopied.delete(); if (!isDeleted) { throw new RuntimeException(localCopied.getAbsolutePath() + " has not been deleted."); } } } /** * Before retrieving private key, it makes sure that original private key's permission is read only on the owner. 
* This is a way to ensure to keep private key private. * @param fs * @param privateKeyPath * @return * @throws IOException */ private static Path getPrivateKey(FileSystem fs, String privateKeyPath) throws IOException { Path keyPath = new Path(privateKeyPath); FileStatus fileStatus = fs.getFileStatus(keyPath); Preconditions.checkArgument(USER_READ_PERMISSION_ONLY.equals(fileStatus.getPermission()), "Private key file should only have read only permission only on user. " + keyPath); return keyPath; } private static FileSystem getFileSystem(Optional<String> fsUri) throws IOException { if (fsUri.isPresent()) { return FileSystem.get(URI.create(fsUri.get()), new Configuration()); } return FileSystem.get(new Configuration()); } private static Credential buildCredentialFromJson(String privateKeyPath, Optional<String> fsUri, HttpTransport transport, Collection<String> serviceAccountScopes) throws IOException { FileSystem fs = getFileSystem(fsUri); Path keyPath = getPrivateKey(fs, privateKeyPath); return GoogleCredential.fromStream(fs.open(keyPath), transport, JSON_FACTORY) .createScoped(serviceAccountScopes); } /** * Provides HttpTransport. If both proxyUrl and postStr is defined, it provides transport with Proxy. * @param proxyUrl Optional. * @param portStr Optional. String type for port so that user can easily pass null. 
(e.g: state.getProp(key)) * @return * @throws NumberFormatException * @throws GeneralSecurityException * @throws IOException */ public static HttpTransport newTransport(String proxyUrl, String portStr) throws NumberFormatException, GeneralSecurityException, IOException { if (!StringUtils.isEmpty(proxyUrl) && !StringUtils.isEmpty(portStr)) { return new NetHttpTransport.Builder() .trustCertificates(GoogleUtils.getCertificateTrustStore()) .setProxy(new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyUrl, Integer.parseInt(portStr)))) .build(); } return GoogleNetHttpTransport.newTrustedTransport(); } private static File copyToLocal(FileSystem fs, Path keyPath) throws IOException { java.nio.file.Path tmpKeyPath = Files.createTempFile(GoogleCommon.class.getSimpleName(), "tmp", PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rwx------"))); File copied = tmpKeyPath.toFile(); copied.deleteOnExit(); fs.copyToLocalFile(keyPath, new Path(copied.getAbsolutePath())); return copied; } public static JsonFactory getJsonFactory() { return JSON_FACTORY; } }
3,686
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleAnalyticsUnsampledExtractor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.google; import java.io.Closeable; import java.io.IOException; import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.codahale.metrics.Timer; import com.github.rholder.retry.RetryException; import com.github.rholder.retry.Retryer; import com.google.api.client.auth.oauth2.Credential; import com.google.api.services.analytics.Analytics; import com.google.api.services.analytics.Analytics.Management.UnsampledReports.Insert; import com.google.api.services.analytics.model.UnsampledReport; import com.google.api.services.drive.Drive; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableMap; import com.google.common.io.Closer; import com.typesafe.config.Config; import 
com.typesafe.config.ConfigFactory; import org.apache.gobblin.config.ConfigBuilder; import org.apache.gobblin.configuration.State; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.exception.NonTransientException; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.metrics.GobblinMetrics; import org.apache.gobblin.source.extractor.DataRecordException; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.extract.LongWatermark; import org.apache.gobblin.source.extractor.filebased.CsvFileDownloader; import org.apache.gobblin.source.workunit.WorkUnit; import org.apache.gobblin.util.retry.RetryerFactory; import static org.apache.gobblin.configuration.ConfigurationKeys.*; import static org.apache.gobblin.source.extractor.extract.google.GoogleAnalyticsUnsampledSource.*; import static org.apache.gobblin.source.extractor.extract.google.GoogleCommonKeys.*; import static org.apache.gobblin.util.retry.RetryerFactory.*; /** * Extracts Google Analytics(GA) unsampled report data. * GA provides unsampled report by client requesting it via GA asynchronous api and GA (server) creates unsampled report * on their background and put into Google drive by default. * (GoogleAnalyticsUnsampledExtractor currently does not support use case on Google cloud storage) * * While being created in background, GoogleAnalyticsExtractor will poll for status of the report request. Once report is generated, * GoogleAnalyticsUnsampledExtractor will use GoogleDriveExtractor to extract records. 
* * @param <S> * @param <D> */ public class GoogleAnalyticsUnsampledExtractor<S, D> implements Extractor<S, D> { private static final Logger LOG = LoggerFactory.getLogger(GoogleAnalyticsUnsampledExtractor.class); static final String GA_UNSAMPLED_REPORT_PREFIX = GA_REPORT_PREFIX + "unsampled."; static final String GA_UNSAMPLED_REPORT_CREATION_TIMER = GA_UNSAMPLED_REPORT_PREFIX + "creation.timer"; static final String REQUEST_RETRY_PREFIX = GA_REPORT_PREFIX + "request_retry."; static final String POLL_RETRY_PREFIX = GA_REPORT_PREFIX + "poll."; static final Config POLL_RETRY_DEFAULTS; static { Map<String, Object> configMap = ImmutableMap.<String, Object>builder() .put(RETRY_TIME_OUT_MS, TimeUnit.HOURS.toMillis(1L)) //Overall try to poll for 1 hour .put(RETRY_INTERVAL_MS, TimeUnit.MINUTES.toMillis(1L)) //Try to poll every 1 minutes .put(RETRY_TYPE, RetryType.FIXED.name()) .build(); POLL_RETRY_DEFAULTS = ConfigFactory.parseMap(configMap); }; static final String WATERMARK_INPUTFORMAT = "yyyyMMddHHmmss"; static final String DELETE_TEMP_UNSAMPLED_REPORT = GA_UNSAMPLED_REPORT_PREFIX + "delete_temp_unsampled_report"; static enum ReportCreationStatus { FAILED, PENDING, COMPLETED } static final String DOWNLOAD_TYPE_GOOGLE_DRIVE = "GOOGLE_DRIVE"; private final Closer closer = Closer.create(); private final Analytics gaService; private final WorkUnitState wuState; private final Extractor<S, D> actualExtractor; private final DateTimeFormatter googleAnalyticsFormatter; private final DateTimeFormatter watermarkFormatter; private final long nextWatermark; /** * For unsampled report, it will call GA service to produce unsampled CSV report into GoogleDrive so that getExtractor will * use Google drive to extract record from CSV file. 
* * @param wuState * @param sampleRate * @throws IOException */ public GoogleAnalyticsUnsampledExtractor(WorkUnitState wuState) throws IOException { this.wuState = wuState; this.googleAnalyticsFormatter = DateTimeFormat.forPattern(DATE_FORMAT) .withZone(DateTimeZone.forID(wuState.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE))); this.watermarkFormatter = DateTimeFormat.forPattern(WATERMARK_INPUTFORMAT) .withZone(DateTimeZone.forID(wuState.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE))); Credential credential = new GoogleCommon.CredentialBuilder(wuState.getProp(SOURCE_CONN_PRIVATE_KEY), wuState.getPropAsList(API_SCOPES)) .fileSystemUri(wuState.getProp(PRIVATE_KEY_FILESYSTEM_URI)) .proxyUrl(wuState.getProp(SOURCE_CONN_USE_PROXY_URL)) .port(wuState.getProp(SOURCE_CONN_USE_PROXY_PORT)) .serviceAccountId(wuState.getProp(SOURCE_CONN_USERNAME)) .build(); this.gaService = new Analytics.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(), credential) .setApplicationName(Preconditions.checkNotNull(wuState.getProp(APPLICATION_NAME))) .build(); Drive driveClient = new Drive.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(), Preconditions.checkNotNull(credential, "Credential is required")) .setApplicationName(Preconditions.checkNotNull(wuState.getProp(APPLICATION_NAME), "ApplicationName is required")) .build(); GoogleDriveFsHelper fsHelper = closer.register(new GoogleDriveFsHelper(wuState, driveClient)); UnsampledReport request = new UnsampledReport() .setAccountId(Preconditions.checkNotNull(wuState.getProp(ACCOUNT_ID), ACCOUNT_ID + " is required")) .setWebPropertyId(Preconditions.checkNotNull(wuState.getProp(WEB_PROPERTY_ID), WEB_PROPERTY_ID + " is required")) .setProfileId(Preconditions.checkNotNull(wuState.getProp(VIEW_ID), VIEW_ID + " is required")) .setTitle(Preconditions.checkNotNull(wuState.getProp(SOURCE_ENTITY), SOURCE_ENTITY + " is required.")) 
.setStartDate(convertFormat(wuState.getWorkunit().getLowWatermark(LongWatermark.class).getValue())) .setEndDate(convertFormat(wuState.getWorkunit().getExpectedHighWatermark(LongWatermark.class).getValue())) .setMetrics(Preconditions.checkNotNull(wuState.getProp(METRICS), METRICS + " is required.")) .setDimensions(wuState.getProp(DIMENSIONS)) //Optional .setSegment(wuState.getProp(SEGMENTS)) //Optional .setFilters(wuState.getProp(FILTERS)); //Optional UnsampledReport createdReport = prepareUnsampledReport(request, fsHelper, wuState.getPropAsBoolean(DELETE_TEMP_UNSAMPLED_REPORT, true)); DateTime nextWatermarkDateTime = googleAnalyticsFormatter.parseDateTime(createdReport.getEndDate()).plusDays(1); nextWatermark = Long.parseLong(watermarkFormatter.print(nextWatermarkDateTime)); this.actualExtractor = closer.register(new GoogleDriveExtractor<S, D>(copyOf(wuState), fsHelper)); } @VisibleForTesting GoogleAnalyticsUnsampledExtractor(WorkUnitState state, Extractor<S, D> actualExtractor, Analytics gaService) throws IOException { this.wuState = state; this.googleAnalyticsFormatter = DateTimeFormat.forPattern(DATE_FORMAT) .withZone(DateTimeZone.forID(state.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE))); this.watermarkFormatter = DateTimeFormat.forPattern(WATERMARK_INPUTFORMAT) .withZone(DateTimeZone.forID(state.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE))); this.actualExtractor = actualExtractor; this.gaService = gaService; this.nextWatermark = -1; } /** * Copy WorkUnitState so that work unit also contains job state. FileBasedExtractor needs properties from job state (mostly source.* properties), * where it has been already removed when reached here. 
* * @param src * @return */ private WorkUnitState copyOf(WorkUnitState src) { WorkUnit copiedWorkUnit = WorkUnit.copyOf(src.getWorkunit()); copiedWorkUnit.addAllIfNotExist(src.getJobState()); WorkUnitState workUnitState = new WorkUnitState(copiedWorkUnit, src.getJobState()); workUnitState.addAll(src); return workUnitState; } /** * Create unsampled report in Google drive and add google drive file id into state so that Google drive extractor * can extract record from it. Also, update the state to use CsvFileDownloader unless other downloader is defined. * * It also register closer to delete the file from Google Drive unless explicitly requested to not deleting it. * @return documentID of unsampled report in Google drive * @throws IOException * */ @VisibleForTesting UnsampledReport prepareUnsampledReport(UnsampledReport request, final GoogleDriveFsHelper fsHelper, boolean isDeleteTempReport) throws IOException { UnsampledReport createdReport = createUnsampledReports(request); final String fileId = createdReport.getDriveDownloadDetails().getDocumentId(); LOG.info("Temporary unsampled report created in Google Drive: " + fileId); if (isDeleteTempReport) { closer.register(new Closeable() { @Override public void close() throws IOException { LOG.info("Deleting created temporary unsampled report from Google drive " + fileId); fsHelper.deleteFile(fileId); } }); } else { LOG.warn("Temporary unsampled report will not be deleted as requested. 
File ID: " + fileId); } wuState.setProp(SOURCE_FILEBASED_FILES_TO_PULL, fileId); if (!wuState.contains(SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS)) { wuState.setProp(SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS, CsvFileDownloader.class.getName()); } return createdReport; } @VisibleForTesting UnsampledReport createUnsampledReports(UnsampledReport request) throws IOException { long startTimeInMillis = System.currentTimeMillis(); try { UnsampledReport requestedReport = requestUnsampledReport(request); UnsampledReport createdReport = pollForCompletion(wuState, gaService, requestedReport); createdReport.setEndDate(requestedReport.getEndDate()); return createdReport; } finally { long delta = System.currentTimeMillis() - startTimeInMillis; if (GobblinMetrics.isEnabled(wuState)) { Timer timer = Instrumented.getMetricContext(wuState, getClass()).timer(GA_UNSAMPLED_REPORT_CREATION_TIMER); Instrumented.updateTimer(Optional.of(timer), delta, TimeUnit.MILLISECONDS); } } } @VisibleForTesting UnsampledReport requestUnsampledReport(UnsampledReport request) throws IOException { String accountId = request.getAccountId(); String webPropertyId = request.getWebPropertyId(); String profileId = request.getProfileId(); request.setAccountId(null).setWebPropertyId(null).setProfileId(null); //GA somehow does not allow these values in it. 
final String endDate = request.getEndDate(); final Insert insertRequest = gaService.management() .unsampledReports() .insert(accountId, webPropertyId, profileId, request); Config config = ConfigBuilder.create().loadProps(wuState.getProperties(), REQUEST_RETRY_PREFIX).build(); Retryer<UnsampledReport> retryer = RetryerFactory.newInstance(config); LOG.info("Requesting to create unsampled report " + request); try { return retryer.call(new Callable<UnsampledReport>() { @Override public UnsampledReport call() throws Exception { UnsampledReport response = insertRequest.execute(); if (ReportCreationStatus.FAILED.name().equals(response.getStatus())) { //No retry if it's explicitly failed from server throw new NonTransientException("Failed to create unsampled report " + response); } response.setEndDate(endDate); //response does not have end date where we need it later for next watermark calculation. return response; } }); } catch (ExecutionException e) { throw new IOException(e); } catch (RetryException e) { throw new RuntimeException(e); } } /** * Converts date format from watermark format to Google analytics format * @param watermark * @return */ private String convertFormat(long watermark) { Preconditions.checkArgument(watermark > 0, "Watermark should be positive number."); return googleAnalyticsFormatter.print(watermarkFormatter.parseDateTime(Long.toString(watermark))); } @VisibleForTesting UnsampledReport pollForCompletion(State state, final Analytics gaService, final UnsampledReport requestedReport) throws IOException { Config config = ConfigBuilder.create() .loadProps(state.getProperties(), POLL_RETRY_PREFIX) .build() .withFallback(POLL_RETRY_DEFAULTS); Retryer<UnsampledReport> retryer = RetryerFactory.newInstance(config); LOG.info("Will poll for completion on unsampled report with retry config: " + config); final Stopwatch stopwatch = Stopwatch.createStarted(); UnsampledReport result = null; try { result = retryer.call(new Callable<UnsampledReport>() { @Override 
public UnsampledReport call() throws Exception { UnsampledReport response = null; try { response = gaService.management() .unsampledReports() .get(requestedReport.getAccountId(), requestedReport.getWebPropertyId(), requestedReport.getProfileId(), requestedReport.getId()) .execute(); } catch (Exception e) { LOG.warn("Encountered exception while polling for unsampled report. Will keep polling. " + "Elasped so far: " + stopwatch.elapsed(TimeUnit.SECONDS) + " seconds", e); throw e; } ReportCreationStatus status = ReportCreationStatus.valueOf(response.getStatus()); switch(status) { case FAILED: //Stop retrying if it explicitly failed from server. throw new NonTransientException("Unsampled report has failed to be generated. " + response); case PENDING: LOG.info("Waiting for report completion. Elasped so far: " + stopwatch.elapsed(TimeUnit.SECONDS) + " seconds for unsampled report: " + response); //Throw so that Retryer will retry throw new RuntimeException("Not completed yet. This will be retried. " + response); case COMPLETED: return response; default: throw new NonTransientException(status + " is not supported. " + response); } } }); } catch (ExecutionException e) { throw new IOException(e); } catch (RetryException e) { throw new RuntimeException(e); } LOG.info("Unsampled report creation has been completed. 
" + result); Preconditions.checkArgument(DOWNLOAD_TYPE_GOOGLE_DRIVE.equals(result.getDownloadType()), result.getDownloadType() + " DownloadType is not supported."); return result; } @Override public void close() throws IOException { LOG.info("Updating the current state high water mark with " + nextWatermark); this.wuState.setActualHighWatermark(new LongWatermark(nextWatermark)); closer.close(); } @Override public S getSchema() throws IOException { return actualExtractor.getSchema(); } @Override public D readRecord(D reuse) throws DataRecordException, IOException { return actualExtractor.readRecord(reuse); } @Override public long getExpectedRecordCount() { return actualExtractor.getExpectedRecordCount(); } @Override public long getHighWatermark() { return actualExtractor.getHighWatermark(); } }
3,687
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleCommonKeys.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.extractor.extract.google;

/**
 * Configuration property keys shared by the Google ingestion sources.
 */
public interface GoogleCommonKeys {
  /** Common prefix for all Google-source configuration keys. */
  public static final String GOOGLE_SOURCE_PREFIX = "source.google.";

  /** Comma-separated list of OAuth2 API scopes to request. */
  public static final String API_SCOPES = GOOGLE_SOURCE_PREFIX + "api_scopes";

  /** Application name reported to the Google API client. */
  public static final String APPLICATION_NAME = GOOGLE_SOURCE_PREFIX + "application_name";

  /** File-system URI where the service-account private key is stored. */
  public static final String PRIVATE_KEY_FILESYSTEM_URI = GOOGLE_SOURCE_PREFIX + "privatekey_fs_uri";
}
3,688
0
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/google-ingestion/src/main/java/org/apache/gobblin/source/extractor/extract/google/GoogleDriveExtractor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.google; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.filebased.FileBasedExtractor; import org.apache.gobblin.source.extractor.filebased.FileBasedHelper; /** * Extractor for files in Google drive. */ public class GoogleDriveExtractor<S, D> extends FileBasedExtractor<S, D> { private static final Logger LOG = LoggerFactory.getLogger(GoogleDriveExtractor.class); public GoogleDriveExtractor(WorkUnitState workUnitState, FileBasedHelper fsHelper) { super(workUnitState, fsHelper); } }
3,689
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin/MockGenericRecord.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin; import java.util.HashMap; import java.util.Map; import org.apache.avro.Schema; import org.apache.avro.generic.GenericRecord; public class MockGenericRecord implements GenericRecord { private final Map<String, Object> map; public MockGenericRecord() { map = new HashMap<>(); } @Override public void put(String key, Object v) { map.put(key, v); } @Override public Object get(String key) { return map.get(key); } @Override public void put(int i, Object v) { throw new UnsupportedOperationException("Put by index not supported"); } @Override public Object get(int i) { throw new UnsupportedOperationException("Get by index not supported"); } @Override public Schema getSchema() { throw new UnsupportedOperationException("Get schema not supported"); } }
3,690
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin/HttpTestUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayDeque; import java.util.HashMap; import java.util.Map; import java.util.Queue; import org.apache.avro.generic.GenericRecord; import org.apache.commons.io.IOUtils; import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.client.methods.RequestBuilder; import org.testng.Assert; import com.linkedin.data.ByteString; import com.linkedin.r2.message.rest.RestRequest; import com.linkedin.r2.message.rest.RestRequestBuilder; import org.apache.gobblin.http.HttpOperation; import org.apache.gobblin.async.BufferedRecord; public class HttpTestUtils { public static Queue<BufferedRecord<GenericRecord>> createQueue(int size, boolean isHttpOperation) { Queue<BufferedRecord<GenericRecord>> queue = new ArrayDeque<>(size); for (int i = 0; i < size; i++) { Map<String, String> keys = new HashMap<>(); keys.put("part1", i + "1"); keys.put("part2", i + "2"); Map<String, String> queryParams = new HashMap<>(); queryParams.put("param1", i + "1"); GenericRecord record = isHttpOperation ? 
new HttpOperation() : new MockGenericRecord(); record.put("keys", keys); record.put("queryParams", queryParams); record.put("body", "{\"id\":\"id" + i + "\"}"); BufferedRecord<GenericRecord> item = new BufferedRecord<>(record, null); queue.add(item); } return queue; } public static void assertEqual(RequestBuilder actual, RequestBuilder expect) throws IOException { // Check entity HttpEntity actualEntity = actual.getEntity(); HttpEntity expectedEntity = expect.getEntity(); if (actualEntity == null) { Assert.assertTrue(expectedEntity == null); } else { Assert.assertEquals(actualEntity.getContentLength(), expectedEntity.getContentLength()); String actualContent = IOUtils.toString(actualEntity.getContent(), StandardCharsets.UTF_8); String expectedContent = IOUtils.toString(expectedEntity.getContent(), StandardCharsets.UTF_8); Assert.assertEquals(actualContent, expectedContent); } // Check request HttpUriRequest actualRequest = actual.build(); HttpUriRequest expectedRequest = expect.build(); Assert.assertEquals(actualRequest.getMethod(), expectedRequest.getMethod()); Assert.assertEquals(actualRequest.getURI().toString(), expectedRequest.getURI().toString()); Header[] actualHeaders = actualRequest.getAllHeaders(); Header[] expectedHeaders = expectedRequest.getAllHeaders(); Assert.assertEquals(actualHeaders.length, expectedHeaders.length); for (int i = 0; i < actualHeaders.length; i++) { Assert.assertEquals(actualHeaders[i].toString(), expectedHeaders[i].toString()); } } public static void assertEqual(RestRequestBuilder actual, RestRequestBuilder expect) throws IOException { // Check entity ByteString actualEntity = actual.getEntity(); ByteString expectedEntity = expect.getEntity(); if (actualEntity == null) { Assert.assertTrue(expectedEntity == null); } else { Assert.assertEquals(actualEntity.length(), expectedEntity.length()); Assert.assertEquals(actualEntity.asString(StandardCharsets.UTF_8),expectedEntity.asString(StandardCharsets.UTF_8)); } // Check request 
RestRequest actualRequest = actual.build(); RestRequest expectedRequest = expect.build(); Assert.assertEquals(actualRequest.getMethod(), expectedRequest.getMethod()); Assert.assertEquals(actualRequest.getURI().toString(), expectedRequest.getURI().toString()); Map<String, String> actualHeaders = actualRequest.getHeaders(); Map<String, String> expectedHeaders = expectedRequest.getHeaders(); Assert.assertEquals(actualHeaders.size(), expectedHeaders.size()); for (String key: actualHeaders.keySet()) { Assert.assertEquals(actualHeaders.get(key), expectedHeaders.get(key)); } } }
3,691
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin/util/HttpUtilsTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.util; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import org.testng.annotations.Test; import com.typesafe.config.ConfigFactory; import junit.framework.Assert; import org.apache.gobblin.http.ResponseStatus; import org.apache.gobblin.http.StatusType; import org.apache.gobblin.utils.HttpConstants; import org.apache.gobblin.utils.HttpUtils; @Test public class HttpUtilsTest { public void testGetErrorCodeWhitelist() { Map<String, String> map = new HashMap<>(); map.put(HttpConstants.ERROR_CODE_WHITELIST, "303, 3xx, 303, 4XX"); Set<String> whitelist = HttpUtils.getErrorCodeWhitelist(ConfigFactory.parseMap(map)); Assert.assertTrue(whitelist.size() == 3); Assert.assertTrue(whitelist.contains("303")); Assert.assertTrue(whitelist.contains("3xx")); Assert.assertTrue(whitelist.contains("4xx")); Assert.assertFalse(whitelist.contains("4XX")); } public void testUpdateStatusType() { Set<String> errorCodeWhitelist = new HashSet<>(); ResponseStatus status = new ResponseStatus(StatusType.OK); HttpUtils.updateStatusType(status, 303, errorCodeWhitelist); // Client error without whitelist Assert.assertTrue(status.getType() == 
StatusType.CLIENT_ERROR); errorCodeWhitelist.add("303"); HttpUtils.updateStatusType(status, 303, errorCodeWhitelist); // Continue with whitelist Assert.assertTrue(status.getType() == StatusType.CONTINUE); errorCodeWhitelist.clear(); errorCodeWhitelist.add("3xx"); HttpUtils.updateStatusType(status, 303, errorCodeWhitelist); // Continue with whitelist Assert.assertTrue(status.getType() == StatusType.CONTINUE); HttpUtils.updateStatusType(status, 404, errorCodeWhitelist); // Client error without whitelist Assert.assertTrue(status.getType() == StatusType.CLIENT_ERROR); errorCodeWhitelist.add("4xx"); HttpUtils.updateStatusType(status, 404, errorCodeWhitelist); // Continue with whitelist Assert.assertTrue(status.getType() == StatusType.CONTINUE); HttpUtils.updateStatusType(status, 505, errorCodeWhitelist); // Server error without whitelist Assert.assertTrue(status.getType() == StatusType.SERVER_ERROR); errorCodeWhitelist.add("5xx"); HttpUtils.updateStatusType(status, 505, errorCodeWhitelist); // Continue with whitelist Assert.assertTrue(status.getType() == StatusType.CONTINUE); } }
3,692
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin/writer/AsyncHttpWriterTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.writer; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Queue; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpUriRequest; import org.testng.annotations.Test; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableMap; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import junit.framework.Assert; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.async.AsyncRequest; import org.apache.gobblin.async.AsyncRequestBuilder; import org.apache.gobblin.async.BufferedRecord; import org.apache.gobblin.async.Callback; import org.apache.gobblin.broker.BrokerConstants; import org.apache.gobblin.broker.SharedResourcesBrokerFactory; import org.apache.gobblin.broker.SharedResourcesBrokerImpl; import org.apache.gobblin.broker.SimpleScopeType; import org.apache.gobblin.broker.iface.SharedResourcesBroker; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.http.HttpClient; import org.apache.gobblin.http.ResponseHandler; import org.apache.gobblin.http.ResponseStatus; import 
org.apache.gobblin.http.StatusType; import org.apache.gobblin.http.ThrottledHttpClient; import org.apache.gobblin.net.Request; import org.apache.gobblin.util.limiter.RateBasedLimiter; import org.apache.gobblin.util.limiter.broker.SharedLimiterFactory; @Test @Slf4j public class AsyncHttpWriterTest { /** * Test successful writes of 4 records */ @Test public void testSuccessfulWrites() { MockHttpClient client = new MockHttpClient(); MockRequestBuilder requestBuilder = new MockRequestBuilder(); MockResponseHandler responseHandler = new MockResponseHandler(); MockAsyncHttpWriterBuilder builder = new MockAsyncHttpWriterBuilder(client, requestBuilder, responseHandler); TestAsyncHttpWriter asyncHttpWriter = new TestAsyncHttpWriter(builder); List<MockWriteCallback> callbacks = new ArrayList<>(); for (int i = 0; i < 4; i++) { callbacks.add(new MockWriteCallback()); } asyncHttpWriter.write(new Object(), callbacks.get(0)); asyncHttpWriter.write(new Object(), callbacks.get(1)); asyncHttpWriter.write(new Object(), callbacks.get(2)); try { asyncHttpWriter.flush(); } catch (IOException e) { Assert.fail("Flush failed"); } asyncHttpWriter.write(new Object(), callbacks.get(3)); try { asyncHttpWriter.close(); } catch (IOException e) { Assert.fail("Close failed"); } // Assert all successful callbacks are invoked for (MockWriteCallback callback : callbacks) { Assert.assertTrue(callback.isSuccess); } Assert.assertTrue(client.isCloseCalled); Assert.assertTrue(responseHandler.recordsInLastRequest.size() == 1); } @Test public void testSuccessfulWritesWithLimiter () { MockThrottledHttpClient client = new MockThrottledHttpClient(createMockBroker()); MockRequestBuilder requestBuilder = new MockRequestBuilder(); MockResponseHandler responseHandler = new MockResponseHandler(); MockAsyncHttpWriterBuilder builder = new MockAsyncHttpWriterBuilder(client, requestBuilder, responseHandler); TestAsyncHttpWriter asyncHttpWriter = new TestAsyncHttpWriter(builder); List<MockWriteCallback> callbacks = new 
ArrayList<>(); for (int i = 0; i < 50; i++) { MockWriteCallback callback = new MockWriteCallback(); callbacks.add(callback); asyncHttpWriter.write(new Object(), callback); } try { asyncHttpWriter.close(); } catch (IOException e) { Assert.fail("Close failed"); } // Assert all successful callbacks are invoked for (MockWriteCallback callback : callbacks) { Assert.assertTrue(callback.isSuccess); } Assert.assertTrue(client.getSendTimer().getCount() == 50); Assert.assertTrue(client.isCloseCalled); } private static SharedResourcesBroker createMockBroker() { Joiner JOINER = Joiner.on("."); Config config = ConfigFactory.parseMap(ImmutableMap.of( JOINER.join(BrokerConstants.GOBBLIN_BROKER_CONFIG_PREFIX, SharedLimiterFactory.NAME, SharedLimiterFactory.LIMITER_CLASS_KEY), "qps", JOINER.join(BrokerConstants.GOBBLIN_BROKER_CONFIG_PREFIX, SharedLimiterFactory.NAME, RateBasedLimiter.Factory.QPS_KEY), "10" )); SharedResourcesBrokerImpl broker = SharedResourcesBrokerFactory.<SimpleScopeType>createDefaultTopLevelBroker(config, SimpleScopeType.GLOBAL.defaultScopeInstance()); return broker; } /** * Test failure triggered by client error. 
No retries */ public void testClientError() { MockHttpClient client = new MockHttpClient(); MockRequestBuilder requestBuilder = new MockRequestBuilder(); MockResponseHandler responseHandler = new MockResponseHandler(); MockAsyncHttpWriterBuilder builder = new MockAsyncHttpWriterBuilder(client, requestBuilder, responseHandler); TestAsyncHttpWriter asyncHttpWriter = new TestAsyncHttpWriter(builder); responseHandler.type = StatusType.CLIENT_ERROR; MockWriteCallback callback = new MockWriteCallback(); asyncHttpWriter.write(new Object(), callback); boolean hasAnException = false; try { asyncHttpWriter.close(); } catch (Exception e) { hasAnException = true; } Assert.assertTrue(hasAnException); Assert.assertFalse(callback.isSuccess); Assert.assertTrue(client.isCloseCalled); // No retries are done Assert.assertTrue(client.attempts == 1); Assert.assertTrue(responseHandler.attempts == 1); } /** * Test max attempts triggered by failing to send. Attempt 3 times */ public void testMaxAttempts() { MockHttpClient client = new MockHttpClient(); MockRequestBuilder requestBuilder = new MockRequestBuilder(); MockResponseHandler responseHandler = new MockResponseHandler(); MockAsyncHttpWriterBuilder builder = new MockAsyncHttpWriterBuilder(client, requestBuilder, responseHandler); TestAsyncHttpWriter asyncHttpWriter = new TestAsyncHttpWriter(builder); client.shouldSendSucceed = false; MockWriteCallback callback = new MockWriteCallback(); asyncHttpWriter.write(new Object(), callback); boolean hasAnException = false; try { asyncHttpWriter.close(); } catch (Exception e) { hasAnException = true; } Assert.assertTrue(hasAnException); Assert.assertFalse(callback.isSuccess); Assert.assertTrue(client.isCloseCalled); Assert.assertTrue(client.attempts == AsyncHttpWriter.DEFAULT_MAX_ATTEMPTS); Assert.assertTrue(responseHandler.attempts == 0); } /** * Test server error. 
Attempt 3 times */ public void testServerError() { MockHttpClient client = new MockHttpClient(); MockRequestBuilder requestBuilder = new MockRequestBuilder(); MockResponseHandler responseHandler = new MockResponseHandler(); MockAsyncHttpWriterBuilder builder = new MockAsyncHttpWriterBuilder(client, requestBuilder, responseHandler); TestAsyncHttpWriter asyncHttpWriter = new TestAsyncHttpWriter(builder); responseHandler.type = StatusType.SERVER_ERROR; MockWriteCallback callback = new MockWriteCallback(); asyncHttpWriter.write(new Object(), callback); boolean hasAnException = false; try { asyncHttpWriter.close(); } catch (Exception e) { hasAnException = true; } Assert.assertTrue(hasAnException); Assert.assertFalse(callback.isSuccess); Assert.assertTrue(client.isCloseCalled); Assert.assertTrue(client.attempts == AsyncHttpWriter.DEFAULT_MAX_ATTEMPTS); Assert.assertTrue(responseHandler.attempts == AsyncHttpWriter.DEFAULT_MAX_ATTEMPTS); } class MockHttpClient implements HttpClient<HttpUriRequest, CloseableHttpResponse> { boolean isCloseCalled = false; int attempts = 0; boolean shouldSendSucceed = true; @Override public CloseableHttpResponse sendRequest(HttpUriRequest request) throws IOException { attempts++; if (shouldSendSucceed) { // We won't consume the response anyway return null; } throw new RuntimeException("Send failed"); } @Override public void sendAsyncRequest(HttpUriRequest request, Callback<CloseableHttpResponse> callback) { throw new UnsupportedOperationException(); } @Override public void close() throws IOException { isCloseCalled = true; } } class MockThrottledHttpClient extends ThrottledHttpClient<HttpUriRequest, CloseableHttpResponse> { boolean isCloseCalled = false; int attempts = 0; boolean shouldSendSucceed = true; public MockThrottledHttpClient (SharedResourcesBroker broker) { super (broker, "resource"); } @Override public CloseableHttpResponse sendRequestImpl(HttpUriRequest request) throws IOException { attempts++; if (shouldSendSucceed) { // We won't 
consume the response anyway return null; } throw new IOException("Send failed"); } @Override public void close() throws IOException { isCloseCalled = true; } @Override public void sendAsyncRequestImpl(HttpUriRequest request, Callback callback) { throw new UnsupportedOperationException(); } } class MockRequestBuilder implements AsyncRequestBuilder<Object, HttpUriRequest> { @Override public AsyncRequest<Object, HttpUriRequest> buildRequest(Queue<BufferedRecord<Object>> buffer) { BufferedRecord<Object> item = buffer.poll(); AsyncRequest<Object, HttpUriRequest> request = new AsyncRequest<>(); request.markRecord(item, 1); request.setRawRequest(null); return request; } } class MockResponseHandler implements ResponseHandler<HttpUriRequest, CloseableHttpResponse> { volatile StatusType type = StatusType.OK; int attempts = 0; List<Object> recordsInLastRequest; @Override public ResponseStatus handleResponse(Request<HttpUriRequest> request, CloseableHttpResponse response) { if (request instanceof AsyncRequest) { AsyncRequest asyncRequest = (AsyncRequest) request; recordsInLastRequest = new ArrayList<>(); asyncRequest.getThunks().forEach( thunk -> recordsInLastRequest.add(thunk)); } attempts++; switch (type) { case OK: return new ResponseStatus(StatusType.OK); case CLIENT_ERROR: return new ResponseStatus(StatusType.CLIENT_ERROR); case SERVER_ERROR: return new ResponseStatus(StatusType.SERVER_ERROR); } return null; } } class MockWriteCallback implements WriteCallback<Object> { boolean isSuccess = false; @Override public void onSuccess(WriteResponse<Object> writeResponse) { isSuccess = true; } @Override public void onFailure(Throwable throwable) { isSuccess = false; } } class MockAsyncHttpWriterBuilder extends AsyncHttpWriterBuilder<Object, HttpUriRequest, CloseableHttpResponse> { MockAsyncHttpWriterBuilder(HttpClient client, MockRequestBuilder requestBuilder, MockResponseHandler responseHandler) { this.client = client; this.asyncRequestBuilder = requestBuilder; 
this.responseHandler = responseHandler; this.state = new WorkUnitState(); this.queueCapacity = 2; this.maxAttempts = 3; } @Override public DataWriter<Object> build() throws IOException { return null; } @Override public AsyncHttpWriterBuilder<Object, HttpUriRequest, CloseableHttpResponse> fromConfig(Config config) { return null; } } class TestAsyncHttpWriter extends AsyncHttpWriter<Object, HttpUriRequest, CloseableHttpResponse> { public TestAsyncHttpWriter(AsyncHttpWriterBuilder builder) { super(builder); } } }
3,693
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin/http/ApacheHttpRequestBuilderTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.http; import java.io.IOException; import java.util.Queue; import org.apache.avro.generic.GenericRecord; import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.client.methods.RequestBuilder; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.mockito.ArgumentCaptor; import org.testng.Assert; import org.testng.annotations.Test; import org.apache.gobblin.HttpTestUtils; import org.apache.gobblin.async.AsyncRequest; import org.apache.gobblin.async.BufferedRecord; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @Test public class ApacheHttpRequestBuilderTest { /** * Build a {@link HttpUriRequest} from a {@link GenericRecord} */ public void testBuildWriteRequest() throws IOException { String urlTemplate = "http://www.test.com/a/part1:${part1}/a/part2:${part2}"; String verb = "post"; ApacheHttpRequestBuilder builder = spy(new ApacheHttpRequestBuilder(urlTemplate, verb, "application/json")); ArgumentCaptor<RequestBuilder> requestBuilderArgument = ArgumentCaptor.forClass(RequestBuilder.class); Queue<BufferedRecord<GenericRecord>> queue = 
HttpTestUtils.createQueue(1, false); AsyncRequest<GenericRecord, HttpUriRequest> request = builder.buildRequest(queue); verify(builder).build(requestBuilderArgument.capture()); RequestBuilder expected = RequestBuilder.post(); expected.setUri("http://www.test.com/a/part1:01/a/part2:02?param1=01"); String payloadStr = "{\"id\":\"id0\"}"; expected.addHeader(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.getMimeType()) .setEntity(new StringEntity(payloadStr, ContentType.APPLICATION_JSON)); // Compare HttpUriRequest HttpTestUtils.assertEqual(requestBuilderArgument.getValue(), expected); Assert.assertEquals(request.getRecordCount(), 1); Assert.assertEquals(queue.size(), 0); } }
3,694
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin/r2/R2RestRequestBuilderTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.r2; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.Queue; import org.apache.avro.generic.GenericRecord; import org.mockito.ArgumentCaptor; import org.testng.Assert; import org.testng.annotations.Test; import com.linkedin.data.DataMap; import com.linkedin.data.codec.JacksonDataCodec; import com.linkedin.r2.message.rest.RestRequest; import com.linkedin.r2.message.rest.RestRequestBuilder; import com.linkedin.restli.common.RestConstants; import org.apache.gobblin.HttpTestUtils; import org.apache.gobblin.async.AsyncRequest; import org.apache.gobblin.async.BufferedRecord; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @Test public class R2RestRequestBuilderTest { private static final JacksonDataCodec JACKSON_DATA_CODEC = new JacksonDataCodec(); /** * Build a {@link RestRequest} from a {@link GenericRecord} */ public void testBuildWriteRequest() throws URISyntaxException, IOException { String urlTemplate = "http://www.test.com/a/part1:${part1}/a/part2:${part2}"; String verb = "update"; String protocolVersion = "2.0.0"; R2RestRequestBuilder builder = spy(new R2RestRequestBuilder(urlTemplate, 
verb, protocolVersion)); ArgumentCaptor<RestRequestBuilder> requestBuilderArgument = ArgumentCaptor.forClass(RestRequestBuilder.class); Queue<BufferedRecord<GenericRecord>> queue = HttpTestUtils.createQueue(1, false); AsyncRequest<GenericRecord, RestRequest> request = builder.buildRequest(queue); verify(builder).build(requestBuilderArgument.capture()); RestRequestBuilder expected = new RestRequestBuilder(new URI("http://www.test.com/a/part1:01/a/part2:02?param1=01")); expected.setMethod("PUT"); expected.setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, protocolVersion); expected.setHeader(RestConstants.HEADER_RESTLI_REQUEST_METHOD, verb.toLowerCase()); expected.setHeader(RestConstants.HEADER_CONTENT_TYPE, RestConstants.HEADER_VALUE_APPLICATION_JSON); DataMap data = new DataMap(); data.put("id", "id0"); expected.setEntity(JACKSON_DATA_CODEC.mapToBytes(data)); HttpTestUtils.assertEqual(requestBuilderArgument.getValue(), expected); Assert.assertEquals(request.getRecordCount(), 1); Assert.assertEquals(queue.size(), 0); } }
3,695
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/test/java/org/apache/gobblin/r2/R2ClientFactoryTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.r2; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutionException; import org.apache.curator.test.TestingServer; import org.testng.annotations.Test; import com.google.common.util.concurrent.SettableFuture; import com.linkedin.common.callback.Callback; import com.linkedin.common.util.None; import com.linkedin.r2.transport.common.Client; import com.typesafe.config.ConfigException; import com.typesafe.config.ConfigFactory; import junit.framework.Assert; import org.apache.gobblin.security.ssl.SSLContextFactory; @Test public class R2ClientFactoryTest { public void testHttpClient() { R2ClientFactory factory = new R2ClientFactory(R2ClientFactory.Schema.HTTP); Map<String, Object> values = new HashMap<>(); // No SSL Client client = factory.createInstance(ConfigFactory.parseMap(values)); shutdown(client); // With SSL values.put(R2ClientFactory.SSL_ENABLED, true); values.put(SSLContextFactory.KEY_STORE_FILE_PATH, "identity.p12"); values.put(SSLContextFactory.TRUST_STORE_FILE_PATH, "certs"); values.put(SSLContextFactory.KEY_STORE_PASSWORD, "keyStorePassword"); values.put(SSLContextFactory.TRUST_STORE_PASSWORD, "trustStorePassword"); 
values.put(SSLContextFactory.KEY_STORE_TYPE, "PKCS12"); try { factory.createInstance(ConfigFactory.parseMap(values)); } catch (ConfigException | IllegalArgumentException e) { Assert.fail(); } catch (Exception e) { // OK } } public void testD2Client() throws Exception { R2ClientFactory factory = new R2ClientFactory(R2ClientFactory.Schema.D2); TestingServer zkServer = new TestingServer(-1); Map<String, Object> values = new HashMap<>(); values.put("d2.zkHosts", zkServer.getConnectString()); // No SSL Client client = factory.createInstance(ConfigFactory.parseMap(values)); shutdown(client); // With SSL final String confPrefix = "d2."; values.put(confPrefix + R2ClientFactory.SSL_ENABLED, true); values.put(confPrefix + SSLContextFactory.KEY_STORE_FILE_PATH, "identity.p12"); values.put(confPrefix + SSLContextFactory.TRUST_STORE_FILE_PATH, "certs"); values.put(confPrefix + SSLContextFactory.KEY_STORE_PASSWORD, "keyStorePassword"); values.put(confPrefix + SSLContextFactory.TRUST_STORE_PASSWORD, "trustStorePassword"); values.put(confPrefix + SSLContextFactory.KEY_STORE_TYPE, "PKCS12"); try { factory.createInstance(ConfigFactory.parseMap(values)); } catch (ConfigException | IllegalArgumentException e) { Assert.fail("Unexpected config exception"); } catch (Exception e) { // OK } zkServer.close(); } private void shutdown(Client client) { final SettableFuture<None> future = SettableFuture.create(); client.shutdown(new Callback<None>() { @Override public void onError(Throwable e) { future.setException(e); } @Override public void onSuccess(None result) { // OK future.set(result); } }); try { // Synchronously wait for shutdown to complete future.get(); } catch (InterruptedException | ExecutionException e) { Assert.fail("Client shutdown failed"); } } }
3,696
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/converter/HttpJoinConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter; import java.io.IOException; import java.util.Queue; import java.util.concurrent.LinkedBlockingDeque; import org.apache.avro.generic.GenericRecord; import com.google.common.collect.ImmutableMap; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.async.AsyncRequest; import org.apache.gobblin.async.AsyncRequestBuilder; import org.apache.gobblin.async.BufferedRecord; import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes; import org.apache.gobblin.broker.iface.SharedResourcesBroker; import org.apache.gobblin.config.ConfigBuilder; import org.apache.gobblin.configuration.State; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.http.HttpClient; import org.apache.gobblin.http.HttpOperation; import org.apache.gobblin.http.ResponseHandler; import org.apache.gobblin.http.ResponseStatus; import org.apache.gobblin.utils.HttpConstants; import org.apache.gobblin.writer.WriteCallback; /** * This converter converts an input record (DI) to an output record (DO) which * contains original input data and http request & response info. 
* * Sequence: * Convert DI to HttpOperation * Convert HttpOperation to RQ (by internal AsyncRequestBuilder) * Execute http request, get response RP (by HttpClient) * Combine info (DI, RQ, RP, status, etc..) to generate output DO */ @Slf4j public abstract class HttpJoinConverter<SI, SO, DI, DO, RQ, RP> extends Converter<SI, SO, DI, DO> { public static final String CONF_PREFIX = "gobblin.converter.http."; public static final Config DEFAULT_FALLBACK = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder() .put(HttpConstants.CONTENT_TYPE, "application/json") .put(HttpConstants.VERB, "GET") .build()); protected HttpClient<RQ, RP> httpClient = null; protected ResponseHandler<RQ, RP> responseHandler = null; protected AsyncRequestBuilder<GenericRecord, RQ> requestBuilder = null; public HttpJoinConverter init(WorkUnitState workUnitState) { super.init(workUnitState); Config config = ConfigBuilder.create().loadProps(workUnitState.getProperties(), CONF_PREFIX).build(); config = config.withFallback(DEFAULT_FALLBACK); httpClient = createHttpClient(config, workUnitState.getTaskBroker()); responseHandler = createResponseHandler(config); requestBuilder = createRequestBuilder(config); return this; } @Override public final SO convertSchema(SI inputSchema, WorkUnitState workUnitState) throws SchemaConversionException { return convertSchemaImpl(inputSchema, workUnitState); } protected abstract HttpClient<RQ, RP> createHttpClient(Config config, SharedResourcesBroker<GobblinScopeTypes> broker); protected abstract ResponseHandler<RQ, RP> createResponseHandler(Config config); protected abstract AsyncRequestBuilder<GenericRecord, RQ> createRequestBuilder(Config config); protected abstract HttpOperation generateHttpOperation (DI inputRecord, State state); protected abstract SO convertSchemaImpl (SI inputSchema, WorkUnitState workUnitState) throws SchemaConversionException; protected abstract DO convertRecordImpl (SO outputSchema, DI input, RQ rawRequest, ResponseStatus status) throws 
DataConversionException; @Override public final Iterable<DO> convertRecord(SO outputSchema, DI inputRecord, WorkUnitState workUnitState) throws DataConversionException { // Convert DI to HttpOperation HttpOperation operation = generateHttpOperation(inputRecord, workUnitState); BufferedRecord<GenericRecord> bufferedRecord = new BufferedRecord<>(operation, WriteCallback.EMPTY); // Convert HttpOperation to RQ Queue<BufferedRecord<GenericRecord>> buffer = new LinkedBlockingDeque<>(); buffer.add(bufferedRecord); AsyncRequest<GenericRecord, RQ> request = this.requestBuilder.buildRequest(buffer); RQ rawRequest = request.getRawRequest(); // Execute query and get response try { RP response = httpClient.sendRequest(rawRequest); ResponseStatus status = responseHandler.handleResponse(request, response); switch (status.getType()) { case OK: case CLIENT_ERROR: // Convert (DI, RQ, RP etc..) to output DO log.debug ("{} send with status type {}", rawRequest, status.getType()); DO output = convertRecordImpl (outputSchema, inputRecord, rawRequest, status); return new SingleRecordIterable<>(output); case SERVER_ERROR: // Server side error. Retry throw new DataConversionException(rawRequest + " send failed due to server error"); default: throw new DataConversionException(rawRequest + " Should not reach here"); } } catch (IOException e) { throw new DataConversionException(e); } } public void close() throws IOException { this.httpClient.close(); } }
3,697
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/converter/AvroApacheHttpJoinConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;

import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.http.ApacheHttpAsyncClient;
import org.apache.gobblin.http.ApacheHttpResponseHandler;
import org.apache.gobblin.http.ApacheHttpResponseStatus;
import org.apache.gobblin.http.ApacheHttpRequestBuilder;
import org.apache.gobblin.http.HttpRequestResponseRecord;
import org.apache.gobblin.http.ResponseStatus;
import org.apache.gobblin.utils.HttpConstants;


/**
 * Http join converter implemented on top of the Apache HttpComponents
 * async client. Supplies the Apache-specific client, response handler and
 * request builder, and copies the request/response pair into the Avro
 * output record.
 */
@Slf4j
public class AvroApacheHttpJoinConverter extends AvroHttpJoinConverter<HttpUriRequest, HttpResponse> {

  @Override
  public ApacheHttpAsyncClient createHttpClient(Config config, SharedResourcesBroker<GobblinScopeTypes> broker) {
    // A fresh builder per converter instance; the client wrapper owns its lifecycle.
    return new ApacheHttpAsyncClient(HttpAsyncClientBuilder.create(), config, broker);
  }

  @Override
  public ApacheHttpResponseHandler createResponseHandler(Config config) {
    return new ApacheHttpResponseHandler();
  }

  @Override
  protected ApacheHttpRequestBuilder createRequestBuilder(Config config) {
    // All three settings are mandatory: Config.getString throws if a key is absent.
    String template = config.getString(HttpConstants.URL_TEMPLATE);
    String httpVerb = config.getString(HttpConstants.VERB);
    String mimeType = config.getString(HttpConstants.CONTENT_TYPE);
    return new ApacheHttpRequestBuilder(template, httpVerb, mimeType);
  }

  /**
   * Populate the {@code HttpRequestResponse} field of {@code outputRecord}
   * from the raw request and the (Apache-specific) response status.
   */
  @Override
  protected void fillHttpOutputData(Schema httpOutputSchema, GenericRecord outputRecord, HttpUriRequest rawRequest,
      ResponseStatus status)
      throws IOException {
    ApacheHttpResponseStatus apacheStatus = (ApacheHttpResponseStatus) status;

    HttpRequestResponseRecord record = new HttpRequestResponseRecord();
    record.setRequestUrl(rawRequest.getURI().toASCIIString());
    record.setMethod(rawRequest.getMethod());
    record.setStatusCode(apacheStatus.getStatusCode());
    record.setContentType(apacheStatus.getContentType());

    // A response may legitimately have no body; only wrap when present.
    byte[] content = apacheStatus.getContent();
    if (content == null) {
      record.setBody(null);
    } else {
      record.setBody(ByteBuffer.wrap(content));
    }

    outputRecord.put(HTTP_REQUEST_RESPONSE_FIELD, record);
  }
}
3,698
0
Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/converter/AvroR2JoinConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;

import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestResponse;
import com.linkedin.r2.transport.common.Client;
import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.async.AsyncRequestBuilder;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.http.HttpClient;
import org.apache.gobblin.http.HttpRequestResponseRecord;
import org.apache.gobblin.http.ResponseHandler;
import org.apache.gobblin.http.ResponseStatus;
import org.apache.gobblin.r2.R2ClientFactory;
import org.apache.gobblin.r2.R2Client;
import org.apache.gobblin.r2.R2ResponseStatus;
import org.apache.gobblin.r2.R2RestRequestBuilder;
import org.apache.gobblin.r2.R2RestResponseHandler;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.utils.HttpConstants;


/**
 * Http join converter implemented on top of the rest.li R2 transport.
 * Supports both plain http(s) and D2 endpoints (selected by the URL
 * template's scheme) and copies the request/response pair into the Avro
 * output record.
 */
@Slf4j
public class AvroR2JoinConverter extends AvroHttpJoinConverter<RestRequest, RestResponse> {
  /** Rest.li protocol version used when none is configured. */
  public static final String DEFAULT_PROTOCOL_VERSION = "1.0.0";

  /**
   * Populate the request/response field of {@code outputRecord} from the raw
   * R2 request and the (R2-specific) response status.
   */
  @Override
  protected void fillHttpOutputData(Schema schema, GenericRecord outputRecord, RestRequest restRequest,
      ResponseStatus status)
      throws IOException {
    R2ResponseStatus r2ResponseStatus = (R2ResponseStatus) status;

    HttpRequestResponseRecord record = new HttpRequestResponseRecord();
    record.setRequestUrl(restRequest.getURI().toASCIIString());
    record.setMethod(restRequest.getMethod());
    record.setStatusCode(r2ResponseStatus.getStatusCode());
    record.setContentType(r2ResponseStatus.getContentType());
    // A response may legitimately have no body; only convert when present.
    record.setBody(r2ResponseStatus.getContent() == null ? null : r2ResponseStatus.getContent().asByteBuffer());

    // Use the inherited field-name constant instead of a duplicated string
    // literal, matching AvroApacheHttpJoinConverter.
    outputRecord.put(HTTP_REQUEST_RESPONSE_FIELD, record);
  }

  @Override
  protected HttpClient<RestRequest, RestResponse> createHttpClient(Config config,
      SharedResourcesBroker<GobblinScopeTypes> broker) {
    String urlTemplate = config.getString(HttpConstants.URL_TEMPLATE);

    // By default, use http schema; switch to D2 when the template targets a D2 URI.
    R2ClientFactory.Schema schema = R2ClientFactory.Schema.HTTP;
    if (urlTemplate.startsWith(HttpConstants.SCHEMA_D2)) {
      schema = R2ClientFactory.Schema.D2;
    }

    R2ClientFactory factory = new R2ClientFactory(schema);
    Client client = factory.createInstance(config);
    return new R2Client(client, config, broker);
  }

  @Override
  protected ResponseHandler<RestRequest, RestResponse> createResponseHandler(Config config) {
    return new R2RestResponseHandler();
  }

  @Override
  protected AsyncRequestBuilder<GenericRecord, RestRequest> createRequestBuilder(Config config) {
    // URL template and verb are mandatory; the protocol version is optional.
    String urlTemplate = config.getString(HttpConstants.URL_TEMPLATE);
    String verb = config.getString(HttpConstants.VERB);
    String protocolVersion =
        ConfigUtils.getString(config, HttpConstants.PROTOCOL_VERSION, DEFAULT_PROTOCOL_VERSION);

    return new R2RestRequestBuilder(urlTemplate, verb, protocolVersion);
  }
}
3,699