index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/ConfigStoreUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.ConfigClientUtils;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.DatasetFilterUtils;
import org.apache.gobblin.util.PathUtils;
@Slf4j
public class ConfigStoreUtils {
  /**
   * Used as the grouping functionality in config-store to filter certain data nodes.
   */
  public static final String GOBBLIN_CONFIG_TAGS_WHITELIST = "gobblin.config.tags.whitelist";
  public static final String GOBBLIN_CONFIG_TAGS_BLACKLIST = "gobblin.config.tags.blacklist";
  public static final String GOBBLIN_CONFIG_FILTER = "gobblin.config.filter";
  public static final String GOBBLIN_CONFIG_COMMONPATH = "gobblin.config.commonPath";

  /**
   * Will return the list of URIs given which are importing tag {@param tagUri}.
   * Only URIs whose string form contains {@code filterString} are kept.
   *
   * @param configClient client used to query the config store
   * @param tagUri the tag node whose importers are requested
   * @param filterString substring every returned URI must contain
   * @param runtimeConfig optional runtime overrides passed through to the config client
   */
  public static Collection<URI> getTopicsURIFromConfigStore(ConfigClient configClient, Path tagUri, String filterString,
      Optional<Config> runtimeConfig) {
    try {
      Collection<URI> importedBy = configClient.getImportedBy(tagUri.toUri(), true, runtimeConfig);
      return importedBy.stream().filter((URI u) -> u.toString().contains(filterString)).collect(Collectors.toList());
    } catch (ConfigStoreFactoryDoesNotExistsException | ConfigStoreCreationException e) {
      // NOTE(review): throwing Error (rather than RuntimeException) is unusual; kept as-is so the
      // throwable type seen by existing callers does not change.
      throw new Error(e);
    }
  }

  /**
   * Returns the config-store URI from {@link ConfigurationKeys#CONFIG_MANAGEMENT_STORE_URI}, or
   * absent when the property is blank or the store is disabled via
   * {@link ConfigurationKeys#CONFIG_MANAGEMENT_STORE_ENABLED}.
   */
  public static Optional<String> getConfigStoreUri(Properties properties) {
    // The store must be explicitly enabled; otherwise the URI is ignored even when set.
    if (!Boolean.parseBoolean(properties.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_ENABLED,
        ConfigurationKeys.DEFAULT_CONFIG_MANAGEMENT_STORE_ENABLED))) {
      return Optional.<String>absent();
    }
    String uri = properties.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI);
    return StringUtils.isNotBlank(uri) ? Optional.of(uri) : Optional.<String>absent();
  }

  /**
   * Extracts the topic name from a config-store data-node URI: the last path component.
   */
  public static String getTopicNameFromURI(URI uri) {
    Path path = new Path(uri);
    return path.getName();
  }

  /**
   * Builds the config-store URI for a topic located under {@code commonPath} inside the store
   * rooted at {@code configStoreUri}.
   *
   * @throws URISyntaxException if {@code configStoreUri} is not a valid URI
   */
  public static URI getUriStringForTopic(String topicName, String commonPath, String configStoreUri)
      throws URISyntaxException {
    Path fullTopicPathInConfigStore = PathUtils.mergePaths(new Path(commonPath), new Path(topicName));
    URI topicUri = getUriFromPath(fullTopicPathInConfigStore, configStoreUri);
    log.info("URI for topic is : " + topicUri.toString());
    return topicUri;
  }

  /**
   * Used when topic name needs to be fetched from Properties object, assuming we knew the topicKey.
   *
   * @throws IllegalArgumentException if {@code topicKey} is missing from {@code properties}
   */
  public static Optional<Config> getConfigForTopic(Properties properties, String topicKey, ConfigClient configClient) {
    Preconditions.checkArgument(properties.containsKey(topicKey), "Missing required property " + topicKey);
    return getConfigForTopicWithName(properties, properties.getProperty(topicKey), configClient);
  }

  /**
   * Used when topic name is known.
   * Returns absent when no config store is configured, or when the topic URI cannot be built
   * (the URISyntaxException is logged, not propagated).
   */
  public static Optional<Config> getConfigForTopicWithName(Properties properties, String topicName, ConfigClient configClient) {
    Optional<String> configStoreUri = ConfigStoreUtils.getConfigStoreUri(properties);
    Optional<Config> config = Optional.<Config>absent();
    if (!configStoreUri.isPresent()) {
      return config;
    }
    try {
      Preconditions.checkArgument(properties.containsKey(GOBBLIN_CONFIG_COMMONPATH),
          "Missing required property " + GOBBLIN_CONFIG_COMMONPATH);
      String commonPath = properties.getProperty(GOBBLIN_CONFIG_COMMONPATH);
      config = Optional.fromNullable(
          ConfigStoreUtils.getConfig(configClient,
              ConfigStoreUtils.getUriStringForTopic(topicName, commonPath, configStoreUri.get()),
              ConfigClientUtils.getOptionalRuntimeConfig(properties)));
    } catch (URISyntaxException e) {
      log.error("Unable to get config", e);
    }
    return config;
  }

  /**
   * Wrapper to convert Checked Exception to Unchecked Exception.
   * Easy to use in lambda expressions.
   */
  public static Config getConfig(ConfigClient client, URI u, Optional<Config> runtimeConfig) {
    try {
      return client.getConfig(u, runtimeConfig);
    } catch (ConfigStoreFactoryDoesNotExistsException | ConfigStoreCreationException e) {
      // NOTE(review): kept as Error for backward compatibility (see getTopicsURIFromConfigStore).
      throw new Error(e);
    }
  }

  /**
   * Get topics from config store.
   * Topics will either be whitelisted or blacklisted using tag.
   *
   * If tags are not provided, it will return all topics.
   */
  public static List<KafkaTopic> getTopicsFromConfigStore(Properties properties, String configStoreUri,
      GobblinKafkaConsumerClient kafkaConsumerClient) {
    ConfigClient configClient = ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
    // Fetch every topic from the broker first; the tag filtering below narrows the result.
    State state = new State();
    state.setProp(KafkaSource.TOPIC_WHITELIST, ".*");
    state.setProp(KafkaSource.TOPIC_BLACKLIST, StringUtils.EMPTY);
    List<KafkaTopic> allTopics =
        kafkaConsumerClient.getFilteredTopics(DatasetFilterUtils.getPatternList(state, KafkaSource.TOPIC_BLACKLIST),
            DatasetFilterUtils.getPatternList(state, KafkaSource.TOPIC_WHITELIST));
    Optional<Config> runtimeConfig = ConfigClientUtils.getOptionalRuntimeConfig(properties);
    if (properties.containsKey(GOBBLIN_CONFIG_TAGS_WHITELIST)) {
      List<String> whitelistedTopics = getListOfTopicNamesByFilteringTag(properties, configClient, runtimeConfig,
          configStoreUri, GOBBLIN_CONFIG_TAGS_WHITELIST);
      return allTopics.stream()
          .filter((KafkaTopic p) -> whitelistedTopics.contains(p.getName()))
          .collect(Collectors.toList());
    } else if (properties.containsKey(GOBBLIN_CONFIG_TAGS_BLACKLIST)) {
      List<String> blacklistedTopics = getListOfTopicNamesByFilteringTag(properties, configClient, runtimeConfig,
          configStoreUri, GOBBLIN_CONFIG_TAGS_BLACKLIST);
      return allTopics.stream()
          .filter((KafkaTopic p) -> !blacklistedTopics.contains(p.getName()))
          .collect(Collectors.toList());
    } else {
      log.warn("None of the blacklist or whitelist tags are provided");
      return allTopics;
    }
  }

  /**
   * Using the tag feature provided by Config-Store for grouping, getting a list of topics (case-sensitive,
   * need to be matched with what would be returned from kafka broker) tagged by the tag value specified
   * in job configuration.
   *
   * @throws IllegalArgumentException if {@link #GOBBLIN_CONFIG_FILTER} is missing from {@code properties}
   */
  public static List<String> getListOfTopicNamesByFilteringTag(Properties properties, ConfigClient configClient,
      Optional<Config> runtimeConfig, String configStoreUri, String tagConfName) {
    Preconditions.checkArgument(properties.containsKey(GOBBLIN_CONFIG_FILTER),
        "Missing required property " + GOBBLIN_CONFIG_FILTER);
    String filterString = properties.getProperty(GOBBLIN_CONFIG_FILTER);
    // Fall back to the store root if the tag URI cannot be constructed (error is logged only).
    Path tagUri = new Path("/");
    try {
      tagUri = new Path(getUriFromPath(new Path(properties.getProperty(tagConfName)), configStoreUri));
    } catch (URISyntaxException ue) {
      log.error("Cannot construct a Tag URI due to the exception:", ue);
    }
    List<String> taggedTopics = new ArrayList<>();
    ConfigStoreUtils.getTopicsURIFromConfigStore(configClient, tagUri, filterString, runtimeConfig)
        .forEach(((URI u) -> taggedTopics.add(ConfigStoreUtils.getTopicNameFromURI(u))));
    return taggedTopics;
  }

  /**
   * Construct the URI for a Config-Store node given a path.
   * The implementation will be based on scheme, while the signature of this method will not be subject to
   * different implementation.
   *
   * The implementation will be different since Fs-based config-store simply append dataNode's path in the end,
   * while ivy-based config-store will require query to store those information.
   *
   * @param path The relative path of a node inside Config-Store.
   * @param configStoreUri The config store URI.
   * @return The URI to inspect a data node represented by path inside Config Store.
   * @throws URISyntaxException
   */
  private static URI getUriFromPath(Path path, String configStoreUri) throws URISyntaxException {
    URI storeUri = new URI(configStoreUri);
    return new URI(storeUri.getScheme(), storeUri.getAuthority(),
        PathUtils.mergePaths(new Path(storeUri.getPath()), path).toString(), storeUri.getQuery(), storeUri.getFragment());
  }

  /**
   * Shortlist topics from config store based on whitelist/blacklist tags and
   * add it to {@param whitelist}/{@param blacklist}.
   *
   * If tags are not provided, blacklist and whitelist won't be modified.
   * @deprecated Since this method contains implementation-specific way to construct TagURI inside Config-Store.
   */
  @Deprecated
  public static void setTopicsFromConfigStore(Properties properties, Set<String> blacklist, Set<String> whitelist,
      final String _blacklistTopicKey, final String _whitelistTopicKey) {
    Optional<String> configStoreUri = getConfigStoreUri(properties);
    if (!configStoreUri.isPresent()) {
      return;
    }
    ConfigClient configClient = ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
    Optional<Config> runtimeConfig = ConfigClientUtils.getOptionalRuntimeConfig(properties);
    if (properties.containsKey(GOBBLIN_CONFIG_TAGS_WHITELIST)) {
      addTaggedTopics(properties, configClient, runtimeConfig, configStoreUri.get(),
          GOBBLIN_CONFIG_TAGS_WHITELIST, _whitelistTopicKey, whitelist);
    } else if (properties.containsKey(GOBBLIN_CONFIG_TAGS_BLACKLIST)) {
      addTaggedTopics(properties, configClient, runtimeConfig, configStoreUri.get(),
          GOBBLIN_CONFIG_TAGS_BLACKLIST, _blacklistTopicKey, blacklist);
    } else {
      log.warn("None of the blacklist or whitelist tags are provided");
    }
  }

  /**
   * Shared implementation of the whitelist/blacklist branches of {@link #setTopicsFromConfigStore}:
   * resolves the tag under {@code tagConfName}, finds all topics importing it that pass the filter,
   * keeps those whose config sets {@code topicEnabledKey} to true, and adds their names to {@code result}.
   */
  private static void addTaggedTopics(Properties properties, ConfigClient configClient,
      Optional<Config> runtimeConfig, String configStoreUri, String tagConfName, String topicEnabledKey,
      Set<String> result) {
    Preconditions.checkArgument(properties.containsKey(GOBBLIN_CONFIG_FILTER),
        "Missing required property " + GOBBLIN_CONFIG_FILTER);
    String filterString = properties.getProperty(GOBBLIN_CONFIG_FILTER);
    // Deprecated URI construction: the tag path is appended directly to the store URI.
    Path tagUri = PathUtils.mergePaths(new Path(configStoreUri),
        new Path(properties.getProperty(tagConfName)));
    getTopicsURIFromConfigStore(configClient, tagUri, filterString, runtimeConfig).stream()
        .filter((URI u) -> ConfigUtils.getBoolean(getConfig(configClient, u, runtimeConfig), topicEnabledKey, false))
        .forEach(((URI u) -> result.add(getTopicNameFromURI(u))));
  }
}
| 3,300 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import java.util.List;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
/**
 * A Utils class for Kafka.
 */
@Slf4j
public class KafkaUtils {

  // Separates the topic name from the partition id in strings like "myTopic-3".
  private static final String TOPIC_PARTITION_DELIMITER = "-";

  /**
   * Get topic name from a {@link State} object. The {@link State} should contain property
   * {@link KafkaSource#TOPIC_NAME}.
   *
   * @throws IllegalArgumentException if the property is missing
   */
  public static String getTopicName(State state) {
    Preconditions.checkArgument(state.contains(KafkaSource.TOPIC_NAME),
        "Missing configuration property " + KafkaSource.TOPIC_NAME);
    return state.getProp(KafkaSource.TOPIC_NAME);
  }

  /**
   * Get {@link KafkaPartition} from a {@link State} object. The {@link State} should contain properties
   * {@link KafkaSource#TOPIC_NAME}, {@link KafkaSource#PARTITION_ID}, and may optionally contain
   * {@link KafkaSource#LEADER_ID} and {@link KafkaSource#LEADER_HOSTANDPORT}.
   *
   * @throws IllegalArgumentException if topic name or partition id is missing
   */
  public static KafkaPartition getPartition(State state) {
    Preconditions.checkArgument(state.contains(KafkaSource.TOPIC_NAME),
        "Missing configuration property " + KafkaSource.TOPIC_NAME);
    Preconditions.checkArgument(state.contains(KafkaSource.PARTITION_ID),
        "Missing configuration property " + KafkaSource.PARTITION_ID);
    KafkaPartition.Builder builder = new KafkaPartition.Builder().withTopicName(state.getProp(KafkaSource.TOPIC_NAME))
        .withId(state.getPropAsInt(KafkaSource.PARTITION_ID));
    // Leader information is optional; only set it on the builder when present.
    if (state.contains(KafkaSource.LEADER_ID)) {
      builder = builder.withLeaderId(state.getPropAsInt(KafkaSource.LEADER_ID));
    }
    if (state.contains(KafkaSource.LEADER_HOSTANDPORT)) {
      builder = builder.withLeaderHostAndPort(state.getProp(KafkaSource.LEADER_HOSTANDPORT));
    }
    return builder.build();
  }

  /**
   * Get a list of {@link KafkaPartition}s from a {@link State} object. The given {@link State} should contain property
   * {@link KafkaSource#TOPIC_NAME}. If there are multiple partitions in the {@link State}, all partitions should have
   * the same topic name.
   *
   * It first checks whether the given {@link State} contains "partition.id.i", "leader.id.i" and
   * "leader.hostandport.i", i = 0,1,2,...
   *
   * Otherwise it will call {@link #getPartition(State)}.
   */
  public static List<KafkaPartition> getPartitions(State state) {
    List<KafkaPartition> partitions = Lists.newArrayList();
    String topicName = state.getProp(KafkaSource.TOPIC_NAME);
    // getPartitionPropName is reused here with a property key (not a topic name) as its first
    // argument, producing indexed keys such as "partition.id.0", "partition.id.1", ...
    for (int i = 0;; i++) {
      if (!state.contains(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, i))) {
        break;
      }
      KafkaPartition partition = new KafkaPartition.Builder().withTopicName(topicName)
          .withId(state.getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, i)))
          .withLeaderId(state.getPropAsInt(KafkaUtils.getPartitionPropName(KafkaSource.LEADER_ID, i)))
          .withLeaderHostAndPort(state.getProp(KafkaUtils.getPartitionPropName(KafkaSource.LEADER_HOSTANDPORT, i)))
          .build();
      partitions.add(partition);
    }
    // No indexed partitions found: fall back to the single-partition properties.
    if (partitions.isEmpty()) {
      partitions.add(getPartition(state));
    }
    return partitions;
  }

  /**
   * This method returns topicName + '.' + partitionId.
   * It is also used more generally to build indexed property keys, with a property key
   * passed as the first argument (see {@link #getPartitions(State)}).
   */
  public static String getPartitionPropName(String topicName, int partitionId) {
    return topicName + "." + partitionId;
  }

  /**
   * Determines whether the given {@link State} contains "[topicname].[partitionid].avg.record.size".
   */
  public static boolean containsPartitionAvgRecordSize(State state, KafkaPartition partition) {
    return state.contains(
        getPartitionPropName(partition.getTopicName(), partition.getId()) + "." + ConfigurationKeys.AVG_RECORD_SIZE);
  }

  /**
   * Get the average record size of a partition, which is stored in property
   * "[topicname].[partitionid].avg.record.size".
   * The property must be present in the state (there is no default value — despite what this
   * doc previously claimed); use {@link #containsPartitionAvgRecordSize(State, KafkaPartition)}
   * to check before calling.
   */
  public static long getPartitionAvgRecordSize(State state, KafkaPartition partition) {
    return state.getPropAsLong(
        getPartitionPropName(partition.getTopicName(), partition.getId()) + "." + ConfigurationKeys.AVG_RECORD_SIZE);
  }

  /**
   * Set the average record size of a partition, which will be stored in property
   * "[topicname].[partitionid].avg.record.size".
   */
  public static void setPartitionAvgRecordSize(State state, KafkaPartition partition, long size) {
    state.setProp(getPartitionPropName(partition.getTopicName(), partition.getId()) + "." + ConfigurationKeys.AVG_RECORD_SIZE,
        size);
  }

  /**
   * Determines whether the given {@link State} contains "[topicname].[partitionid].avg.record.millis".
   */
  public static boolean containsPartitionAvgRecordMillis(State state, KafkaPartition partition) {
    return state.contains(
        getPartitionPropName(partition.getTopicName(), partition.getId()) + "." + KafkaSource.AVG_RECORD_MILLIS);
  }

  /**
   * Get the average time to pull a record of a partition, which is stored in property
   * "[topicname].[partitionid].avg.record.millis".
   * The property must be present in the state (there is no default value — despite what this
   * doc previously claimed); use {@link #containsPartitionAvgRecordMillis(State, KafkaPartition)}
   * to check before calling. The result is capped at
   * {@link ConfigurationKeys#KAFKA_SOURCE_AVG_FETCH_TIME_CAP} when that cap is positive.
   */
  public static double getPartitionAvgRecordMillis(State state, KafkaPartition partition) {
    double avgRecordMillis = state.getPropAsDouble(
        getPartitionPropName(partition.getTopicName(), partition.getId()) + "." + KafkaSource.AVG_RECORD_MILLIS);
    // cap to prevent a poorly behaved topic from impacting the bin-packing
    int avgFetchTimeCap = state.getPropAsInt(ConfigurationKeys.KAFKA_SOURCE_AVG_FETCH_TIME_CAP,
        ConfigurationKeys.DEFAULT_KAFKA_SOURCE_AVG_FETCH_TIME_CAP);
    if (avgFetchTimeCap > 0 && avgRecordMillis > avgFetchTimeCap) {
      log.info("Topic {} partition {} has an average fetch time of {}, capping it to {}", partition.getTopicName(),
          partition.getId(), avgRecordMillis, avgFetchTimeCap);
      avgRecordMillis = avgFetchTimeCap;
    }
    return avgRecordMillis;
  }

  /**
   * Set the average time in milliseconds to pull a record of a partition, which will be stored in property
   * "[topicname].[partitionid].avg.record.millis".
   */
  public static void setPartitionAvgRecordMillis(State state, KafkaPartition partition, double millis) {
    state.setProp(
        getPartitionPropName(partition.getTopicName(), partition.getId()) + "." + KafkaSource.AVG_RECORD_MILLIS,
        millis);
  }

  /**
   * Get a property as long from a work unit that may or may not be a multiworkunit.
   * This method is needed because the SingleLevelWorkUnitPacker does not squeeze work units
   * into a multiworkunit, and thus does not append the partitionId to property keys, while
   * the BiLevelWorkUnitPacker does.
   * Return 0 as default if key not found in either form.
   */
  public static long getPropAsLongFromSingleOrMultiWorkUnitState(WorkUnitState workUnitState,
      String key, int partitionId) {
    return Long.parseLong(workUnitState.contains(key) ? workUnitState.getProp(key)
        : workUnitState.getProp(KafkaUtils.getPartitionPropName(key, partitionId), "0"));
  }

  /**
   * Get topic name from a topic partition.
   * Everything before the last "-" is treated as the topic name, so topic names that themselves
   * contain "-" are preserved (e.g. "my-topic-3" yields "my-topic").
   *
   * @param topicPartition a string of the form "[topicname]-[partitionid]"
   * @throws IllegalArgumentException if the input contains no "-"
   */
  public static String getTopicNameFromTopicPartition(String topicPartition) {
    Preconditions.checkArgument(topicPartition.contains(TOPIC_PARTITION_DELIMITER));
    List<String> parts = Splitter.on(TOPIC_PARTITION_DELIMITER).splitToList(topicPartition);
    return Joiner.on(TOPIC_PARTITION_DELIMITER).join(parts.subList(0, parts.size() - 1));
  }
}
| 3,301 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaSimpleJsonExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import com.google.gson.Gson;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord;
import org.apache.gobblin.source.extractor.Extractor;
@Alias("KafkaSimpleJsonExtractor")
public class KafkaSimpleJsonExtractor extends KafkaSimpleExtractor implements Extractor<String, byte[]> {

  // Gson instances are thread-safe, so one shared instance is enough.
  private static final Gson GSON = new Gson();
  private static final Charset ENCODING = StandardCharsets.UTF_8;

  public KafkaSimpleJsonExtractor(WorkUnitState state) {
    super(state);
  }

  /**
   * Renders one Kafka message as a UTF-8 JSON document carrying its offset, key and payload.
   * A missing (null) key or payload is represented by an empty string.
   */
  @Override
  protected byte[] decodeRecord(ByteArrayBasedKafkaRecord messageAndOffset) throws IOException {
    byte[] rawKey = messageAndOffset.getKeyBytes();
    byte[] rawPayload = messageAndOffset.getMessageBytes();
    KafkaRecord record = new KafkaRecord(
        messageAndOffset.getOffset(),
        rawKey == null ? "" : new String(rawKey, ENCODING),
        rawPayload == null ? "" : new String(rawPayload, ENCODING));
    return GSON.toJson(record).getBytes(ENCODING);
  }
}
| 3,302 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaSimpleSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import java.io.IOException;
/**
 * A {@link KafkaSource} implementation for SimpleKafkaExtractor.
 *
 * @author akshay@nerdwallet.com
 *
 * @deprecated use {@link KafkaDeserializerSource} and {@link KafkaDeserializerExtractor.Deserializers#BYTE_ARRAY} instead
 */
@Deprecated // added to match the @deprecated javadoc so callers get a compiler warning
public class KafkaSimpleSource extends KafkaSource<String, byte[]> {
  /**
   * Get an {@link Extractor} based on a given {@link WorkUnitState}.
   * <p>
   * The {@link Extractor} returned can use {@link WorkUnitState} to store arbitrary key-value pairs
   * that will be persisted to the state store and loaded in the next scheduled job run.
   * </p>
   *
   * @param state a {@link WorkUnitState} carrying properties needed by the returned {@link Extractor}
   * @return an {@link Extractor} used to extract schema and data records from the data source
   * @throws IOException if it fails to create an {@link Extractor}
   */
  @Override
  public Extractor<String, byte[]> getExtractor(WorkUnitState state) throws IOException {
    return new KafkaSimpleExtractor(state);
  }
}
| 3,303 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/FixedSchemaKafkaAvroExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import org.apache.avro.Schema;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import com.google.common.base.Preconditions;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * Extract avro records from a Kafka topic using a fixed schema provided by {@link #STATIC_SCHEMA_ROOT_KEY}.
 */
@Alias(value = "AVRO_FIXED_SCHEMA")
public class FixedSchemaKafkaAvroExtractor extends KafkaAvroExtractor<Void> {

  // Per-topic schemas are configured under "<root>.<topic>" as Avro schema JSON.
  public static final String STATIC_SCHEMA_ROOT_KEY = "gobblin.source.kafka.fixedSchema";

  public FixedSchemaKafkaAvroExtractor(WorkUnitState state) {
    super(state);
  }

  /**
   * Parses the fixed schema configured for {@code topic} under key
   * {@code STATIC_SCHEMA_ROOT_KEY + "." + topic}.
   *
   * @throws IllegalArgumentException if the work unit does not define the key
   */
  @Override
  protected Schema getLatestSchemaByTopic(String topic) {
    String key = STATIC_SCHEMA_ROOT_KEY + "." + topic;
    Preconditions.checkArgument(this.workUnitState.contains(key),
        String.format("Could not find schema for topic %s. Looking for key %s.", topic, key));
    return new Schema.Parser().parse(this.workUnitState.getProp(key));
  }

  /**
   * Returns the fixed schema regardless of the payload contents.
   */
  @Override
  protected Schema getRecordSchema(byte[] payload) {
    if (!this.schema.isPresent()) {
      // Fixed typo in the error message: "preset" -> "present".
      throw new RuntimeException("Schema is not present. This is an error in the code.");
    }
    return this.schema.get();
  }

  /**
   * Payloads are plain binary-encoded Avro (no schema header), so a binary decoder suffices.
   */
  @Override
  protected Decoder getDecoder(byte[] payload) {
    return DecoderFactory.get().binaryDecoder(payload, null);
  }
}
| 3,304 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/SimpleKafkaSchemaRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.Properties;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
/**
 * Extension of {@link KafkaSchemaRegistry} that treats the topic name and the schema as the same string. The
 * {@link #getLatestSchemaByTopic(String)} method will simply return the specified topic name. All other methods throw
 * an {@link UnsupportedOperationException}. This class is useful when Kafka records don't have a schema, for example,
 * in {@link KafkaSimpleExtractor} or {@link KafkaGsonDeserializer}.
 */
public class SimpleKafkaSchemaRegistry extends KafkaSchemaRegistry<String, String> {

  public SimpleKafkaSchemaRegistry(Properties props) {
    super(props);
  }

  /** Not supported: there is no real schema store behind this registry. */
  @Override
  protected String fetchSchemaByKey(String key) throws SchemaRegistryException {
    throw new UnsupportedOperationException("fetchSchemaByKey is not supported by SimpleKafkaSchemaRegistry");
  }

  /** Returns the topic name itself as the "schema". */
  @Override
  public String getLatestSchemaByTopic(String topic) throws SchemaRegistryException {
    return topic;
  }

  /** Not supported: schemas cannot be registered with this registry. */
  @Override
  public String register(String schema) throws SchemaRegistryException {
    throw new UnsupportedOperationException("register is not supported by SimpleKafkaSchemaRegistry");
  }

  /** Not supported: schemas cannot be registered with this registry. */
  @Override
  public String register(String schema, String name) throws SchemaRegistryException {
    throw new UnsupportedOperationException("register is not supported by SimpleKafkaSchemaRegistry");
  }
}
| 3,305 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/validator/TopicValidatorBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.validator;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
/**
 * The base class of a topic validator.
 * Subclasses implement {@link #validate} to accept or reject individual Kafka topics.
 */
public abstract class TopicValidatorBase {
  // Job/source state captured at construction; subclasses read validator-specific
  // configuration from it.
  protected State state;

  public TopicValidatorBase(State sourceState) {
    this.state = sourceState;
  }

  /**
   * Validates a single Kafka topic.
   *
   * @param topic the topic to check
   * @return true when the topic passes this validator's check
   * @throws Exception if the validation itself cannot be performed
   */
  public abstract boolean validate(KafkaTopic topic) throws Exception;
}
| 3,306 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/validator/TopicNameValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.validator;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
/**
 * A topic validator that validates the topic name.
 */
public class TopicNameValidator extends TopicValidatorBase {
  private static final String DOT = ".";

  public TopicNameValidator(State state) {
    super(state);
  }

  /**
   * Check if a topic name is valid, current rules are:
   *   1. must not contain "."
   * @param topic the topic to be validated
   * @return true if the topic name is valid (aka. doesn't contain ".")
   */
  @Override
  public boolean validate(KafkaTopic topic) throws Exception {
    // A name is valid exactly when "." never occurs in it.
    String name = topic.getName();
    return name.indexOf(DOT) < 0;
  }
}
| 3,307 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/validator/TopicValidators.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.validator;
import com.google.common.base.Optional;
import com.google.common.base.Stopwatch;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* The TopicValidators contains a list of {@link TopicValidatorBase} that validate topics.
* To enable it, add below settings in the config:
* gobblin.kafka.topicValidators=validator1_class_name,validator2_class_name...
*/
@Slf4j
public class TopicValidators {
public static final String VALIDATOR_CLASSES_KEY = "gobblin.kafka.topicValidators";
private static long DEFAULTL_TIMEOUT = 10L;
private static TimeUnit DEFAULT_TIMEOUT_UNIT = TimeUnit.MINUTES;
private final List<TopicValidatorBase> validators = new ArrayList<>();
private final State state;
public TopicValidators(State state) {
this.state = state;
for (String validatorClassName : state.getPropAsList(VALIDATOR_CLASSES_KEY, StringUtils.EMPTY)) {
try {
this.validators.add(GobblinConstructorUtils.invokeConstructor(TopicValidatorBase.class, validatorClassName,
state));
} catch (Exception e) {
log.error("Failed to create topic validator: {}, due to {}", validatorClassName, e);
}
}
}
/**
* Validate topics with all the internal validators. The default timeout is set to 1 hour.
* Note:
* 1. the validations for every topic run in parallel.
* 2. when timeout happens, un-validated topics are still treated as "valid".
* @param topics the topics to be validated
* @return the topics that pass all the validators
*/
public List<KafkaTopic> validate(List<KafkaTopic> topics) {
return validate(topics, DEFAULTL_TIMEOUT, DEFAULT_TIMEOUT_UNIT);
}
/**
* Validate topics with all the internal validators.
* Note:
* 1. the validations for every topic run in parallel.
* 2. when timeout happens, un-validated topics are still treated as "valid".
* @param topics the topics to be validated
* @param timeout the timeout for the validation
* @param timeoutUnit the time unit for the timeout
* @return the topics that pass all the validators
*/
public List<KafkaTopic> validate(List<KafkaTopic> topics, long timeout, TimeUnit timeoutUnit) {
int numOfThreads = state.getPropAsInt(ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT);
// Tasks running in the thread pool will have the same access control and class loader settings as current thread
ExecutorService threadPool = Executors.newFixedThreadPool(numOfThreads, ExecutorsUtils.newPrivilegedThreadFactory(
Optional.of(log)));
List<Future<Boolean>> results = new ArrayList<>();
Stopwatch stopwatch = Stopwatch.createStarted();
for (KafkaTopic topic : topics) {
results.add(threadPool.submit(() -> validate(topic)));
}
ExecutorsUtils.shutdownExecutorService(threadPool, Optional.of(log), timeout, timeoutUnit);
log.info(String.format("Validate %d topics in %d seconds", topics.size(), stopwatch.elapsed(TimeUnit.SECONDS)));
List<KafkaTopic> validTopics = new ArrayList<>();
for (int i = 0; i < results.size(); ++i) {
try {
if (results.get(i).get()) {
validTopics.add(topics.get(i));
}
} catch (InterruptedException | ExecutionException e) {
log.warn("Failed to validate topic: {}, treat it as a valid topic", topics.get(i));
validTopics.add(topics.get(i));
}
}
return validTopics;
}
/**
* Validates a single topic with all the internal validators
*/
private boolean validate(KafkaTopic topic) throws Exception {
log.info("Validating topic {} in thread: {}", topic, Thread.currentThread().getName());
for (TopicValidatorBase validator : this.validators) {
if (!validator.validate(topic)) {
log.warn("KafkaTopic: {} doesn't pass the validator: {}", topic, validator.getClass().getName());
return false;
}
}
return true;
}
}
| 3,308 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaSingleLevelWorkUnitPacker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;
import com.google.common.math.DoubleMath;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * An implementation of {@link KafkaWorkUnitPacker} with a single level of bin packing using worst-fit-decreasing.
 *
 * Note that for each skipped topic, an empty workunit is created for each partition to preserve its checkpoints.
 * In single-level bin packing mode, it still assigns all partitions of a skipped topic to the same workunit / task,
 * so that a single empty task will be created for this topic, instead of one empty task per partition.
 *
 * Please refer to the Javadoc of {@link KafkaBiLevelWorkUnitPacker} for a comparison between
 * {@link KafkaSingleLevelWorkUnitPacker} and {@link KafkaBiLevelWorkUnitPacker}.
 *
 * @author Ziyang Liu
 */
public class KafkaSingleLevelWorkUnitPacker extends KafkaWorkUnitPacker {

  public KafkaSingleLevelWorkUnitPacker(AbstractSource<?, ?> source, SourceState state) {
    super(source, state);
  }

  @Override
  public List<WorkUnit> pack(Map<String, List<WorkUnit>> workUnitsByTopic, int numContainers) {
    if (workUnitsByTopic == null || workUnitsByTopic.isEmpty()) {
      return Lists.newArrayList();
    }
    setWorkUnitEstSizes(workUnitsByTopic);
    List<WorkUnit> flattenedWorkUnits = Lists.newArrayList();
    for (List<WorkUnit> topicWorkUnits : workUnitsByTopic.values()) {
      collectWorkUnitsForTopic(topicWorkUnits, flattenedWorkUnits);
    }
    return worstFitDecreasingBinPacking(flattenedWorkUnits, numContainers);
  }

  /**
   * Adds one topic's non-empty workunits to {@code dest}, merging all of its zero-size (empty)
   * workunits into a single squeezed workunit so only one empty task is created for the topic.
   */
  private void collectWorkUnitsForTopic(List<WorkUnit> topicWorkUnits, List<WorkUnit> dest) {
    MultiWorkUnit mergedEmptyWorkUnits = MultiWorkUnit.createEmpty();
    for (WorkUnit workUnit : topicWorkUnits) {
      if (DoubleMath.fuzzyEquals(getWorkUnitEstSize(workUnit), 0.0, EPS)) {
        addWorkUnitToMultiWorkUnit(workUnit, mergedEmptyWorkUnits);
      } else {
        workUnit.setWatermarkInterval(getWatermarkIntervalFromWorkUnit(workUnit));
        dest.add(workUnit);
      }
    }
    if (!mergedEmptyWorkUnits.getWorkUnits().isEmpty()) {
      dest.add(squeezeMultiWorkUnit(mergedEmptyWorkUnits));
    }
  }
}
| 3,309 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaAvgRecordTimeBasedWorkUnitSizeEstimator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.List;
import java.util.Map;
import org.apache.commons.math3.stat.descriptive.moment.GeometricMean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaUtils;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * An implementation of {@link KafkaWorkUnitSizeEstimator} which uses the average time to pull a record in the
 * previous run to estimate the sizes of {@link WorkUnit}s.
 *
 * Each partition pulled in the previous run should have an avg time per record in its {@link WorkUnitState}. In the
 * next run, the estimated avg time per record for each topic is the geometric mean of the avg time per record of all
 * partitions. For example if a topic has two partitions whose avg time per record in the previous run are 2 and 8,
 * the next run will use 4 as the estimated avg time per record. The reason to choose geometric mean over algebraic
 * mean is because large numbers are likely outliers, e.g., a topic may have 5 partitions, and the avg time per record
 * collected from the previous run could sometimes be [1.1, 1.2, 1.1, 1.3, 100].
 *
 * If a topic was not pulled in the previous run, its estimated avg time per record is the geometric mean of the
 * estimated avg time per record of all topics that were pulled in the previous run. If no topic was pulled in the
 * previous run, a default value of 1.0 is used.
 *
 * @author Ziyang Liu
 */
public class KafkaAvgRecordTimeBasedWorkUnitSizeEstimator implements KafkaWorkUnitSizeEstimator {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaAvgRecordTimeBasedWorkUnitSizeEstimator.class);

  // Lower bound applied to every sample before taking the geometric mean: log(0) is undefined, and
  // implausibly small measurements would otherwise drag the mean towards zero.
  private static final double EPS = 0.01;

  // Estimated avg time (millis) to pull a record, keyed by topic name.
  private final Map<String, Double> estAvgMillis = Maps.newHashMap();
  // Fallback estimate for topics that were not pulled in the previous run.
  private double avgEstAvgMillis = 0.0;

  public KafkaAvgRecordTimeBasedWorkUnitSizeEstimator(SourceState state) {
    readPrevAvgRecordMillis(state);
  }

  /**
   * Estimated size = (estimated avg millis per record for the topic) * (number of records in the watermark range).
   */
  @Override
  public double calcEstimatedSize(WorkUnit workUnit) {
    double avgMillis = this.getEstAvgMillisForTopic(KafkaUtils.getTopicName(workUnit));
    long numOfRecords = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY)
        - workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
    return avgMillis * numOfRecords;
  }

  /**
   * Calculate the geometric mean of a {@link List} of double numbers. Numbers smaller than {@link #EPS} will be
   * treated as {@link #EPS}.
   */
  private static double geometricMean(List<Double> numbers) {
    Preconditions.checkArgument(!numbers.isEmpty());
    double[] numberArray = new double[numbers.size()];
    for (int i = 0; i < numbers.size(); i++) {
      numberArray[i] = Math.max(numbers.get(i), EPS);
    }
    // Use a fresh GeometricMean per call: commons-math storeless statistics mutate internal state in
    // evaluate(), so a shared static instance would not be safe for concurrent use.
    return new GeometricMean().evaluate(numberArray, 0, numberArray.length);
  }

  /** Returns the per-topic estimate, falling back to the cross-topic geometric mean for unknown topics. */
  private double getEstAvgMillisForTopic(String topic) {
    if (this.estAvgMillis.containsKey(topic)) {
      return this.estAvgMillis.get(topic);
    }
    return this.avgEstAvgMillis;
  }

  /**
   * Get avg time to pull a record in the previous run for all topics, each of which is the geometric mean
   * of the avg time to pull a record of all partitions of the topic.
   *
   * If a topic was not pulled in the previous run (e.g., it's a new topic), it will use the geometric mean
   * of avg record time of topics that were pulled in the previous run.
   *
   * If no topic was pulled in the previous run, 1.0 will be used for all topics.
   */
  private void readPrevAvgRecordMillis(SourceState state) {
    // Collect all per-partition samples from the previous run, grouped by topic.
    Map<String, List<Double>> prevAvgMillis = Maps.newHashMap();
    for (WorkUnitState workUnitState : state.getPreviousWorkUnitStates()) {
      List<KafkaPartition> partitions = KafkaUtils.getPartitions(workUnitState);
      for (KafkaPartition partition : partitions) {
        if (KafkaUtils.containsPartitionAvgRecordMillis(workUnitState, partition)) {
          double prevAvgMillisForPartition = KafkaUtils.getPartitionAvgRecordMillis(workUnitState, partition);
          prevAvgMillis.computeIfAbsent(partition.getTopicName(), topic -> Lists.newArrayList())
              .add(prevAvgMillisForPartition);
        }
      }
    }

    this.estAvgMillis.clear();

    if (prevAvgMillis.isEmpty()) {
      // No history at all: use a neutral default for every topic.
      this.avgEstAvgMillis = 1.0;
    } else {
      List<Double> allEstAvgMillis = Lists.newArrayList();
      for (Map.Entry<String, List<Double>> entry : prevAvgMillis.entrySet()) {
        String topic = entry.getKey();
        List<Double> prevAvgMillisForPartitions = entry.getValue();

        // If a topic has k partitions, and in the previous run, each partition recorded its avg time to pull
        // a record, then use the geometric mean of these k numbers as the estimated avg time to pull
        // a record in this run.
        double estAvgMillisForTopic = geometricMean(prevAvgMillisForPartitions);
        this.estAvgMillis.put(topic, estAvgMillisForTopic);
        LOG.info("Estimated avg time to pull a record for topic {} is {} milliseconds", topic, estAvgMillisForTopic);
        allEstAvgMillis.add(estAvgMillisForTopic);
      }

      // If a topic was not pulled in the previous run, use this.avgEstAvgMillis as the estimated avg time
      // to pull a record in this run, which is the geometric mean of all topics whose avg times to pull
      // a record in the previous run are known.
      this.avgEstAvgMillis = geometricMean(allEstAvgMillis);
    }
    LOG.info("For all topics not pulled in the previous run, estimated avg time to pull a record is {} milliseconds",
        this.avgEstAvgMillis);
  }
}
| 3,310 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaTopicGroupingWorkUnitPacker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.hadoop.fs.Path;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.gson.Gson;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.CountEventBuilder;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.runtime.CheckpointableWatermarkState;
import org.apache.gobblin.runtime.StateStoreBasedWatermarkStorage;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamingExtractor;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaUtils;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
import static org.apache.gobblin.source.extractor.extract.kafka.workunit.packer.KafkaBiLevelWorkUnitPacker.bestFitDecreasingBinPacking;
/**
*
* An implementation of {@link KafkaWorkUnitPacker} that used for streamlined Kafka ingestion, which:
*
* 1) Groups partitions of the same topic together. Multiple topics are never mixed in a base {@link WorkUnit},
* but may be mixed in a {@link MultiWorkUnit}
* 2) Don't assign offset range within WorkUnit but provides a list of partitions (or topics) to inform streaming
* {@link org.apache.gobblin.source.extractor.Extractor} of where to pull events from, behaves as an "index' for
* {@link org.apache.gobblin.source.extractor.Extractor}.
*
* It is then {@link org.apache.gobblin.source.extractor.Extractor}'s responsibility to interact with
* {@link org.apache.gobblin.writer.WatermarkStorage} on determining offset of each
* {@link org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition} that it was assigned.
*/
@Slf4j
public class KafkaTopicGroupingWorkUnitPacker extends KafkaWorkUnitPacker {
public static final String GOBBLIN_KAFKA_PREFIX = "gobblin.kafka.";
public static final String DEFAULT_NUM_TOPIC_PARTITIONS_PER_CONTAINER_KEY = GOBBLIN_KAFKA_PREFIX + "default.num.topic.partitions.per.container";
private static final int DEFAULT_DEFAULT_NUM_TOPIC_PARTITIONS_PER_CONTAINER = 10;
//A global configuration for container capacity. The container capacity refers to the peak rate (in MB/s) that a
//single JVM can consume from Kafka for a single topic and controls the number of partitions of a topic that will be
// packed into a single workunit. For example, if the container capacity is set to 10, and each topic partition has a
// weight of 1, then 10 partitions of the topic will be packed into a single workunit. This configuration is topic-independent
// i.e. all topics will be assumed to have the same peak consumption rate when set.
public static final String CONTAINER_CAPACITY_KEY = GOBBLIN_KAFKA_PREFIX + "streaming.containerCapacity";
public static final double DEFAULT_CONTAINER_CAPACITY = 10;
// minimum container capacity to avoid bad topic schema causing us to request resources aggressively
public static final String MINIMUM_CONTAINER_CAPACITY = GOBBLIN_KAFKA_PREFIX + "streaming.minimum.containerCapacity";
public static final double DEFAULT_MINIMUM_CONTAINER_CAPACITY = 1;
public static final String TOPIC_PARTITION_WITH_LOW_CAPACITY_EVENT_NAME = "topicPartitionWithLowCapacity";
public static final String TOPIC_PARTITION = "topicPartition";
public static final String TOPIC_PARTITION_CAPACITY = "topicPartitionCapacity";
//A boolean flag to enable per-topic container capacity, where "container capacity" is as defined earlier. This
// configuration is useful in scenarios where the write performance can vary significantly across topics due to differences
// in schema, as in the case of columnar formats such as ORC and Parquet. When enabled, the bin packing algorithm uses
// historic consumption rates for a given topic as tracked by the ingestion pipeline.
public static final String IS_PER_TOPIC_CONTAINER_CAPACITY_ENABLED_KEY = GOBBLIN_KAFKA_PREFIX + "streaming.isPerTopicBinCapacityEnabled";
public static final Boolean DEFAULT_IS_PER_TOPIC_CONTAINER_CAPACITY_ENABLED = false;
//A topic-specific config that controls the minimum number of containers for that topic.
public static final String MIN_CONTAINERS_FOR_TOPIC = GOBBLIN_KAFKA_PREFIX + "minContainersForTopic";
public static final String PARTITION_WATERMARK = GOBBLIN_KAFKA_PREFIX + "partition.watermark";
public static final String PACKING_START_TIME_MILLIS = GOBBLIN_KAFKA_PREFIX + "packer.packingStartTimeMillis";
public static final String IS_STATS_BASED_PACKING_ENABLED_KEY = GOBBLIN_KAFKA_PREFIX + "streaming.isStatsBasedPackingEnabled";
public static final boolean DEFAULT_IS_STATS_BASED_PACKING_ENABLED = false;
public static final String CONTAINER_CAPACITY_COMPUTATION_STRATEGY_KEY =
GOBBLIN_KAFKA_PREFIX + "streaming.containerCapacityComputationStrategy";
public static final String DEFAULT_CONTAINER_CAPACITY_COMPUTATION_STRATEGY = ContainerCapacityComputationStrategy.MEDIAN.name();
// Strategies for collapsing the historic per-container capacities observed for a topic into the
// single capacity used when packing that topic (selected via CONTAINER_CAPACITY_COMPUTATION_STRATEGY_KEY).
public enum ContainerCapacityComputationStrategy {
MIN, MAX, MEAN, MEDIAN
}
private static final Gson GSON = GsonInterfaceAdapter.getGson(Object.class);
/**
* Configuration to enable indexing on packing.
*/
private static final String INDEXING_ENABLED = "gobblin.kafka.streaming.enableIndexing";
private static final boolean DEFAULT_INDEXING_ENABLED = true;
private static final String METRICS_PREFIX = "binpacker.metrics.";
/**
* When indexing-packing is enabled, the number of partitions is important for extractor to know
* how many kafka partitions need to be pulled from.
* Set to public-static to share with Extractor.
*/
public static final String NUM_PARTITIONS_ASSIGNED = "gobblin.kafka.streaming.numPartitions";
//A derived metric that defines the default workunit size, in case of workunit size cannot be estimated.
public static final String DEFAULT_WORKUNIT_SIZE_KEY = "gobblin.kafka.defaultWorkUnitSize";
//A lower bound for the workunit size.
public static final String MIN_WORKUNIT_SIZE_KEY = "gobblin.kafka.minWorkUnitSize";
private static final String NUM_CONTAINERS_EVENT_NAME = "NumContainers";
private final long packingStartTimeMillis;
private final double minimumContainerCapacity;
private final Optional<StateStoreBasedWatermarkStorage> watermarkStorage;
private final Optional<MetricContext> metricContext;
private final boolean isStatsBasedPackingEnabled;
private final Boolean isPerTopicContainerCapacityEnabled;
private final ContainerCapacityComputationStrategy containerCapacityComputationStrategy;
private final Map<String, KafkaStreamingExtractor.KafkaWatermark> lastCommittedWatermarks = Maps.newHashMap();
private final Map<String, List<Double>> capacitiesByTopic = Maps.newHashMap();
private final EventSubmitter eventSubmitter;
private SourceState state;
public KafkaTopicGroupingWorkUnitPacker(AbstractSource<?, ?> source, SourceState state,
    Optional<MetricContext> metricContext) {
  super(source, state);
  this.state = state;
  this.packingStartTimeMillis = System.currentTimeMillis();

  // Metrics / events plumbing.
  this.metricContext = metricContext;
  this.eventSubmitter =
      new EventSubmitter.Builder(this.metricContext, KafkaTopicGroupingWorkUnitPacker.class.getName()).build();

  // Capacity-related knobs.
  this.minimumContainerCapacity =
      state.getPropAsDouble(MINIMUM_CONTAINER_CAPACITY, DEFAULT_MINIMUM_CONTAINER_CAPACITY);
  this.isPerTopicContainerCapacityEnabled =
      state.getPropAsBoolean(IS_PER_TOPIC_CONTAINER_CAPACITY_ENABLED_KEY,
          DEFAULT_IS_PER_TOPIC_CONTAINER_CAPACITY_ENABLED);
  this.containerCapacityComputationStrategy = ContainerCapacityComputationStrategy.valueOf(
      state.getProp(CONTAINER_CAPACITY_COMPUTATION_STRATEGY_KEY, DEFAULT_CONTAINER_CAPACITY_COMPUTATION_STRATEGY)
          .toUpperCase());

  // Stats-based packing needs a watermark store to read historic per-partition stats from;
  // otherwise the storage is left absent.
  this.isStatsBasedPackingEnabled =
      state.getPropAsBoolean(IS_STATS_BASED_PACKING_ENABLED_KEY, DEFAULT_IS_STATS_BASED_PACKING_ENABLED);
  this.watermarkStorage =
      Optional.fromNullable(this.isStatsBasedPackingEnabled ? new StateStoreBasedWatermarkStorage(state) : null);
}
/**
 * Pack using the following strategy.
 * - Each container has a configured capacity in terms of the cost metric.
 *   This is configured by {@value CONTAINER_CAPACITY_KEY}.
 * - For each topic pack the workunits into a set of topic specific buckets by filling the fullest bucket that can
 *   hold the workunit without exceeding the container capacity.
 * - The topic specific multi-workunits are squeezed and returned as a workunit.
 *
 * @param workUnitsByTopic workunits to pack, grouped by topic name
 * @param numContainers desired number of containers, which will be the size of return value List&lt;WorkUnit&gt;.
 *                      The actual num can be smaller or bigger depends on container capacity and total
 *                      workUnit/partition number
 */
@Override
public List<WorkUnit> pack(Map<String, List<WorkUnit>> workUnitsByTopic, int numContainers) {
  double containerCapacity = this.state.getPropAsDouble(CONTAINER_CAPACITY_KEY, DEFAULT_CONTAINER_CAPACITY);
  if (this.watermarkStorage.isPresent()) {
    try {
      addStatsToWorkUnits(workUnitsByTopic);
    } catch (IOException e) {
      log.error("Unable to get stats from watermark storage.");
      throw new RuntimeException(e);
    }
  }
  setWorkUnitEstSizes(workUnitsByTopic);
  List<MultiWorkUnit> mwuGroups = Lists.newArrayList();
  for (Map.Entry<String, List<WorkUnit>> entry : workUnitsByTopic.entrySet()) {
    String topic = entry.getKey();
    List<WorkUnit> workUnitsForTopic = entry.getValue();
    if (this.isStatsBasedPackingEnabled && this.isPerTopicContainerCapacityEnabled) {
      // Override the global capacity with a per-topic capacity derived from historic consumption rates.
      // NOTE(review): capacitiesByTopic may not contain this topic (e.g. no committed watermarks yet) —
      // confirm getContainerCapacityForTopic handles a null capacities list.
      containerCapacity =
          getContainerCapacityForTopic(capacitiesByTopic.get(topic), this.containerCapacityComputationStrategy);
      log.info("Container capacity for topic {}: {}", topic, containerCapacity);
    }
    // Add CONTAINER_CAPACITY into each workunit. Useful when KafkaIngestionHealthCheck is enabled.
    for (WorkUnit workUnit : workUnitsForTopic) {
      //todo: check whether it's set already to respect the topic specific capacity from user input properties
      workUnit.setProp(CONTAINER_CAPACITY_KEY, containerCapacity);
    }
    double estimatedDataSizeForTopic = calcTotalEstSizeForTopic(workUnitsForTopic);
    int previousSize = mwuGroups.size();
    if (estimatedDataSizeForTopic < containerCapacity) {
      // If the total estimated size of a topic is less than the container capacity, then put all
      // partitions of this topic in a single group.
      MultiWorkUnit mwuGroup = MultiWorkUnit.createEmpty();
      addWorkUnitsToMultiWorkUnit(workUnitsForTopic, mwuGroup);
      mwuGroups.add(mwuGroup);
    } else {
      // Use best-fit-decreasing to group workunits for a topic into multiple groups.
      mwuGroups.addAll(bestFitDecreasingBinPacking(workUnitsForTopic, containerCapacity));
    }
    int numContainersForTopic = mwuGroups.size() - previousSize;
    log.info("Packed partitions for topic {} into {} containers", topic, numContainersForTopic);
    if (this.metricContext.isPresent()) {
      // Report the number of containers used for each topic.
      String metricName = METRICS_PREFIX + topic + ".numContainers";
      ContextAwareGauge<Integer> gauge =
          this.metricContext.get().newContextAwareGauge(metricName, () -> numContainersForTopic);
      this.metricContext.get().register(metricName, gauge);
      // Submit a container count event for the given topic.
      CountEventBuilder countEventBuilder = new CountEventBuilder(NUM_CONTAINERS_EVENT_NAME, numContainersForTopic);
      countEventBuilder.addMetadata("topic", topic);
      this.eventSubmitter.submit(countEventBuilder);
    }
  }
  // If size of mwuGroups is smaller than numContainers, try to further split the multi WU to respect the
  // container number requirement.
  if (mwuGroups.size() < numContainers) {
    mwuGroups = splitMultiWorkUnits(mwuGroups, numContainers);
  }
  List<WorkUnit> squeezedGroups = squeezeMultiWorkUnits(mwuGroups);
  log.debug("Squeezed work unit groups: " + squeezedGroups);
  return squeezedGroups;
}
/**
 * TODO: This method should be moved into {@link KafkaSource}, which requires moving classes such
 * as {@link KafkaStreamingExtractor.KafkaWatermark} to the open source. A side-effect of this method is to
 * populate a map (called "capacitiesByTopic") of topicName to the peak consumption rate observed
 * by a JVM for a given topic. This capacity limits the number of partitions of a topic grouped into a single workunit.
 * The capacity is computed from the historic peak consumption rates observed by different containers processing
 * a given topic, using the configured {@link ContainerCapacityComputationStrategy}.
 *
 * Read the average produce rates for each topic partition from Watermark storage and add them to the workunit.
 * @param workUnitsByTopic workunits to decorate with stats, grouped by topic name
 * @throws IOException if the committed watermarks cannot be read from the watermark storage
 */
private void addStatsToWorkUnits(Map<String, List<WorkUnit>> workUnitsByTopic) throws IOException {
  // Renamed from "state" to avoid shadowing the SourceState field of the same name.
  for (CheckpointableWatermarkState watermarkState : this.watermarkStorage.get().getAllCommittedWatermarks()) {
    String topicPartition = watermarkState.getSource();
    KafkaStreamingExtractor.KafkaWatermark watermark =
        GSON.fromJson(watermarkState.getProp(topicPartition), KafkaStreamingExtractor.KafkaWatermark.class);
    lastCommittedWatermarks.put(topicPartition, watermark);
    if (this.isPerTopicContainerCapacityEnabled) {
      String topicName = KafkaUtils.getTopicNameFromTopicPartition(topicPartition);
      List<Double> capacities = capacitiesByTopic.getOrDefault(topicName, Lists.newArrayList());
      // Non-positive consume rates are unusable; fall back to the default capacity.
      double realCapacity =
          watermark.getAvgConsumeRate() > 0 ? watermark.getAvgConsumeRate() : DEFAULT_CONTAINER_CAPACITY;
      if (realCapacity < minimumContainerCapacity) {
        // Clamp suspiciously low capacities so a bad sample doesn't cause aggressive resource requests,
        // and emit an event so the anomaly is observable.
        if (this.metricContext.isPresent()) {
          GobblinEventBuilder event = new GobblinEventBuilder(TOPIC_PARTITION_WITH_LOW_CAPACITY_EVENT_NAME);
          event.addMetadata(TOPIC_PARTITION, topicPartition);
          event.addMetadata(TOPIC_PARTITION_CAPACITY, String.valueOf(realCapacity));
          this.eventSubmitter.submit(event);
        }
        log.warn(String.format("topicPartition %s has lower capacity %s, ignore that and reset capacity to be %s",
            topicPartition, realCapacity, minimumContainerCapacity));
        realCapacity = minimumContainerCapacity;
      }
      capacities.add(realCapacity);
      capacitiesByTopic.put(topicName, capacities);
    }
  }
  for (Map.Entry<String, List<WorkUnit>> entry : workUnitsByTopic.entrySet()) {
    String topic = entry.getKey();
    List<WorkUnit> workUnits = entry.getValue();
    for (WorkUnit workUnit : workUnits) {
      int partitionId = workUnit.getPropAsInt(KafkaSource.PARTITION_ID);
      String topicPartition = new KafkaPartition.Builder().withTopicName(topic).withId(partitionId).build().toString();
      KafkaStreamingExtractor.KafkaWatermark watermark = lastCommittedWatermarks.get(topicPartition);
      workUnit.setProp(PARTITION_WATERMARK, GSON.toJson(watermark));
      workUnit.setProp(PACKING_START_TIME_MILLIS, this.packingStartTimeMillis);
      workUnit.setProp(DEFAULT_WORKUNIT_SIZE_KEY, getDefaultWorkUnitSize());
      workUnit.setProp(MIN_WORKUNIT_SIZE_KEY, getMinWorkUnitSize(workUnit));
      // avgRecordSize is unknown when bootstrapping. so skipping setting this
      // and ORC writer will use the default setting for the tunning feature.
      if (watermark != null && watermark.getAvgRecordSize() > 0) {
        workUnit.setProp(ConfigurationKeys.AVG_RECORD_SIZE, watermark.getAvgRecordSize());
      }
    }
  }
}
private Double getDefaultWorkUnitSize() {
return state.getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY,
KafkaTopicGroupingWorkUnitPacker.DEFAULT_CONTAINER_CAPACITY) / state.getPropAsDouble(DEFAULT_NUM_TOPIC_PARTITIONS_PER_CONTAINER_KEY, DEFAULT_DEFAULT_NUM_TOPIC_PARTITIONS_PER_CONTAINER);
}
/**
* A helper method that configures the minimum workunit size for each topic partition based on the lower bound of
* the number of containers to be used for the topic.
* @param workUnit
* @return the minimum workunit size.
*/
private Double getMinWorkUnitSize(WorkUnit workUnit) {
int minContainersForTopic = Math.min(workUnit.getPropAsInt(MIN_CONTAINERS_FOR_TOPIC, -1),
workUnit.getPropAsInt(KafkaSource.NUM_TOPIC_PARTITIONS));
if (minContainersForTopic == -1) {
//No minimum configured? Return lower bound for workunit size to be 0.
return 0.0;
}
//Compute the maximum number of partitions to be packed into each container.
int maxNumPartitionsPerContainer = workUnit.getPropAsInt(KafkaSource.NUM_TOPIC_PARTITIONS) / minContainersForTopic;
return state.getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.CONTAINER_CAPACITY_KEY,
KafkaTopicGroupingWorkUnitPacker.DEFAULT_CONTAINER_CAPACITY) / maxNumPartitionsPerContainer;
}
  /**
   * Indexing all partitions that will be handled in this topic-specific MWU into a single WU by only tracking their
   * topic/partition id.
   * Indexed WU will not have offset assigned to pull for each partitions
   *
   * <p>When indexing is disabled, delegates to the superclass implementation instead.
   * @param multiWorkUnits the topic-specific {@link MultiWorkUnit}s to squeeze.
   * @return one {@link WorkUnit} per input {@link MultiWorkUnit} when indexing is enabled; otherwise
   *         whatever the superclass returns.
   */
  @Override
  protected List<WorkUnit> squeezeMultiWorkUnits(List<MultiWorkUnit> multiWorkUnits) {
    if (state.getPropAsBoolean(INDEXING_ENABLED, DEFAULT_INDEXING_ENABLED)) {
      List<WorkUnit> indexedWorkUnitList = new ArrayList<>();
      // id to append to the task output directory to make it unique to avoid multiple flush publishers
      // attempting to move the same file.
      int uniqueId = 0;
      for (MultiWorkUnit mwu : multiWorkUnits) {
        // Select a sample WU.
        WorkUnit indexedWorkUnit = mwu.getWorkUnits().get(0);
        List<KafkaPartition> topicPartitions = getPartitionsFromMultiWorkUnit(mwu);
        // Indexing all topics/partitions into this WU.
        populateMultiPartitionWorkUnit(topicPartitions, indexedWorkUnit);
        // Adding Number of Partitions as part of WorkUnit so that Extractor has clue on how many iterations to run.
        indexedWorkUnit.setProp(NUM_PARTITIONS_ASSIGNED, topicPartitions.size());
        // Need to make the task output directory unique to file move conflicts in the flush publisher.
        String outputDir = state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR);
        indexedWorkUnit.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(outputDir, Integer.toString(uniqueId++)));
        indexedWorkUnitList.add(indexedWorkUnit);
      }
      return indexedWorkUnitList;
    } else {
      return super.squeezeMultiWorkUnits(multiWorkUnits);
    }
  }
/**
* A method that returns the container capacity for a given topic given the
* @param capacities measured container capacities derived from watermarks
* @param strategy the algorithm to derive the container capacity from prior measurements
* @return the container capacity obtained using the given {@link ContainerCapacityComputationStrategy}.
*/
@VisibleForTesting
static double getContainerCapacityForTopic(List<Double> capacities, ContainerCapacityComputationStrategy strategy) {
//No previous stats for a topic? Return default.
if (capacities == null) {
return DEFAULT_CONTAINER_CAPACITY;
}
Collections.sort(capacities);
log.info("Capacity computation strategy: {}, capacities: {}", strategy.name(), capacities);
switch (strategy) {
case MIN:
return capacities.get(0);
case MAX:
return capacities.get(capacities.size() - 1);
case MEAN:
return (capacities.stream().mapToDouble(capacity -> capacity).sum()) / capacities.size();
case MEDIAN:
return ((capacities.size() % 2) == 0) ? (
(capacities.get(capacities.size() / 2) + capacities.get(capacities.size() / 2 - 1)) / 2)
: capacities.get(capacities.size() / 2);
default:
throw new RuntimeException("Unsupported computation strategy: " + strategy.name());
}
}
/**
* A method that split a list of {@link MultiWorkUnit} to the size of desiredWUSize if possible. The approach is to try
* to evenly split the {@link WorkUnit} within MWU into two, and always try to split MWU with more partitions first.
* Stop when each {@link MultiWorkUnit} only contains single {@link WorkUnit} as further split is no possible.
* @param multiWorkUnits the list of {@link MultiWorkUnit} to be split
* @param desiredWUSize desired number of {@link MultiWorkUnit}
* @return splitted MultiWorkUnit
*/
public static List<MultiWorkUnit> splitMultiWorkUnits(List<MultiWorkUnit> multiWorkUnits, int desiredWUSize) {
PriorityQueue<MultiWorkUnit> pQueue = new PriorityQueue<>(
Comparator.comparing(mwu -> mwu.getWorkUnits().size(), Comparator.reverseOrder()));
pQueue.addAll(multiWorkUnits);
while(pQueue.size() < desiredWUSize) {
MultiWorkUnit mwu = pQueue.poll();
int size = mwu.getWorkUnits().size();
// If the size is smaller than 2, meaning each mwu only contains a single wu and can't be further split.
// Add back the polled element and stop the loop.
if(size < 2) {
pQueue.add(mwu);
break;
}
// Split the mwu into 2 with evenly distributed wu
pQueue.add(MultiWorkUnit.createMultiWorkUnit(mwu.getWorkUnits().subList(0, size/2)));
pQueue.add(MultiWorkUnit.createMultiWorkUnit(mwu.getWorkUnits().subList(size/2, size)));
}
log.info("Min size of the container is set to {}, successfully split the multi workunit to {}", desiredWUSize, pQueue.size());
// If size is the same, meaning no split can be done. Return the original list to avoid construct a new list
if(multiWorkUnits.size() == pQueue.size()) {
return multiWorkUnits;
}
return new ArrayList<>(pQueue);
}
} | 3,311 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/UnitKafkaWorkUnitSizeEstimator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* A dummy implementation of {@link KafkaWorkUnitSizeEstimator} that directly returns unit("1") for
* {@link #calcEstimatedSize(WorkUnit)} methods as the estimated size for each {@link WorkUnit}
* could be useless in certain circumstances.
*/
public class UnitKafkaWorkUnitSizeEstimator implements KafkaWorkUnitSizeEstimator {
public UnitKafkaWorkUnitSizeEstimator(SourceState state) {
// do nothing
}
@Override
public double calcEstimatedSize(WorkUnit workUnit) {
return 1;
}
} | 3,312 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaAvgRecordSizeBasedWorkUnitSizeEstimator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaUtils;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
 * An implementation of {@link KafkaWorkUnitSizeEstimator} which uses the average record size of each partition to
 * estimate the sizes of {@link WorkUnit}s.
 *
 * Each partition pulled in the previous run should have an avg record size in its {@link WorkUnitState}. In the
 * next run, for each partition the avg record size pulled in the previous run is considered the avg record size
 * to be pulled in this run.
 *
 * If a partition was not pulled in the previous run, a default value of 1024 is used.
 *
 * @author Ziyang Liu
 */
public class KafkaAvgRecordSizeBasedWorkUnitSizeEstimator implements KafkaWorkUnitSizeEstimator {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaAvgRecordSizeBasedWorkUnitSizeEstimator.class);

  //Fallback estimate (in bytes) for partitions with no record-size history.
  private static final long DEFAULT_AVG_RECORD_SIZE = 1024;

  //Per-partition average record sizes harvested from the previous run's workunit states.
  private final Map<KafkaPartition, Long> estAvgSizes = Maps.newHashMap();

  public KafkaAvgRecordSizeBasedWorkUnitSizeEstimator(SourceState state) {
    readPreAvgRecordSizes(state);
  }

  /**
   * Estimates the workunit's size as (avg record size) * (number of records to pull), where the
   * record count is the gap between the high and low watermarks.
   */
  @Override
  public double calcEstimatedSize(WorkUnit workUnit) {
    long avgSize = this.getEstAvgSizeForPartition(KafkaUtils.getPartition(workUnit));
    long numOfRecords = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY)
        - workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
    return (double) avgSize * numOfRecords;
  }

  /**
   * Returns the estimated average record size for the given partition, falling back to
   * {@link #DEFAULT_AVG_RECORD_SIZE} when no estimate from a previous run is available.
   */
  private long getEstAvgSizeForPartition(KafkaPartition partition) {
    // Single map lookup instead of containsKey() followed by two separate get() calls.
    Long avgSize = this.estAvgSizes.get(partition);
    if (avgSize != null) {
      // Parameterized logging avoids eager String.format() evaluation.
      LOG.info("Estimated avg record size for partition {} is {}", partition, avgSize);
      return avgSize;
    }
    LOG.warn("Avg record size for partition {} not available, using default size {}", partition,
        DEFAULT_AVG_RECORD_SIZE);
    return DEFAULT_AVG_RECORD_SIZE;
  }

  //Populates estAvgSizes from the previous run's workunit states; partitions without a recorded
  //avg record size are simply absent from the map.
  private void readPreAvgRecordSizes(SourceState state) {
    this.estAvgSizes.clear();
    for (WorkUnitState workUnitState : state.getPreviousWorkUnitStates()) {
      List<KafkaPartition> partitions = KafkaUtils.getPartitions(workUnitState);
      for (KafkaPartition partition : partitions) {
        if (KafkaUtils.containsPartitionAvgRecordSize(workUnitState, partition)) {
          long previousAvgSize = KafkaUtils.getPartitionAvgRecordSize(workUnitState, partition);
          this.estAvgSizes.put(partition, previousAvgSize);
        }
      }
    }
  }
}
| 3,313 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaWorkUnitPacker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.primitives.Doubles;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaUtils;
import org.apache.gobblin.source.extractor.extract.kafka.MultiLongWatermark;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * An abstract class for packing Kafka {@link WorkUnit}s into {@link MultiWorkUnit}s
 * based on the number of containers.
 *
 * @author Ziyang Liu
 */
public abstract class KafkaWorkUnitPacker {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaWorkUnitPacker.class);

  /**
   * For customized type of the following enums, it will try to find declared class in classpath
   * and fallback to exception if ClassNotFound. This way the sizeEstimator and packer could be easier to
   * extend. The major purpose for keeping this enum instead of using reflection only to construct the instance
   * of packer or sizeEstimator is to maintain backward-compatibility.
   *
   * The constructor of customized type needs to be annotated with public access-modifier as it is instantiated by
   * {@link GobblinConstructorUtils} which reside in different package, and it needs to have the same signature
   * as other implementation under the enum.
   */
  public enum PackerType {
    SINGLE_LEVEL,
    BI_LEVEL,
    CUSTOM
  }

  public enum SizeEstimatorType {
    AVG_RECORD_TIME,
    AVG_RECORD_SIZE,
    CUSTOM
  }

  public static final String KAFKA_WORKUNIT_PACKER_TYPE = "kafka.workunit.packer.type";
  public static final String KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE = "kafka.workunit.packer.customizedType";
  private static final PackerType DEFAULT_PACKER_TYPE = PackerType.SINGLE_LEVEL;

  public static final String KAFKA_WORKUNIT_SIZE_ESTIMATOR_TYPE = "kafka.workunit.size.estimator.type";
  public static final String KAFKA_WORKUNIT_SIZE_ESTIMATOR_CUSTOMIZED_TYPE = "kafka.workunit.size.estimator.customizedType";
  private static final SizeEstimatorType DEFAULT_SIZE_ESTIMATOR_TYPE = SizeEstimatorType.AVG_RECORD_TIME;

  //Floor applied to estimated sizes so the load formula never multiplies by zero.
  protected static final double EPS = 0.01;

  public static final String MIN_MULTIWORKUNIT_LOAD = "min.multiworkunit.load";
  public static final String MAX_MULTIWORKUNIT_LOAD = "max.multiworkunit.load";

  private static final String ESTIMATED_WORKUNIT_SIZE = "estimated.workunit.size";

  protected final AbstractSource<?, ?> source;
  protected final SourceState state;
  protected final KafkaWorkUnitSizeEstimator sizeEstimator;

  protected KafkaWorkUnitPacker(AbstractSource<?, ?> source, SourceState state) {
    this.source = source;
    this.state = state;
    this.sizeEstimator = getWorkUnitSizeEstimator();
  }

  /** Orders {@link WorkUnit}s by estimated load, smallest first. */
  protected static final Comparator<WorkUnit> LOAD_ASC_COMPARATOR = new Comparator<WorkUnit>() {
    @Override
    public int compare(WorkUnit w1, WorkUnit w2) {
      return Doubles.compare(getWorkUnitEstLoad(w1), getWorkUnitEstLoad(w2));
    }
  };

  /** Orders {@link WorkUnit}s by estimated load, largest first. */
  protected static final Comparator<WorkUnit> LOAD_DESC_COMPARATOR = new Comparator<WorkUnit>() {
    @Override
    public int compare(WorkUnit w1, WorkUnit w2) {
      return Doubles.compare(getWorkUnitEstLoad(w2), getWorkUnitEstLoad(w1));
    }
  };

  /** Computes and stamps the estimated size onto the workunit using the configured estimator. */
  private void setWorkUnitEstSize(WorkUnit workUnit) {
    workUnit.setProp(ESTIMATED_WORKUNIT_SIZE, this.sizeEstimator.calcEstimatedSize(workUnit));
  }

  // Setting to package-private for unit-testing purpose.
  KafkaWorkUnitSizeEstimator getWorkUnitSizeEstimator() {
    if (this.state.contains(KAFKA_WORKUNIT_SIZE_ESTIMATOR_TYPE)) {
      String sizeEstimatorTypeString = this.state.getProp(KAFKA_WORKUNIT_SIZE_ESTIMATOR_TYPE);
      Optional<SizeEstimatorType> sizeEstimatorType =
          Enums.getIfPresent(SizeEstimatorType.class, sizeEstimatorTypeString);
      if (sizeEstimatorType.isPresent()) {
        return getWorkUnitSizeEstimator(sizeEstimatorType.get());
      }
      // Report the raw config value; the absent Optional would render as "Optional.absent()".
      throw new IllegalArgumentException("WorkUnit size estimator type " + sizeEstimatorTypeString + " not found");
    }
    return getWorkUnitSizeEstimator(DEFAULT_SIZE_ESTIMATOR_TYPE);
  }

  /** Instantiates the size estimator implementation corresponding to the given type. */
  private KafkaWorkUnitSizeEstimator getWorkUnitSizeEstimator(SizeEstimatorType sizeEstimatorType) {
    switch (sizeEstimatorType) {
      case AVG_RECORD_TIME:
        return new KafkaAvgRecordTimeBasedWorkUnitSizeEstimator(this.state);
      case AVG_RECORD_SIZE:
        return new KafkaAvgRecordSizeBasedWorkUnitSizeEstimator(this.state);
      case CUSTOM:
        Preconditions.checkArgument(this.state.contains(KAFKA_WORKUNIT_SIZE_ESTIMATOR_CUSTOMIZED_TYPE));
        String className = this.state.getProp(KAFKA_WORKUNIT_SIZE_ESTIMATOR_CUSTOMIZED_TYPE);
        return GobblinConstructorUtils.invokeConstructor(KafkaWorkUnitSizeEstimator.class, className, this.state);
      default:
        throw new IllegalArgumentException("WorkUnit size estimator type " + sizeEstimatorType + " not found");
    }
  }

  private static void setWorkUnitEstSize(WorkUnit workUnit, double estSize) {
    workUnit.setProp(ESTIMATED_WORKUNIT_SIZE, estSize);
  }

  /**
   * Calculate estimated size for a topic from all {@link WorkUnit}s belong to it.
   */
  static double calcTotalEstSizeForTopic(List<WorkUnit> workUnitsForTopic) {
    double totalSize = 0;
    for (WorkUnit w : workUnitsForTopic) {
      totalSize += getWorkUnitEstSize(w);
    }
    return totalSize;
  }

  protected static double getWorkUnitEstSize(WorkUnit workUnit) {
    Preconditions.checkArgument(workUnit.contains(ESTIMATED_WORKUNIT_SIZE));
    return workUnit.getPropAsDouble(ESTIMATED_WORKUNIT_SIZE);
  }

  /**
   * Computes the estimated load of a workunit: size (floored at {@link #EPS}) scaled by
   * log10 of the number of contained workunits (floored at 2), so that multi-workunits
   * with many partitions weigh more than their raw size alone.
   */
  protected static double getWorkUnitEstLoad(WorkUnit workUnit) {
    if (workUnit instanceof MultiWorkUnit) {
      MultiWorkUnit mwu = (MultiWorkUnit) workUnit;
      return Math.max(getWorkUnitEstSize(workUnit), EPS) * Math.log10(Math.max(mwu.getWorkUnits().size(), 2));
    }
    return Math.max(getWorkUnitEstSize(workUnit), EPS) * Math.log10(2.0);
  }

  /** Adds a workunit to a multi-workunit and accumulates its estimated size into the container. */
  protected static void addWorkUnitToMultiWorkUnit(WorkUnit workUnit, MultiWorkUnit multiWorkUnit) {
    multiWorkUnit.addWorkUnit(workUnit);
    double size = multiWorkUnit.getPropAsDouble(ESTIMATED_WORKUNIT_SIZE, 0.0);
    multiWorkUnit.setProp(ESTIMATED_WORKUNIT_SIZE, size + getWorkUnitEstSize(workUnit));
  }

  protected static void addWorkUnitsToMultiWorkUnit(List<WorkUnit> workUnits, MultiWorkUnit multiWorkUnit) {
    for (WorkUnit workUnit : workUnits) {
      addWorkUnitToMultiWorkUnit(workUnit, multiWorkUnit);
    }
  }

  @SuppressWarnings("deprecation")
  protected static WatermarkInterval getWatermarkIntervalFromWorkUnit(WorkUnit workUnit) {
    if (workUnit instanceof MultiWorkUnit) {
      return getWatermarkIntervalFromMultiWorkUnit((MultiWorkUnit) workUnit);
    }
    List<Long> lowWatermarkValues = Lists.newArrayList(workUnit.getLowWaterMark());
    List<Long> expectedHighWatermarkValues = Lists.newArrayList(workUnit.getHighWaterMark());
    return new WatermarkInterval(new MultiLongWatermark(lowWatermarkValues),
        new MultiLongWatermark(expectedHighWatermarkValues));
  }

  @SuppressWarnings("deprecation")
  protected static WatermarkInterval getWatermarkIntervalFromMultiWorkUnit(MultiWorkUnit multiWorkUnit) {
    List<Long> lowWatermarkValues = Lists.newArrayList();
    List<Long> expectedHighWatermarkValues = Lists.newArrayList();
    for (WorkUnit workUnit : multiWorkUnit.getWorkUnits()) {
      lowWatermarkValues.add(workUnit.getLowWaterMark());
      expectedHighWatermarkValues.add(workUnit.getHighWaterMark());
    }
    return new WatermarkInterval(new MultiLongWatermark(lowWatermarkValues),
        new MultiLongWatermark(expectedHighWatermarkValues));
  }

  /**
   * For each input {@link MultiWorkUnit}, combine all {@link WorkUnit}s in it into a single {@link WorkUnit}.
   */
  protected List<WorkUnit> squeezeMultiWorkUnits(List<MultiWorkUnit> multiWorkUnits) {
    List<WorkUnit> workUnits = Lists.newArrayList();
    for (MultiWorkUnit multiWorkUnit : multiWorkUnits) {
      workUnits.add(squeezeMultiWorkUnit(multiWorkUnit));
    }
    return workUnits;
  }

  /**
   * Combine all {@link WorkUnit}s in the {@link MultiWorkUnit} into a single {@link WorkUnit}.
   */
  protected WorkUnit squeezeMultiWorkUnit(MultiWorkUnit multiWorkUnit) {
    WatermarkInterval interval = getWatermarkIntervalFromMultiWorkUnit(multiWorkUnit);
    List<KafkaPartition> partitions = getPartitionsFromMultiWorkUnit(multiWorkUnit);
    Preconditions.checkArgument(!partitions.isEmpty(), "There must be at least one partition in the multiWorkUnit");

    // Squeeze all partitions from the multiWorkUnit into of one the work units, which can be any one
    WorkUnit workUnit = multiWorkUnit.getWorkUnits().get(0);
    // Update interval
    workUnit.removeProp(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
    workUnit.removeProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY);
    workUnit.setWatermarkInterval(interval);

    // Update offset fetch epoch time and previous latest offset. These are used to compute the load factor,
    // gobblin consumption rate relative to the kafka production rate. The kafka rate is computed as
    // (current latest offset - previous latest offset)/(current epoch time - previous epoch time).
    int index = 0;
    for (WorkUnit wu : multiWorkUnit.getWorkUnits()) {
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_START_FETCH_EPOCH_TIME, index),
          wu.getProp(KafkaSource.PREVIOUS_START_FETCH_EPOCH_TIME));
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_STOP_FETCH_EPOCH_TIME, index),
          wu.getProp(KafkaSource.PREVIOUS_STOP_FETCH_EPOCH_TIME));
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_LOW_WATERMARK, index),
          wu.getProp(KafkaSource.PREVIOUS_LOW_WATERMARK));
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_HIGH_WATERMARK, index),
          wu.getProp(KafkaSource.PREVIOUS_HIGH_WATERMARK));
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_OFFSET_FETCH_EPOCH_TIME, index),
          wu.getProp(KafkaSource.PREVIOUS_OFFSET_FETCH_EPOCH_TIME));
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.OFFSET_FETCH_EPOCH_TIME, index),
          wu.getProp(KafkaSource.OFFSET_FETCH_EPOCH_TIME));
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PREVIOUS_LATEST_OFFSET, index),
          wu.getProp(KafkaSource.PREVIOUS_LATEST_OFFSET));
      index++;
    }
    // Remove the now per-partition-indexed properties from the merged workunit.
    workUnit.removeProp(KafkaSource.PREVIOUS_START_FETCH_EPOCH_TIME);
    workUnit.removeProp(KafkaSource.PREVIOUS_STOP_FETCH_EPOCH_TIME);
    workUnit.removeProp(KafkaSource.PREVIOUS_LOW_WATERMARK);
    workUnit.removeProp(KafkaSource.PREVIOUS_HIGH_WATERMARK);
    workUnit.removeProp(KafkaSource.PREVIOUS_OFFSET_FETCH_EPOCH_TIME);
    workUnit.removeProp(KafkaSource.OFFSET_FETCH_EPOCH_TIME);
    workUnit.removeProp(KafkaSource.PREVIOUS_LATEST_OFFSET);

    // Remove the original partition information
    workUnit.removeProp(KafkaSource.PARTITION_ID);
    workUnit.removeProp(KafkaSource.LEADER_ID);
    workUnit.removeProp(KafkaSource.LEADER_HOSTANDPORT);

    // Add combined partitions information
    populateMultiPartitionWorkUnit(partitions, workUnit);
    LOG.info(String.format("Created MultiWorkUnit for partitions %s", partitions));
    return workUnit;
  }

  /**
   * Add a list of partitions of the same topic to a {@link WorkUnit}.
   */
  static void populateMultiPartitionWorkUnit(List<KafkaPartition> partitions, WorkUnit workUnit) {
    Preconditions.checkArgument(!partitions.isEmpty(), "There should be at least one partition");
    GobblinMetrics.addCustomTagToState(workUnit, new Tag<>("kafkaTopic", partitions.get(0).getTopicName()));
    for (int i = 0; i < partitions.size(); i++) {
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.PARTITION_ID, i), partitions.get(i).getId());
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.LEADER_ID, i),
          partitions.get(i).getLeader().getId());
      workUnit.setProp(KafkaUtils.getPartitionPropName(KafkaSource.LEADER_HOSTANDPORT, i),
          partitions.get(i).getLeader().getHostAndPort());
    }
  }

  static List<KafkaPartition> getPartitionsFromMultiWorkUnit(MultiWorkUnit multiWorkUnit) {
    List<KafkaPartition> partitions = Lists.newArrayList();
    for (WorkUnit workUnit : multiWorkUnit.getWorkUnits()) {
      partitions.add(KafkaUtils.getPartition(workUnit));
    }
    return partitions;
  }

  /**
   * Pack a list of {@link WorkUnit}s into a smaller number of {@link MultiWorkUnit}s,
   * using the worst-fit-decreasing algorithm.
   *
   * Each {@link WorkUnit} is assigned to the {@link MultiWorkUnit} with the smallest load.
   */
  protected List<WorkUnit> worstFitDecreasingBinPacking(List<WorkUnit> groups, int numOfMultiWorkUnits) {
    // Sort workunit groups by data size desc
    Collections.sort(groups, LOAD_DESC_COMPARATOR);

    MinMaxPriorityQueue<MultiWorkUnit> pQueue =
        MinMaxPriorityQueue.orderedBy(LOAD_ASC_COMPARATOR).expectedSize(numOfMultiWorkUnits).create();
    for (int i = 0; i < numOfMultiWorkUnits; i++) {
      MultiWorkUnit multiWorkUnit = MultiWorkUnit.createEmpty();
      setWorkUnitEstSize(multiWorkUnit, 0);
      pQueue.add(multiWorkUnit);
    }

    for (WorkUnit group : groups) {
      MultiWorkUnit lightestMultiWorkUnit = pQueue.poll();
      addWorkUnitToMultiWorkUnit(group, lightestMultiWorkUnit);
      pQueue.add(lightestMultiWorkUnit);
    }

    // Drop bins that received no workunits (possible when there are fewer groups than bins).
    // Polling the min-max queue yields ascending load order, so the list stays load-sorted.
    LinkedList<MultiWorkUnit> nonEmptyMultiWorkUnits = new LinkedList<>();
    while (!pQueue.isEmpty()) {
      MultiWorkUnit multiWorkUnit = pQueue.poll();
      if (!multiWorkUnit.getWorkUnits().isEmpty()) {
        nonEmptyMultiWorkUnits.offer(multiWorkUnit);
      }
    }

    if (nonEmptyMultiWorkUnits.isEmpty()) {
      return Lists.newArrayList();
    }

    logMultiWorkUnitInfo(nonEmptyMultiWorkUnits);

    double minLoad = getWorkUnitEstLoad(nonEmptyMultiWorkUnits.peekFirst());
    double maxLoad = getWorkUnitEstLoad(nonEmptyMultiWorkUnits.peekLast());
    LOG.info(String.format("Min load of multiWorkUnit = %f; Max load of multiWorkUnit = %f; Diff = %f%%", minLoad,
        maxLoad, (maxLoad - minLoad) / maxLoad * 100.0));

    this.state.setProp(MIN_MULTIWORKUNIT_LOAD, minLoad);
    this.state.setProp(MAX_MULTIWORKUNIT_LOAD, maxLoad);

    List<WorkUnit> multiWorkUnits = Lists.newArrayList();
    multiWorkUnits.addAll(nonEmptyMultiWorkUnits);
    return multiWorkUnits;
  }

  private static void logMultiWorkUnitInfo(Iterable<MultiWorkUnit> mwus) {
    int idx = 0;
    for (MultiWorkUnit mwu : mwus) {
      LOG.info(String.format("MultiWorkUnit %d: estimated load=%f, partitions=%s", idx++, getWorkUnitEstLoad(mwu),
          getMultiWorkUnitPartitions(mwu)));
    }
  }

  protected static List<List<KafkaPartition>> getMultiWorkUnitPartitions(MultiWorkUnit mwu) {
    List<List<KafkaPartition>> partitions = Lists.newArrayList();
    for (WorkUnit workUnit : mwu.getWorkUnits()) {
      partitions.add(KafkaUtils.getPartitions(workUnit));
    }
    return partitions;
  }

  public static KafkaWorkUnitPacker getInstance(AbstractSource<?, ?> source, SourceState state) {
    return getInstance(source, state, Optional.absent());
  }

  public static KafkaWorkUnitPacker getInstance(AbstractSource<?, ?> source, SourceState state,
      Optional<MetricContext> metricContext) {
    if (state.contains(KAFKA_WORKUNIT_PACKER_TYPE)) {
      String packerTypeStr = state.getProp(KAFKA_WORKUNIT_PACKER_TYPE);
      Optional<PackerType> packerType = Enums.getIfPresent(PackerType.class, packerTypeStr);
      if (packerType.isPresent()) {
        return getInstance(packerType.get(), source, state, metricContext);
      }
      throw new IllegalArgumentException("WorkUnit packer type " + packerTypeStr + " not found");
    }
    return getInstance(DEFAULT_PACKER_TYPE, source, state, metricContext);
  }

  public static KafkaWorkUnitPacker getInstance(PackerType packerType, AbstractSource<?, ?> source, SourceState state, Optional<MetricContext> metricContext) {
    switch (packerType) {
      case SINGLE_LEVEL:
        return new KafkaSingleLevelWorkUnitPacker(source, state);
      case BI_LEVEL:
        return new KafkaBiLevelWorkUnitPacker(source, state);
      case CUSTOM:
        Preconditions.checkArgument(state.contains(KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE));
        String className = state.getProp(KAFKA_WORKUNIT_PACKER_CUSTOMIZED_TYPE);
        try {
          return (KafkaWorkUnitPacker) GobblinConstructorUtils.invokeLongestConstructor(Class.forName(className), source, state, metricContext);
        } catch (ReflectiveOperationException e) {
          throw new RuntimeException(e);
        }
      default:
        throw new IllegalArgumentException("WorkUnit packer type " + packerType + " not found");
    }
  }

  /**
   * Calculate the total size of the workUnits and set the estimated size for each workUnit
   * @param workUnitsByTopic workunits grouped by topic name.
   * @return the total size of the input workUnits
   */
  public double setWorkUnitEstSizes(Map<String, List<WorkUnit>> workUnitsByTopic) {
    double totalEstDataSize = 0;
    for (List<WorkUnit> workUnitsForTopic : workUnitsByTopic.values()) {
      for (WorkUnit workUnit : workUnitsForTopic) {
        setWorkUnitEstSize(workUnit);
        totalEstDataSize += getWorkUnitEstSize(workUnit);
      }
    }
    return totalEstDataSize;
  }

  /**
   * Group {@link WorkUnit}s into {@link MultiWorkUnit}s. Each input {@link WorkUnit} corresponds to
   * a (topic, partition).
   */
  public abstract List<WorkUnit> pack(Map<String, List<WorkUnit>> workUnitsByTopic, int numContainers);
}
| 3,314 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/ProduceRateAndLagBasedWorkUnitSizeEstimator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.Date;
import com.google.gson.Gson;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaProduceRateTracker;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamingExtractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.io.GsonInterfaceAdapter;
/**
* A {@link KafkaWorkUnitSizeEstimator} that uses historic produce rates of Kafka TopicPartitions and the current lag
* to determine the Workunit size. The inputs to the WorkUnitSizeEstimator are the following:
* <ul>
* <li> Current Lag in number of records (L) </li>
* <li> Average record size (R) </li>
* <li> Historic produce rates by hour-of-day and day-of-week (P) </li>
* <li> Target SLA to achieve zero lag (SLA) </li>
* </ul>
* Based on the current lag, historic produce rate for the Kafka TopicPartition, and
* a target SLA, we estimate the minimum consume rate (C) required to meet the target SLA using the following formula:
* C = (L * R)/SLA + P.
* To allow headroom for week-over-week and intra-hour variances, we scale the historic produce rates by
* an over-provisioning factor O. The formula is then modified to:
* C = (L * R)/SLA + (P * O).
* The calculated consumption rate C is returned as the estimated workunit size. Note that the estimated workunit size may exceed the
* container capacity. The bin packer is assumed to create a new bin containing only this workunit.
*
* Assumptions:
* <ul>
* <li>The container capacity is assumed to be defined in MB/s</li>
* <li>The topic partition produce rates are assumed to be tracked in bytes/s</li>
* </ul>
*/
@Slf4j
public class ProduceRateAndLagBasedWorkUnitSizeEstimator implements KafkaWorkUnitSizeEstimator {
public static final String CATCHUP_SLA_IN_HOURS_KEY = "gobblin.kafka.catchUpSlaInHours";
private static final int DEFAULT_CATCHUP_SLA_IN_HOURS = 1;
//The interval at which workunits are re-calculated.
public static final String REPLANNING_INTERVAL_IN_HOURS_KEY = "gobblin.kafka.replanningIntervalInHours";
//Set default mode to disable time-based replanning i.e. set replanningIntervalInHours to its maximum value.
// In this case, the work unit weight estimation will select the maximum produce rate for the topic partition across
// all hours and days of week.
private static final int DEFAULT_KAFKA_REPLANNING_INTERVAL = 168; //24 * 7
//An over-provisioning factor to provide head room to allow variances in traffic e.g. sub-hour rate variances, week-over-week
// traffic variances etc. This config has the effect of multiplying the historic rate by this factor.
public static final String PRODUCE_RATE_SCALING_FACTOR_KEY = "gobblin.kafka.produceRateScalingFactor";
private static final Double DEFAULT_PRODUCE_RATE_SCALING_FACTOR = 1.3;
public static final int ONE_MEGA_BYTE = 1048576;
private static final Gson GSON = GsonInterfaceAdapter.getGson(Object.class);
private final long catchUpSlaInHours;
private final long replanIntervalInHours;
private final double produceRateScalingFactor;
public ProduceRateAndLagBasedWorkUnitSizeEstimator(SourceState state) {
this.catchUpSlaInHours = state.getPropAsLong(CATCHUP_SLA_IN_HOURS_KEY, DEFAULT_CATCHUP_SLA_IN_HOURS);
this.replanIntervalInHours =
state.getPropAsLong(REPLANNING_INTERVAL_IN_HOURS_KEY, DEFAULT_KAFKA_REPLANNING_INTERVAL);
this.produceRateScalingFactor =
state.getPropAsDouble(PRODUCE_RATE_SCALING_FACTOR_KEY, DEFAULT_PRODUCE_RATE_SCALING_FACTOR);
}
@Override
public double calcEstimatedSize(WorkUnit workUnit) {
KafkaStreamingExtractor.KafkaWatermark watermark =
GSON.fromJson(workUnit.getProp(KafkaTopicGroupingWorkUnitPacker.PARTITION_WATERMARK),
KafkaStreamingExtractor.KafkaWatermark.class);
String topic = workUnit.getProp(KafkaSource.TOPIC_NAME);
String partition = workUnit.getProp(KafkaSource.PARTITION_ID);
double[][] avgProduceRates = null;
long avgRecordSize = 0L;
long offsetLag = Long.parseLong(workUnit.getProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY));
if (watermark != null) {
avgProduceRates = watermark.getAvgProduceRates();
avgRecordSize = watermark.getAvgRecordSize();
offsetLag = offsetLag - watermark.getLwm().getValue();
} else {
offsetLag = 0L;
}
double maxProduceRate = getMaxProduceRateUntilNextReplan(avgProduceRates,
workUnit.getPropAsLong(KafkaTopicGroupingWorkUnitPacker.PACKING_START_TIME_MILLIS));
if (maxProduceRate < 0) {
//No previous estimates found.
log.debug("No previous produce rate estimate found for {}", topic + "-" + partition);
maxProduceRate = workUnit.getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.DEFAULT_WORKUNIT_SIZE_KEY);
}
double minWorkUnitSize = workUnit.getPropAsDouble(KafkaTopicGroupingWorkUnitPacker.MIN_WORKUNIT_SIZE_KEY, 0.0);
//Compute the target consume rate in MB/s.
double targetConsumeRate =
((double) (offsetLag * avgRecordSize) / (catchUpSlaInHours * 3600 * ONE_MEGA_BYTE)) + (maxProduceRate
* produceRateScalingFactor);
log.info("TopicPartiton: {}, Max produce rate: {}, Offset lag: {}, Avg Record size: {}, Target Consume Rate: {}, Min Workunit size: {}",
topic + ":" + partition, maxProduceRate, offsetLag, avgRecordSize, targetConsumeRate, minWorkUnitSize);
//Return the target consumption rate to catch up with incoming traffic and current lag, as the workunit size.
return Math.max(targetConsumeRate, minWorkUnitSize);
}
/**
* @param avgProduceRates
* @param packingTimeMillis
* @return the maximum produce rate in MB/s observed within the time window of [packingTimeMillis, packingTimeMillis+catchUpSla].
*/
private double getMaxProduceRateUntilNextReplan(double[][] avgProduceRates, long packingTimeMillis) {
int dayOfWeek = KafkaProduceRateTracker.getDayOfWeek(new Date(packingTimeMillis));
int hourOfDay = KafkaProduceRateTracker.getHourOfDay(new Date(packingTimeMillis));
if (avgProduceRates == null) {
return -1.0;
}
double max = avgProduceRates[dayOfWeek][hourOfDay];
for (int i = 0; i < replanIntervalInHours; i++) {
if ((hourOfDay + 1) >= 24) {
dayOfWeek = (dayOfWeek + 1) % 7;
}
hourOfDay = (hourOfDay + 1) % 24;
if (max < avgProduceRates[dayOfWeek][hourOfDay]) {
max = avgProduceRates[dayOfWeek][hourOfDay];
}
}
return max / ONE_MEGA_BYTE;
}
} | 3,315 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaWorkUnitSizeEstimator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* Estimates the size of a Kafka {@link WorkUnit}, which contains one or more partitions of the same topic.
*/
public interface KafkaWorkUnitSizeEstimator {
  /**
   * Estimates the size of a Kafka {@link WorkUnit}.
   *
   * @param workUnit a workunit containing one or more partitions of the same topic
   * @return the estimated size of the workunit, in implementation-defined units
   */
  double calcEstimatedSize(WorkUnit workUnit);
}
| 3,316 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/workunit/packer/KafkaBiLevelWorkUnitPacker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka.workunit.packer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* An implementation of {@link KafkaWorkUnitPacker} with two levels of bin packing.
*
* In the first level, some {@link WorkUnit}s corresponding to partitions
* of the same topic are grouped together into a single {@link WorkUnit}. The number of grouped {@link WorkUnit}s
* is approximately {@link #WORKUNIT_PRE_GROUPING_SIZE_FACTOR} * number of {@link MultiWorkUnit}s. The value of
* {@link #WORKUNIT_PRE_GROUPING_SIZE_FACTOR} should generally be 3.0 or higher, since the worst-fit-decreasing
* algorithm (used by the second level) may not achieve a good balance if the number of items
* is less than 3 times the number of bins.
*
* In the second level, these grouped {@link WorkUnit}s are assembled into {@link MultiWorkUnit}s
* using worst-fit-decreasing.
*
* Bi-level bin packing has two advantages: (1) reduce the number of small output files since it tends to pack
* partitions of the same topic together; (2) reduce the total number of workunits / tasks since multiple partitions
* of the same topic are assigned to the same task. A task has a non-trivial cost of initialization, tear down and
* task state persistence. However, bi-level bin packing has more mapper skew than single-level bin packing, because
* if we pack lots of partitions of the same topic to the same mapper, and we underestimate the avg time per record
* for this topic, then this mapper could be much slower than other mappers.
*
* @author Ziyang Liu
*/
public class KafkaBiLevelWorkUnitPacker extends KafkaWorkUnitPacker {
  public static final String WORKUNIT_PRE_GROUPING_SIZE_FACTOR = "workunit.pre.grouping.size.factor";
  public static final double DEFAULT_WORKUNIT_PRE_GROUPING_SIZE_FACTOR = 3.0;

  public KafkaBiLevelWorkUnitPacker(AbstractSource<?, ?> source, SourceState state) {
    super(source, state);
  }

  /**
   * Packs workunits in two levels: first pre-groups partitions of the same topic into
   * {@link MultiWorkUnit}s of roughly {@code totalSize / numContainers / preGroupingSizeFactor},
   * then distributes the squeezed groups across containers with worst-fit-decreasing.
   */
  @Override
  public List<WorkUnit> pack(Map<String, List<WorkUnit>> workUnitsByTopic, int numContainers) {
    if (workUnitsByTopic == null || workUnitsByTopic.isEmpty()) {
      return Lists.newArrayList();
    }
    double totalEstDataSize = setWorkUnitEstSizes(workUnitsByTopic);
    double avgGroupSize = totalEstDataSize / numContainers / getPreGroupingSizeFactor(this.state);
    List<MultiWorkUnit> mwuGroups = Lists.newArrayList();
    for (List<WorkUnit> workUnitsForTopic : workUnitsByTopic.values()) {
      double estimatedDataSizeForTopic = calcTotalEstSizeForTopic(workUnitsForTopic);
      if (estimatedDataSizeForTopic < avgGroupSize) {
        // If the total estimated size of a topic is smaller than group size, put all partitions of this
        // topic in a single group.
        MultiWorkUnit mwuGroup = MultiWorkUnit.createEmpty();
        addWorkUnitsToMultiWorkUnit(workUnitsForTopic, mwuGroup);
        mwuGroups.add(mwuGroup);
      } else {
        // Use best-fit-decreasing to group workunits for a topic into multiple groups.
        mwuGroups.addAll(bestFitDecreasingBinPacking(workUnitsForTopic, avgGroupSize));
      }
    }
    List<WorkUnit> groups = squeezeMultiWorkUnits(mwuGroups);
    return worstFitDecreasingBinPacking(groups, numContainers);
  }

  private static double getPreGroupingSizeFactor(State state) {
    return state.getPropAsDouble(WORKUNIT_PRE_GROUPING_SIZE_FACTOR, DEFAULT_WORKUNIT_PRE_GROUPING_SIZE_FACTOR);
  }

  /**
   * Group {@link WorkUnit}s into groups. Each group is a {@link MultiWorkUnit}. Each group has a capacity of
   * avgGroupSize. If there's a single {@link WorkUnit} whose size is larger than avgGroupSize, it forms a group itself.
   */
  static List<MultiWorkUnit> bestFitDecreasingBinPacking(List<WorkUnit> workUnits, double avgGroupSize) {
    // Sort workunits by data size desc
    Collections.sort(workUnits, LOAD_DESC_COMPARATOR);
    // PriorityQueue rejects an initial capacity < 1, so guard against an empty input list.
    PriorityQueue<MultiWorkUnit> pQueue =
        new PriorityQueue<>(Math.max(1, workUnits.size()), LOAD_DESC_COMPARATOR);
    for (WorkUnit workUnit : workUnits) {
      MultiWorkUnit bestGroup = findAndPopBestFitGroup(workUnit, pQueue, avgGroupSize);
      if (bestGroup == null) {
        // No existing group can fit this workunit; start a new group.
        bestGroup = MultiWorkUnit.createEmpty();
      }
      addWorkUnitToMultiWorkUnit(workUnit, bestGroup);
      pQueue.add(bestGroup);
    }
    return Lists.newArrayList(pQueue);
  }

  /**
   * Find the best group using the best-fit-decreasing algorithm.
   * The best group is the fullest group that has enough capacity for the new {@link WorkUnit}.
   * If no existing group has enough capacity for the new {@link WorkUnit}, return null.
   */
  private static MultiWorkUnit findAndPopBestFitGroup(WorkUnit workUnit, PriorityQueue<MultiWorkUnit> pQueue,
      double avgGroupSize) {
    List<MultiWorkUnit> fullWorkUnits = Lists.newArrayList();
    MultiWorkUnit bestFit = null;
    while (!pQueue.isEmpty()) {
      MultiWorkUnit candidate = pQueue.poll();
      if (getWorkUnitEstSize(candidate) + getWorkUnitEstSize(workUnit) <= avgGroupSize) {
        bestFit = candidate;
        break;
      }
      fullWorkUnits.add(candidate);
    }
    // Re-insert the groups that were too full to accept the workunit.
    pQueue.addAll(fullWorkUnits);
    return bestFit;
  }
}
| 3,317 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroDeserializerBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryFactory;
import org.apache.gobblin.kafka.schemareg.SchemaRegistryException;
/**
* The LinkedIn Avro Deserializer (works with records serialized by the {@link LiAvroSerializerBase})
*/
@Slf4j
public class LiAvroDeserializerBase {
  // Registry used to resolve an MD5 schema id (embedded in each payload) to the writer schema.
  private KafkaSchemaRegistry<MD5Digest, Schema> _schemaRegistry;
  // Shared reader, re-configured on every deserialize() call; instances are therefore
  // NOT safe for concurrent use.
  private GenericDatumReader<GenericData.Record> _datumReader;
  public LiAvroDeserializerBase()
  {}
  public LiAvroDeserializerBase(KafkaSchemaRegistry<MD5Digest, Schema> schemaRegistry)
  {
    _schemaRegistry = schemaRegistry;
    _datumReader = new GenericDatumReader<>();
    Preconditions.checkState(_schemaRegistry!=null, "Schema Registry is not initialized");
    Preconditions.checkState(_datumReader!=null, "Datum Reader is not initialized");
  }
  /**
   * Configure this class. Builds the schema registry from the given configs; only value
   * (not key) deserialization is supported.
   * @param configs configs in key/value pairs
   * @param isKey whether is for key or value; must be false
   * @throws IllegalArgumentException if isKey is true
   */
  public void configure(Map<String, ?> configs, boolean isKey) {
    Preconditions.checkArgument(isKey==false, "LiAvroDeserializer only works for value fields");
    _datumReader = new GenericDatumReader<>();
    // The registry factory expects a Properties object, so stringify each config entry.
    Properties props = new Properties();
    for (Map.Entry<String, ?> entry: configs.entrySet())
    {
      String value = String.valueOf(entry.getValue());
      props.setProperty(entry.getKey(), value);
    }
    _schemaRegistry = KafkaSchemaRegistryFactory.getSchemaRegistry(props);
  }
  /**
   * Deserializes a payload of the form MAGIC_BYTE | schemaId-bytes | avro_payload: the schema id
   * is looked up in the registry to obtain the writer schema, and the record is decoded with the
   * writer schema resolved against {@code outputSchema} (when provided).
   *
   * @param topic topic associated with the data
   * @param data serialized bytes
   * @param outputSchema the schema to deserialize to. If null then the record schema is used.
   * @return deserialized object
   * @throws SerializationException on an unknown magic byte, a registry lookup failure, or a decoding error
   */
  public GenericRecord deserialize(String topic, byte[] data, Schema outputSchema)
      throws SerializationException {
    try {
      // MAGIC_BYTE | schemaId-bytes | avro_payload
      if (data[0] != LiAvroSerDeHelper.MAGIC_BYTE) {
        throw new SerializationException(String.format("Unknown magic byte for topic: %s ", topic));
      }
      MD5Digest schemaId = MD5Digest.fromBytes(data, 1 ); // read start after the first byte (magic byte)
      Schema schema = _schemaRegistry.getById(schemaId);
      // Decode only the avro payload: skip the magic byte and the schema-id prefix.
      Decoder decoder = DecoderFactory.get().binaryDecoder(data, 1 + MD5Digest.MD5_BYTES_LENGTH,
          data.length - MD5Digest.MD5_BYTES_LENGTH - 1, null);
      // Reader (expected) schema first, then writer (actual) schema; the shared reader is
      // mutated here, which is why this class is not thread-safe.
      _datumReader.setExpected(outputSchema);
      _datumReader.setSchema(schema);
      try {
        GenericRecord record = _datumReader.read(null, decoder);
        return record;
      } catch (IOException e) {
        log.error(String.format("Error during decoding record for topic %s: ", topic));
        throw e;
      }
    } catch (IOException | SchemaRegistryException e) {
      throw new SerializationException("Error during Deserialization", e);
    }
  }
  /**
   * Deserializes using the record's own (writer) schema as the output schema.
   *
   * @param topic topic associated with the data
   * @param data serialized bytes
   * @return deserialized object
   */
  public GenericRecord deserialize(String topic, byte[] data)
      throws SerializationException {
    return deserialize(topic, data, null);
  }
  // No resources are held; present to satisfy the deserializer lifecycle.
  public void close() {
  }
}
| 3,318 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/GsonSerializerBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import com.google.gson.JsonElement;
/**
* Base kafka GSON serializer, which serializes json data into string encoded with
* {@link StandardCharsets#UTF_8}
*/
public class GsonSerializerBase<T extends JsonElement> {

  /** No-op: this serializer needs no configuration. */
  public void configure(Map<String, ?> configs, boolean isKey) {
  }

  /**
   * Serializes a JSON element to its UTF-8 encoded string form.
   * A {@code null} element maps to a {@code null} payload.
   */
  public byte[] serialize(String topic, T data) {
    if (data == null) {
      return null;
    }
    return data.toString().getBytes(StandardCharsets.UTF_8);
  }

  /** No-op: no resources are held. */
  public void close() {
  }
}
| 3,319 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/GsonDeserializerBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
/**
* Base kafka Gson deserializer, which deserializes a json string to a {@link JsonElement}
*/
public class GsonDeserializerBase<T extends JsonElement> {
  private static final Gson GSON = new Gson();

  /** No-op: this deserializer needs no configuration. */
  public void configure(Map<String, ?> configs, boolean isKey) {
    // Do nothing
  }

  /**
   * Deserializes a UTF-8 encoded JSON payload into a {@link JsonElement}.
   *
   * @param topic topic associated with the data (unused)
   * @param data UTF-8 encoded JSON bytes; may be null (e.g. a Kafka tombstone record)
   * @return the parsed element, or {@code null} when {@code data} is null — mirroring
   *         {@code GsonSerializerBase}, which serializes null to null
   */
  @SuppressWarnings("unchecked") // T is bounded by JsonElement, which is what Gson produces here
  public T deserialize(String topic, byte[] data) {
    if (data == null) {
      // Previously this threw NPE on null payloads; null-in/null-out matches the serializer.
      return null;
    }
    return (T) GSON.fromJson(new String(data, StandardCharsets.UTF_8), JsonElement.class);
  }

  /** No-op: no resources are held. */
  public void close() {
    // Do nothing
  }
}
| 3,320 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/SerializationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
public class SerializationException extends Exception {

  /** Creates an exception with a descriptive message and no cause. */
  public SerializationException(String message) {
    super(message);
  }

  /** Creates an exception with a descriptive message and an underlying cause. */
  public SerializationException(String message, Exception cause) {
    super(message, cause);
  }

  /** Creates an exception wrapping an underlying cause. */
  public SerializationException(Exception cause) {
    super(cause);
  }
}
| 3,321 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroSerDeHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import java.util.Map;
import java.util.Properties;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryFactory;
/**
* Helper class for {@link LiAvroSerializer} and {@link LiAvroDeserializerBase}.
*/
public class LiAvroSerDeHelper {
  /** Leading byte of every serialized payload, used to recognize the LiAvro wire format. */
  public static final byte MAGIC_BYTE = 0x0;

  /** Utility class: static members only, not meant to be instantiated. */
  private LiAvroSerDeHelper() {
  }

  /**
   * Builds a {@link KafkaSchemaRegistry} from the given config map by converting it to
   * {@link Properties} (the form the registry factory expects).
   *
   * @param config serializer/deserializer configs in key/value pairs
   * @return a schema registry constructed by {@link KafkaSchemaRegistryFactory}
   */
  public static KafkaSchemaRegistry getSchemaRegistry(Map<String, ?> config) {
    Properties props = new Properties();
    props.putAll(config);
    return KafkaSchemaRegistryFactory.getSchemaRegistry(props);
  }
}
| 3,322 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroSerializerBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry;
import org.apache.gobblin.kafka.schemareg.SchemaRegistryException;
/**
* LinkedIn's implementation of Avro-schema based serialization for Kafka
* TODO: Implement this for IndexedRecord not just GenericRecord
*
*/
public class LiAvroSerializerBase {
  // Registry that maps a topic's schema to an MD5 id embedded in each payload.
  private KafkaSchemaRegistry<MD5Digest, Schema> schemaRegistry;
  private final EncoderFactory encoderFactory;
  private boolean isKey;

  public LiAvroSerializerBase() {
    this.isKey = false;
    this.encoderFactory = EncoderFactory.get();
  }

  /** Lazily initializes the schema registry from the given configs and records the key/value role. */
  public void configure(Map<String, ?> configs, boolean isKey) {
    if (this.schemaRegistry == null) {
      this.schemaRegistry = LiAvroSerDeHelper.getSchemaRegistry(configs);
    }
    this.isKey = isKey;
  }

  /**
   * Serializes a record as MAGIC_BYTE | schemaId-bytes | avro_payload, registering the
   * record's schema with the registry to obtain the schema id.
   *
   * @param topic topic the record belongs to
   * @param data the record to serialize
   * @return the encoded payload
   * @throws SerializationException if schema registration or Avro encoding fails
   */
  public byte[] serialize(String topic, GenericRecord data) throws SerializationException {
    Schema schema = data.getSchema();
    try {
      MD5Digest schemaId = schemaRegistry.register(topic, schema);
      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
      // Wire format: MAGIC_BYTE | schemaId-bytes | avro_payload
      buffer.write(LiAvroSerDeHelper.MAGIC_BYTE);
      buffer.write(schemaId.asBytes());
      BinaryEncoder encoder = encoderFactory.directBinaryEncoder(buffer, null);
      DatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
      writer.write(data, encoder);
      encoder.flush();
      byte[] payload = buffer.toByteArray();
      buffer.close();
      return payload;
    } catch (IOException | SchemaRegistryException e) {
      throw new SerializationException(e);
    }
  }

  /** No-op: no resources are held. */
  public void close() {
  }
}
| 3,323 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/MD5Digest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import java.util.Arrays;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
import com.google.common.base.Preconditions;
/**
* A holder for an MD5Digest
* Allows for conversion between the human-readable String version and the serializable byte[] version.
* Used by the {@link org.apache.gobblin.kafka.schemareg.LiKafkaSchemaRegistry}
*/
public class MD5Digest {
  public static final int MD5_BYTES_LENGTH = 16;

  private final byte[] bytes;
  private final String md5String;

  private MD5Digest(String md5String, byte[] md5bytes) {
    this.bytes = md5bytes;
    this.md5String = md5String;
  }

  /** @return the human-readable hex representation of the digest. */
  public String asString() {
    return this.md5String;
  }

  /**
   * @return the raw 16-byte digest.
   * NOTE(review): this returns the internal array without a defensive copy — callers must not
   * mutate it; confirm all call sites are read-only before changing.
   */
  public byte[] asBytes() {
    return this.bytes;
  }

  /**
   * Static method to get an MD5Digest from a human-readable string representation.
   *
   * @param md5String hex-encoded digest
   * @return a filled out MD5Digest
   * @throws IllegalArgumentException if the string is not valid hex
   */
  public static MD5Digest fromString(String md5String) {
    try {
      return new MD5Digest(md5String, Hex.decodeHex(md5String.toCharArray()));
    } catch (DecoderException e) {
      throw new IllegalArgumentException("Unable to convert md5string", e);
    }
  }

  /**
   * Static method to get an MD5Digest from a binary byte representation.
   *
   * @param md5Bytes exactly {@link #MD5_BYTES_LENGTH} digest bytes
   * @return a filled out MD5Digest
   */
  public static MD5Digest fromBytes(byte[] md5Bytes) {
    Preconditions.checkArgument(md5Bytes.length == MD5_BYTES_LENGTH,
        "md5 bytes must be " + MD5_BYTES_LENGTH + " bytes in length, found " + md5Bytes.length + " bytes.");
    return new MD5Digest(Hex.encodeHexString(md5Bytes), md5Bytes);
  }

  /**
   * Static method to get an MD5Digest from a binary byte representation, reading
   * {@link #MD5_BYTES_LENGTH} bytes starting at {@code offset}.
   *
   * @param md5Bytes source array
   * @param offset in the byte array to start reading from
   * @return a filled out MD5Digest
   */
  public static MD5Digest fromBytes(byte[] md5Bytes, int offset) {
    byte[] slice = Arrays.copyOfRange(md5Bytes, offset, offset + MD5_BYTES_LENGTH);
    //TODO: Replace this with a version that encodes without needing a copy.
    return new MD5Digest(Hex.encodeHexString(slice), slice);
  }

  @Override
  public int hashCode() {
    // md5String is never null: every construction path supplies a non-null hex string.
    return this.md5String.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof MD5Digest)) {
      return false;
    }
    MD5Digest that = (MD5Digest) obj;
    return this.md5String == null ? that.md5String == null : this.md5String.equals(that.md5String);
  }
}
| 3,324 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/writer/KafkaWriterHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.lang.reflect.InvocationTargetException;
import java.util.Properties;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import com.google.common.base.Throwables;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.types.FieldMappingException;
import org.apache.gobblin.types.TypeMapper;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys.*;
import static org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys.CLIENT_ID_DEFAULT;
/**
 * Helper class for version-specific Kafka writers.
 *
 * <p>Centralizes extraction of producer-scoped configuration, reflective producer
 * instantiation, and key/value extraction from records.
 */
@Slf4j
public class KafkaWriterHelper {

  /**
   * Extract the Kafka producer-scoped properties from the task-level properties.
   *
   * <p>Keys under {@code writer.kafka.producerConfig} are used, with a fallback to the shared
   * {@code gobblin.kafka.sharedConfig} scope. Serializers, client id and the schema-registry
   * name-switch flag receive defaults when unset.
   *
   * @param props task-level properties
   * @return properties ready to hand to a Kafka producer constructor
   */
  static Properties getProducerProperties(Properties props) {
    Config config = ConfigUtils.propertiesToConfig(props);

    // get the "writer.kafka.producerConfig" config for producer config to pass along to Kafka with a fallback to the
    // shared config that start with "gobblin.kafka.sharedConfig"
    Config producerConfig = ConfigUtils.getConfigOrEmpty(config, KAFKA_PRODUCER_CONFIG_PREFIX_NO_DOT).withFallback(
        ConfigUtils.getConfigOrEmpty(config, ConfigurationKeys.SHARED_KAFKA_CONFIG_PREFIX));

    Properties producerProperties = ConfigUtils.configToProperties(producerConfig);

    // Provide default properties if not set from above
    setDefaultIfUnset(producerProperties, KEY_SERIALIZER_CONFIG, DEFAULT_KEY_SERIALIZER);
    setDefaultIfUnset(producerProperties, VALUE_SERIALIZER_CONFIG, DEFAULT_VALUE_SERIALIZER);
    setDefaultIfUnset(producerProperties, CLIENT_ID_CONFIG, CLIENT_ID_DEFAULT);
    setDefaultIfUnset(producerProperties, KAFKA_SCHEMA_REGISTRY_SWITCH_NAME, KAFKA_SCHEMA_REGISTRY_SWITCH_NAME_DEFAULT);
    return producerProperties;
  }

  /** Set {@code key} to {@code value} only when the key is not already present. */
  private static void setDefaultIfUnset(Properties props, String key, String value) {
    if (!props.containsKey(key)) {
      props.setProperty(key, value);
    }
  }

  /**
   * Instantiate the Kafka producer class named by {@code writer.kafka.producerClass}
   * (default: {@code org.apache.kafka.clients.producer.KafkaProducer}) via its
   * single-{@link Properties} constructor.
   *
   * @param props task-level properties; producer-scoped settings are extracted via
   *              {@link #getProducerProperties(Properties)}
   * @return the instantiated producer (untyped to remain Kafka-version independent)
   * @throws RuntimeException wrapping any reflection failure
   */
  public static Object getKafkaProducer(Properties props) {
    Config config = ConfigFactory.parseProperties(props);
    String kafkaProducerClass = ConfigUtils.getString(config, KafkaWriterConfigurationKeys.KAFKA_WRITER_PRODUCER_CLASS,
        KafkaWriterConfigurationKeys.KAFKA_WRITER_PRODUCER_CLASS_DEFAULT);
    Properties producerProps = getProducerProperties(props);
    try {
      // Class.forName already returns Class<?>; no cast needed.
      Class<?> producerClass = Class.forName(kafkaProducerClass);
      return ConstructorUtils.invokeConstructor(producerClass, producerProps);
    } catch (ReflectiveOperationException e) {
      // Covers ClassNotFound/NoSuchMethod/IllegalAccess/Instantiation/InvocationTarget.
      log.error("Failed to instantiate Kafka producer from class " + kafkaProducerClass, e);
      // Throwables.propagate is deprecated; wrapping in RuntimeException is equivalent here
      // since all caught exceptions are checked.
      throw new RuntimeException(e);
    }
  }

  /**
   * Split a record into the (key, value) pair to send to Kafka.
   *
   * @param record incoming record
   * @param commonConfig writer configuration holding the type mapper and key/value field paths
   * @param <K> key type
   * @param <V> record/value type
   * @return pair of extracted key (null when writes are not keyed) and value
   * @throws FieldMappingException if the configured key or value field cannot be extracted
   */
  public static <K, V> Pair<K, V> getKeyValuePair(V record, KafkaWriterCommonConfig commonConfig)
      throws FieldMappingException {
    K key = null;
    TypeMapper typeMapper = commonConfig.getTypeMapper();
    if (commonConfig.isKeyed()) {
      key = (K) typeMapper.getField(record, commonConfig.getKeyField());
    }
    V value = record;
    // FIELD_PATH_ALL means "send the whole record as the value".
    if (!commonConfig.getValueField().equals(TypeMapper.FIELD_PATH_ALL)) {
      value = (V) typeMapper.getField(record, commonConfig.getValueField());
    }
    return new ImmutablePair<>(key, value);
  }
}
| 3,325 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/writer/KafkaWriterCommonConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import lombok.Getter;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.types.TypeMapper;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys.*;
/**
 * Version-independent configuration for Kafka Writers.
 */
public class KafkaWriterCommonConfig {
  // True when records should be written with an explicit Kafka key.
  @Getter
  private final boolean keyed;
  // Field path used to extract the key from each record; required when keyed.
  @Getter
  private final String keyField;
  // Mapper used to pull key/value fields out of records.
  @Getter
  private final TypeMapper typeMapper;
  // Field path used to extract the value; FIELD_PATH_ALL means "the whole record".
  @Getter
  private final String valueField;

  /**
   * @param config writer configuration
   * @throws ConfigurationException if the type mapper cannot be instantiated or the
   *         configuration is inconsistent (keyed writes without a key field)
   */
  public KafkaWriterCommonConfig(Config config)
      throws ConfigurationException {
    try {
      this.keyed = ConfigUtils.getBoolean(config, WRITER_KAFKA_KEYED_CONFIG, WRITER_KAFKA_KEYED_DEFAULT);
      this.keyField = ConfigUtils.getString(config, WRITER_KAFKA_KEYFIELD_CONFIG, WRITER_KAFKA_KEYFIELD_DEFAULT);
      this.valueField = ConfigUtils.getString(config, WRITER_KAFKA_VALUEFIELD_CONFIG, WRITER_KAFKA_VALUEFIELD_DEFAULT);
      String mapperClassName =
          ConfigUtils.getString(config, WRITER_KAFKA_TYPEMAPPERCLASS_CONFIG, WRITER_KAFKA_TYPEMAPPERCLASS_DEFAULT);
      this.typeMapper = (TypeMapper) GobblinConstructorUtils.invokeLongestConstructor(Class.forName(mapperClassName));
      Preconditions.checkArgument(!this.keyed || (this.keyField != null),
          "With keyed writes to Kafka, you must provide a key fieldname to be used");
    } catch (Exception e) {
      throw new ConfigurationException("Failed to configure KafkaWriterCommonConfig", e);
    }
  }
}
| 3,326 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/writer/BaseKafkaDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
/**
 * Base class for creating KafkaDataWriter builders.
 *
 * <p>Retained only for backward compatibility: it pins the schema type to Avro
 * {@link Schema} and the record type to {@link GenericRecord}. New code should extend
 * {@link AbstractKafkaDataWriterBuilder} directly with its own type parameters.
 *
 * @deprecated Use {@link AbstractKafkaDataWriterBuilder}
 */
@Deprecated
public abstract class BaseKafkaDataWriterBuilder extends AbstractKafkaDataWriterBuilder<Schema, GenericRecord> {
}
| 3,327 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/writer/KafkaDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.writer.AsyncDataWriter;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
/**
 * A Kafka-specific {@link AsyncDataWriter} that can additionally write an explicit
 * (key, value) pair rather than deriving everything from the record alone.
 *
 * @param <K> type of the Kafka record key
 * @param <V> type of the Kafka record value
 */
public interface KafkaDataWriter<K, V> extends AsyncDataWriter<V> {

  /**
   * Asynchronously write a Key-Value pair, execute the callback on success/failure
   *
   * @param record the (key, value) pair to send
   * @param callback invoked when the write completes or fails; may be {@code null}
   * @return a future that resolves to the {@link WriteResponse} for this write
   */
  Future<WriteResponse> write(Pair<K, V> record, @Nullable WriteCallback callback);
}
| 3,328 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/writer/KafkaWriterConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import org.apache.gobblin.types.AvroGenericRecordTypeMapper;
import org.apache.gobblin.types.TypeMapper;
/**
* Configuration keys for a KafkaWriter.
*/
public class KafkaWriterConfigurationKeys {
/** Writer specific configuration keys go here **/
public static final String KAFKA_TOPIC = "writer.kafka.topic";
static final String KAFKA_WRITER_PRODUCER_CLASS = "writer.kafka.producerClass";
static final String KAFKA_WRITER_PRODUCER_CLASS_DEFAULT = "org.apache.kafka.clients.producer.KafkaProducer";
static final String COMMIT_TIMEOUT_MILLIS_CONFIG = "writer.kafka.commitTimeoutMillis";
static final long COMMIT_TIMEOUT_MILLIS_DEFAULT = 60000; // 1 minute
static final String COMMIT_STEP_WAIT_TIME_CONFIG = "writer.kafka.commitStepWaitTimeMillis";
static final long COMMIT_STEP_WAIT_TIME_DEFAULT = 500; // 500ms
static final String FAILURE_ALLOWANCE_PCT_CONFIG = "writer.kafka.failureAllowancePercentage";
static final double FAILURE_ALLOWANCE_PCT_DEFAULT = 20.0;
public static final String WRITER_KAFKA_KEYED_CONFIG = "writer.kafka.keyed";
public static final boolean WRITER_KAFKA_KEYED_DEFAULT = false;
public static final String WRITER_KAFKA_KEYFIELD_CONFIG = "writer.kafka.keyField";
public static final String WRITER_KAFKA_KEYFIELD_DEFAULT = null;
public static final String WRITER_KAFKA_TYPEMAPPERCLASS_CONFIG = "writer.kafka.typeMapperClass";
public static final String WRITER_KAFKA_TYPEMAPPERCLASS_DEFAULT = AvroGenericRecordTypeMapper.class.getName();
public static final String WRITER_KAFKA_VALUEFIELD_CONFIG = "writer.kafka.valueField";
public static final String WRITER_KAFKA_VALUEFIELD_DEFAULT = TypeMapper.FIELD_PATH_ALL;
/**
* Kafka producer configurations will be passed through as is as long as they are prefixed
* by the PREFIX specified below.
*/
public static final String KAFKA_PRODUCER_CONFIG_PREFIX_NO_DOT = "writer.kafka.producerConfig";
public static final String KAFKA_PRODUCER_CONFIG_PREFIX = KAFKA_PRODUCER_CONFIG_PREFIX_NO_DOT + ".";
/** Kafka producer scoped configuration keys go here **/
static final String KEY_SERIALIZER_CONFIG = "key.serializer";
static final String DEFAULT_KEY_SERIALIZER = "org.apache.kafka.common.serialization.StringSerializer";
public static final String VALUE_SERIALIZER_CONFIG = "value.serializer";
static final String DEFAULT_VALUE_SERIALIZER = "org.apache.kafka.common.serialization.ByteArraySerializer";
static final String CLIENT_ID_CONFIG = "client.id";
static final String CLIENT_ID_DEFAULT = "gobblin";
static final String KAFKA_SCHEMA_REGISTRY_SWITCH_NAME = "kafka.schemaRegistry.switchName";
static final String KAFKA_SCHEMA_REGISTRY_SWITCH_NAME_DEFAULT = "true";
public static final String KAFKA_TOPIC_CONFIG = "writer.kafka.";
static final String TOPIC_NAME = "topic";
public static final String CLUSTER_ZOOKEEPER = KAFKA_TOPIC_CONFIG + "zookeeper";
static final String REPLICATION_COUNT = KAFKA_TOPIC_CONFIG + "replicationCount";
static final int REPLICATION_COUNT_DEFAULT = 1;
public static final String PARTITION_COUNT = KAFKA_TOPIC_CONFIG + "partitionCount";
static final int PARTITION_COUNT_DEFAULT = 1;
public static final String DELETE_TOPIC_IF_EXISTS = KAFKA_TOPIC_CONFIG + "deleteTopicIfExists";
static final Boolean DEFAULT_DELETE_TOPIC_IF_EXISTS = false;
public static final String ZOOKEEPER_SESSION_TIMEOUT = CLUSTER_ZOOKEEPER + ".sto";
static final int ZOOKEEPER_SESSION_TIMEOUT_DEFAULT = 10000; // 10 seconds
public static final String ZOOKEEPER_CONNECTION_TIMEOUT = CLUSTER_ZOOKEEPER + ".cto";
static final int ZOOKEEPER_CONNECTION_TIMEOUT_DEFAULT = 8000; // 8 seconds
} | 3,329 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/writer/AbstractKafkaDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.io.IOException;
import java.util.Properties;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.AsyncDataWriter;
import org.apache.gobblin.writer.AsyncWriterManager;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
 * Base kafka data writer builder. It builds an async kafka {@link DataWriter} by wrapping the
 * version-specific {@link AsyncDataWriter} in an {@link AsyncWriterManager}.
 */
public abstract class AbstractKafkaDataWriterBuilder<S, D> extends DataWriterBuilder<S, D> {

  /** Create the version-specific async writer that the {@link AsyncWriterManager} will wrap. */
  protected abstract AsyncDataWriter<D> getAsyncDataWriter(Properties props) throws ConfigurationException;

  /**
   * Build a {@link DataWriter}.
   *
   * @throws IOException if there is anything wrong building the writer
   * @return the built {@link DataWriter}
   */
  @Override
  public DataWriter<D> build()
      throws IOException {
    State destinationState = this.destination.getProperties();
    Properties writerProps = destinationState.getProperties();
    Config writerConfig = ConfigUtils.propertiesToConfig(writerProps);

    long commitTimeout = ConfigUtils.getLong(writerConfig,
        KafkaWriterConfigurationKeys.COMMIT_TIMEOUT_MILLIS_CONFIG,
        KafkaWriterConfigurationKeys.COMMIT_TIMEOUT_MILLIS_DEFAULT);
    long commitStepWait = ConfigUtils.getLong(writerConfig,
        KafkaWriterConfigurationKeys.COMMIT_STEP_WAIT_TIME_CONFIG,
        KafkaWriterConfigurationKeys.COMMIT_STEP_WAIT_TIME_DEFAULT);
    // Configured as a percentage; the manager expects a ratio in [0, 1].
    double failureAllowanceRatio = ConfigUtils.getDouble(writerConfig,
        KafkaWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_CONFIG,
        KafkaWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_DEFAULT) / 100.0;

    return AsyncWriterManager.builder()
        .config(writerConfig)
        .commitTimeoutMillis(commitTimeout)
        .commitStepWaitTimeInMillis(commitStepWait)
        .failureAllowanceRatio(failureAllowanceRatio)
        .retriesEnabled(false)
        .asyncDataWriter(getAsyncDataWriter(writerProps))
        .build();
  }
}
| 3,330 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/writer/KafkaWriterMetricNames.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
/**
 * Listing of Metrics names used by the {@link KafkaDataWriter}.
 *
 * <p>Pure constants holder; not meant to be instantiated.
 */
public class KafkaWriterMetricNames {

  /**
   * A {@link com.codahale.metrics.Meter} measuring the number of records sent to
   * a {@link org.apache.gobblin.kafka.writer.KafkaDataWriter}.
   */
  public static final String RECORDS_PRODUCED_METER = "gobblin.writer.kafka.records.produced";

  /**
   * A {@link com.codahale.metrics.Meter} measuring the number of records that failed to be written by
   * {@link org.apache.gobblin.kafka.writer.KafkaDataWriter}.
   */
  public static final String RECORDS_FAILED_METER = "gobblin.writer.kafka.records.failed";

  /**
   * A {@link com.codahale.metrics.Meter} measuring the number of records that were successfully written by
   * {@link org.apache.gobblin.kafka.writer.KafkaDataWriter}.
   */
  public static final String RECORDS_SUCCESS_METER = "gobblin.writer.kafka.records.success";

  /** Constants holder; prevent instantiation. */
  private KafkaWriterMetricNames() {
  }
}
| 3,331 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/HttpClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import org.apache.commons.httpclient.DefaultHttpMethodRetryHandler;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpMethodRetryHandler;
import org.apache.commons.httpclient.params.HttpMethodParams;
import org.apache.commons.pool2.BasePooledObjectFactory;
import org.apache.commons.pool2.PooledObject;
import org.apache.commons.pool2.impl.DefaultPooledObject;
import lombok.Setter;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * An implementation of {@link BasePooledObjectFactory} for {@link HttpClient}.
 *
 * @author mitu
 */
public class HttpClientFactory extends BasePooledObjectFactory<HttpClient> {

  @Setter private int soTimeout = -1;
  @Setter private int connTimeout = -1;
  @Setter private int httpMethodRetryCount = 3;
  @Setter private boolean httpRequestSentRetryEnabled = false;
  @Setter private String httpMethodRetryHandlerClass = DefaultHttpMethodRetryHandler.class.getName();

  public HttpClientFactory() {
  }

  /**
   * Create a new {@link HttpClient} configured with this factory's socket/connection
   * timeouts and retry handler.
   */
  @Override
  public HttpClient create() {
    HttpClient newClient = new HttpClient();
    // A negative timeout means "keep the library default".
    if (this.soTimeout >= 0) {
      newClient.getParams().setSoTimeout(this.soTimeout);
    }
    if (this.connTimeout >= 0) {
      newClient.getHttpConnectionManager().getParams().setConnectionTimeout(this.connTimeout);
    }
    newClient.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, buildRetryHandler());
    return newClient;
  }

  /** Resolve and instantiate the configured {@link HttpMethodRetryHandler}. */
  private HttpMethodRetryHandler buildRetryHandler() {
    ClassAliasResolver<HttpMethodRetryHandler> resolver = new ClassAliasResolver<>(HttpMethodRetryHandler.class);
    try {
      return GobblinConstructorUtils.invokeLongestConstructor(
          resolver.resolveClass(this.httpMethodRetryHandlerClass), this.httpMethodRetryCount,
          this.httpRequestSentRetryEnabled);
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public PooledObject<HttpClient> wrap(HttpClient client) {
    return new DefaultPooledObject<>(client);
  }
}
| 3,332 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/SchemaRegistryException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
/**
 * Exception indicating a failure while interacting with a Kafka schema registry
 * (lookup, registration, or transport errors).
 */
public class SchemaRegistryException extends Exception {
  // Explicit serialVersionUID for this Serializable type (avoids compiler-generated UID drift).
  private static final long serialVersionUID = 1L;

  /** @param message description of the failure */
  public SchemaRegistryException(String message) {
    super(message);
  }

  /**
   * @param message description of the failure
   * @param t underlying cause
   */
  public SchemaRegistryException(String message, Throwable t) {
    super(message, t);
  }

  /** @param t underlying cause */
  public SchemaRegistryException(Throwable t) {
    super(t);
  }
}
| 3,333 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/LiKafkaSchemaRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpException;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.methods.PostMethod;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.kafka.serialize.MD5Digest;
import org.apache.gobblin.metrics.reporter.util.KafkaReporterUtils;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.PropertiesUtils;
/**
 * Integration with LinkedIn's implementation of a schema registry that uses md5-hash for schema ids.
 *
 * <p>All registry operations go over HTTP. {@link HttpClient} instances are drawn from a pool
 * sized to the number of work-unit creation threads so that concurrent callers do not serialize
 * on a single client.
 */
public class LiKafkaSchemaRegistry implements KafkaSchemaRegistry<MD5Digest, Schema> {
  private static final Logger LOG = LoggerFactory.getLogger(LiKafkaSchemaRegistry.class);

  // URL path fragments understood by the registry service.
  private static final String GET_RESOURCE_BY_ID = "/id=";
  private static final String GET_RESOURCE_BY_TYPE = "/latest_with_type=";
  // The registry returns the id of a newly registered schema in this response header.
  private static final String SCHEMA_ID_HEADER_NAME = "Location";
  private static final String SCHEMA_ID_HEADER_PREFIX = "/id=";

  private final GenericObjectPool<HttpClient> httpClientPool;
  // Base URL of the schema registry service.
  private final String url;
  // Optional namespace overrides applied to schemas before registration.
  private final Optional<Map<String, String>> namespaceOverride;
  // When true, schemas are renamed to the topic name on registration; see register(String, Schema).
  private final boolean switchTopicNames;

  /**
   * @param props properties should contain property "kafka.schema.registry.url", and optionally
   * "kafka.schema.registry.max.cache.size" (default = 1000) and
   * "kafka.schema.registry.cache.expire.after.write.min" (default = 10).
   */
  public LiKafkaSchemaRegistry(Properties props) {
    Preconditions.checkArgument(props.containsKey(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_URL),
        String.format("Property %s not provided.", KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_URL));

    this.url = props.getProperty(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_URL);
    this.namespaceOverride = KafkaReporterUtils.extractOverrideNamespace(props);
    this.switchTopicNames = PropertiesUtils.getPropAsBoolean(props, KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_SWITCH_NAME,
        KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_SWITCH_NAME_DEFAULT);

    // Pool size matches the work-unit creation thread count so each thread can hold a client.
    int objPoolSize =
        Integer.parseInt(props.getProperty(ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
            "" + ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT));
    LOG.info("Create HttpClient pool with size " + objPoolSize);

    GenericObjectPoolConfig config = new GenericObjectPoolConfig();
    config.setMaxTotal(objPoolSize);
    config.setMaxIdle(objPoolSize);
    this.httpClientPool = new GenericObjectPool<>(new HttpClientFactory(), config);
  }

  /**
   * Fetch a schema by its md5 id.
   */
  @Override
  public Schema getById(MD5Digest id)
      throws IOException, SchemaRegistryException {
    return fetchSchemaByKey(id);
  }

  /**
   *
   * @return whether this implementation of the schema registry has an internal cache
   */
  @Override
  public boolean hasInternalCache() {
    return false;
  }

  /**
   * Get the latest schema of a topic.
   *
   * @param topic topic name
   * @return the latest schema
   * @throws SchemaRegistryException if failed to retrieve schema.
   */
  @Override
  public Schema getLatestSchema(String topic) throws SchemaRegistryException {
    String schemaUrl = this.url + GET_RESOURCE_BY_TYPE + topic;
    LOG.debug("Fetching from URL : " + schemaUrl);

    GetMethod get = new GetMethod(schemaUrl);
    int statusCode;
    String schemaString;
    HttpClient httpClient = this.borrowClient();
    try {
      statusCode = httpClient.executeMethod(get);
      schemaString = get.getResponseBodyAsString();
    } catch (IOException e) {
      // HttpException is a subclass of IOException; both were previously wrapped identically.
      throw new RuntimeException(e);
    } finally {
      get.releaseConnection();
      this.httpClientPool.returnObject(httpClient);
    }

    if (statusCode != HttpStatus.SC_OK) {
      throw new SchemaRegistryException(
          String.format("Latest schema for topic %s cannot be retrieved. Status code = %d", topic, statusCode));
    }

    Schema schema;
    try {
      schema = new Schema.Parser().parse(schemaString);
    } catch (Throwable t) {
      throw new SchemaRegistryException(String.format("Latest schema for topic %s cannot be retrieved", topic), t);
    }
    return schema;
  }

  /** Borrow a client from the pool, translating any pool failure into a registry exception. */
  private HttpClient borrowClient() throws SchemaRegistryException {
    try {
      return this.httpClientPool.borrowObject();
    } catch (Exception e) {
      // Preserve the underlying cause instead of discarding it.
      throw new SchemaRegistryException("Unable to borrow " + HttpClient.class.getSimpleName(), e);
    }
  }

  /**
   * Register a schema to the Kafka schema registry under the provided input name. This method will change the name
   * of the schema to the provided name if configured to do so. This is useful because certain services (like Gobblin kafka adaptor and
   * Camus) get the schema for a topic by querying for the latest schema with the topic name, requiring the topic
   * name and schema name to match for all topics. If it is not configured to switch names, this is useful for the case
   * where the Kafka topic and Avro schema names do not match. This method registers the schema to the schema registry in such a
   * way that any schema can be written to any topic.
   *
   * @param schema {@link org.apache.avro.Schema} to register.
   * @param name Name of the schema when registerd to the schema registry. This name should match the name
   *             of the topic where instances will be published.
   * @return schema ID of the registered schema.
   * @throws SchemaRegistryException if registration failed
   */
  @Override
  public MD5Digest register(String name, Schema schema) throws SchemaRegistryException {
    PostMethod post = new PostMethod(url);
    if (this.switchTopicNames) {
      return register(AvroUtils.switchName(schema, name), post);
    } else {
      post.addParameter("name", name);
      return register(schema, post);
    }
  }

  /**
   * Register a schema to the Kafka schema registry
   *
   * @param schema schema to register (namespace may be rewritten per configured override)
   * @param post prepared POST request; parameters are added here before execution
   * @return schema ID of the registered schema
   * @throws SchemaRegistryException if registration failed
   */
  public synchronized MD5Digest register(Schema schema, PostMethod post) throws SchemaRegistryException {
    // Change namespace if override specified
    if (this.namespaceOverride.isPresent()) {
      schema = AvroUtils.switchNamespace(schema, this.namespaceOverride.get());
    }

    LOG.info("Registering schema " + schema.toString());
    post.addParameter("schema", schema.toString());

    HttpClient httpClient = this.borrowClient();
    try {
      LOG.debug("Loading: " + post.getURI());
      int statusCode = httpClient.executeMethod(post);
      if (statusCode != HttpStatus.SC_CREATED) {
        throw new SchemaRegistryException("Error occurred while trying to register schema: " + statusCode);
      }

      String response;
      response = post.getResponseBodyAsString();
      if (response != null) {
        LOG.info("Received response " + response);
      }

      // The new schema's id is carried in the "Location" response header as "/id=<md5>".
      String schemaKey;
      Header[] headers = post.getResponseHeaders(SCHEMA_ID_HEADER_NAME);
      if (headers.length != 1) {
        throw new SchemaRegistryException(
            "Error reading schema id returned by registerSchema call: headers.length = " + headers.length);
      } else if (!headers[0].getValue().startsWith(SCHEMA_ID_HEADER_PREFIX)) {
        throw new SchemaRegistryException(
            "Error parsing schema id returned by registerSchema call: header = " + headers[0].getValue());
      } else {
        LOG.info("Registered schema successfully");
        schemaKey = headers[0].getValue().substring(SCHEMA_ID_HEADER_PREFIX.length());
      }
      MD5Digest schemaId = MD5Digest.fromString(schemaKey);
      return schemaId;
    } catch (Throwable t) {
      throw new SchemaRegistryException(t);
    } finally {
      post.releaseConnection();
      this.httpClientPool.returnObject(httpClient);
    }
  }

  /**
   * Fetch schema by key.
   */
  protected Schema fetchSchemaByKey(MD5Digest key) throws SchemaRegistryException {
    String schemaUrl = this.url + GET_RESOURCE_BY_ID + key.asString();

    GetMethod get = new GetMethod(schemaUrl);
    int statusCode;
    String schemaString;
    HttpClient httpClient = this.borrowClient();
    try {
      statusCode = httpClient.executeMethod(get);
      schemaString = get.getResponseBodyAsString();
    } catch (IOException e) {
      throw new SchemaRegistryException(e);
    } finally {
      get.releaseConnection();
      this.httpClientPool.returnObject(httpClient);
    }

    if (statusCode != HttpStatus.SC_OK) {
      throw new SchemaRegistryException(
          String.format("Schema with key %s cannot be retrieved, statusCode = %d", key, statusCode));
    }

    Schema schema;
    try {
      schema = new Schema.Parser().parse(schemaString);
    } catch (Throwable t) {
      throw new SchemaRegistryException(String.format("Schema with ID = %s cannot be parsed", key), t);
    }
    return schema;
  }
}
| 3,334 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/KafkaSchemaRegistryFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.lang.reflect.InvocationTargetException;
import java.util.Properties;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import lombok.extern.slf4j.Slf4j;
/**
 * A Factory that constructs and hands back {@link KafkaSchemaRegistry} implementations.
 */
@Slf4j
public class KafkaSchemaRegistryFactory {

  public static final String DEFAULT_TRY_CACHING = "true";

  /**
   * Instantiates the {@link KafkaSchemaRegistry} named by
   * {@link KafkaSchemaRegistryConfigurationKeys#KAFKA_SCHEMA_REGISTRY_CLASS} (via a constructor taking
   * {@link Properties}), wrapping it in a {@link CachingKafkaSchemaRegistry} when caching is enabled
   * and the registry has no internal cache of its own.
   *
   * @param props configuration; must contain the registry class-name property
   * @return a ready-to-use schema registry
   * @throws IllegalArgumentException if the registry class property is missing
   * @throws RuntimeException if the registry class cannot be found or instantiated
   */
  @SuppressWarnings("unchecked")
  public static KafkaSchemaRegistry getSchemaRegistry(Properties props) {
    Preconditions.checkArgument(props.containsKey(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS),
        "Missing required property " + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS);

    boolean tryCache = Boolean.parseBoolean(
        props.getProperty(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CACHE, DEFAULT_TRY_CACHING));

    try {
      Class<?> clazz =
          Class.forName(props.getProperty(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS));
      KafkaSchemaRegistry schemaRegistry = (KafkaSchemaRegistry) ConstructorUtils.invokeConstructor(clazz, props);
      // Add a caching decorator only when requested and not already provided by the registry itself.
      if (tryCache && !schemaRegistry.hasInternalCache()) {
        schemaRegistry = new CachingKafkaSchemaRegistry(schemaRegistry);
      }
      return schemaRegistry;
    } catch (ReflectiveOperationException e) {
      // Covers ClassNotFound/NoSuchMethod/IllegalAccess/InvocationTarget/Instantiation exceptions.
      // All are unrecoverable configuration errors; surface as unchecked (replaces the deprecated
      // Throwables.propagate, which wrapped checked exceptions in RuntimeException the same way).
      log.error("Failed to instantiate " + KafkaSchemaRegistry.class, e);
      throw new RuntimeException(e);
    }
  }
}
| 3,335 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/GobblinHttpMethodRetryHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.io.IOException;
import java.net.NoRouteToHostException;
import java.net.UnknownHostException;
import org.apache.commons.httpclient.DefaultHttpMethodRetryHandler;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.gobblin.annotation.Alias;
/**
 * An extension of {@link DefaultHttpMethodRetryHandler} that also retries the HTTP request on
 * network errors such as {@link java.net.UnknownHostException} and
 * {@link java.net.NoRouteToHostException}, which the default handler treats as fatal.
 */
@Alias (value = "gobblinhttpretryhandler")
public class GobblinHttpMethodRetryHandler extends DefaultHttpMethodRetryHandler {

  /** Default: up to 3 retries, no retry once the request has been sent. */
  public GobblinHttpMethodRetryHandler() {
    this(3, false);
  }

  public GobblinHttpMethodRetryHandler(int retryCount, boolean requestSentRetryEnabled) {
    super(retryCount, requestSentRetryEnabled);
  }

  /**
   * Decide whether the request should be retried after the given exception.
   *
   * @throws IllegalArgumentException if {@code method} or {@code exception} is null
   */
  @Override
  public boolean retryMethod(final HttpMethod method, final IOException exception, int executionCount) {
    if (method == null) {
      throw new IllegalArgumentException("HTTP method may not be null");
    }
    if (exception == null) {
      throw new IllegalArgumentException("Exception parameter may not be null");
    }
    // Never retry past the configured budget, regardless of exception type.
    if (executionCount > super.getRetryCount()) {
      return false;
    }
    // Unlike DefaultHttpMethodRetryHandler, treat transient name-resolution / routing
    // failures as retryable; everything else falls through to the default policy.
    boolean unreachableHost =
        exception instanceof UnknownHostException || exception instanceof NoRouteToHostException;
    return unreachableHost || super.retryMethod(method, exception, executionCount);
  }
}
| 3,336 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/ConfigDrivenMd5SchemaRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Properties;
import org.apache.avro.Schema;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.kafka.serialize.MD5Digest;
/**
 * A SchemaRegistry that hands out MD5 based ids based on configuration.
 * Can be configured to be initialized with a single schema name, value pair.
 * See {@link ConfigDrivenMd5SchemaRegistry.ConfigurationKeys} for configuration.
 */
public class ConfigDrivenMd5SchemaRegistry implements KafkaSchemaRegistry<MD5Digest, Schema> {

  /** Configuration keys for optionally seeding the registry with one (name, schema) pair. */
  private static class ConfigurationKeys {
    private static final String SCHEMA_NAME_KEY="schemaRegistry.schema.name";
    private static final String SCHEMA_VALUE_KEY="schemaRegistry.schema.value";
  }

  // In-memory stores: id -> schema for lookups by digest, name -> schema for latest-schema lookups.
  private final HashMap<MD5Digest, Schema> _schemaHashMap = new HashMap<>();
  private final HashMap<String, Schema> _topicSchemaMap = new HashMap<>();

  /** Compute the MD5 digest of the schema's string form; this digest is the schema's id. */
  private MD5Digest generateId(Schema schema) {
    try {
      byte[] schemaBytes = schema.toString().getBytes("UTF-8");
      byte[] md5bytes = MessageDigest.getInstance("MD5").digest(schemaBytes);
      MD5Digest md5Digest = MD5Digest.fromBytes(md5bytes);
      return md5Digest;
    } catch (UnsupportedEncodingException | NoSuchAlgorithmException e) {
      // UTF-8 and MD5 are required on every JVM, so this should be unreachable.
      throw new IllegalStateException("Unexpected error trying to convert schema to bytes", e);
    }
  }

  /** Construct a registry pre-populated with a single named schema. */
  public ConfigDrivenMd5SchemaRegistry(String name, Schema schema)
      throws IOException, SchemaRegistryException {
    this.register(name, schema);
  }

  public ConfigDrivenMd5SchemaRegistry(Properties props)
      throws IOException, SchemaRegistryException {
    this(ConfigFactory.parseProperties(props));
  }

  /**
   * Construct a registry, optionally seeded from config.
   * If {@code schemaRegistry.schema.name} is set, {@code schemaRegistry.schema.value} must hold the
   * corresponding Avro schema JSON, which is parsed and registered under that name.
   */
  public ConfigDrivenMd5SchemaRegistry(Config config)
      throws IOException, SchemaRegistryException {
    if (config.hasPath(ConfigurationKeys.SCHEMA_NAME_KEY)) {
      String name = config.getString(ConfigurationKeys.SCHEMA_NAME_KEY);
      String value = config.getString(ConfigurationKeys.SCHEMA_VALUE_KEY);
      Schema schema = new Schema.Parser().parse(value);
      register(name, schema);
    }
  }

  /**
   * Register this schema under the provided name.
   *
   * @param name topic/name to associate the schema with
   * @param schema the schema to register
   * @return the MD5-based id of the schema
   * @throws IOException never thrown here; declared by the interface
   * @throws SchemaRegistryException never thrown here; declared by the interface
   */
  @Override
  public synchronized MD5Digest register(String name, Schema schema)
      throws IOException, SchemaRegistryException {
    MD5Digest md5Digest = generateId(schema);
    if (!_schemaHashMap.containsKey(md5Digest)) {
      _schemaHashMap.put(md5Digest, schema);
      _topicSchemaMap.put(name, schema);
    }
    return md5Digest;
  }

  /**
   * Get a schema given an id.
   *
   * @param id the MD5 digest id of the schema
   * @return the registered schema
   * @throws SchemaRegistryException if no schema is registered under this id
   */
  @Override
  public Schema getById(MD5Digest id)
      throws IOException, SchemaRegistryException {
    if (_schemaHashMap.containsKey(id))
    {
      return _schemaHashMap.get(id);
    }
    else
    {
      throw new SchemaRegistryException("Could not find schema with id : " + id.asString());
    }
  }

  /**
   * Will throw a SchemaRegistryException if it cannot find any schema for the provided name.
   * {@inheritDoc}
   */
  @Override
  public Schema getLatestSchema(String name)
      throws IOException, SchemaRegistryException {
    if (_topicSchemaMap.containsKey(name))
    {
      return _topicSchemaMap.get(name);
    }
    else
    {
      throw new SchemaRegistryException("Could not find any schema for " + name);
    }
  }

  /**
   * This registry keeps all schemas in in-memory maps, so it never needs an external cache.
   * {@inheritDoc}
   * @return true
   */
  @Override
  public boolean hasInternalCache() {
    return true;
  }
}
| 3,337 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/KafkaSchemaRegistryConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
/**
 * Configuration keys for a kafka schema registry.
 */
public class KafkaSchemaRegistryConfigurationKeys {
  /** Fully qualified class name of the {@code KafkaSchemaRegistry} implementation to instantiate. */
  public static final String KAFKA_SCHEMA_REGISTRY_CLASS = "kafka.schemaRegistry.class";
  /** URL of the schema registry service. */
  public static final String KAFKA_SCHEMA_REGISTRY_URL = "kafka.schemaRegistry.url";
  /** Whether to wrap the registry in a caching layer. */
  public static final String KAFKA_SCHEMA_REGISTRY_CACHE = "kafka.schemaRegistry.cache";
  /** Whether to switch the schema name on registration. */
  public static final String KAFKA_SCHEMA_REGISTRY_SWITCH_NAME = "kafka.schemaRegistry.switchName";
  public static final String KAFKA_SCHEMA_REGISTRY_SWITCH_NAME_DEFAULT = "true";
  /** Namespace override applied to schemas before registration. */
  public static final String KAFKA_SCHEMA_REGISTRY_OVERRIDE_NAMESPACE = "kafka.schemaRegistry.overrideNamespace";
}
| 3,338 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/CachingKafkaSchemaRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.io.IOException;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
/**
 * An implementation that wraps a passed in schema registry and caches interactions with it.
 * {@inheritDoc}
 * */
@Slf4j
public class CachingKafkaSchemaRegistry<K,S> implements KafkaSchemaRegistry<K,S> {
  private static final int DEFAULT_MAX_SCHEMA_REFERENCES = 10;

  private final KafkaSchemaRegistry<K, S> _kafkaSchemaRegistry;
  // name -> (schema object -> id); the inner map is identity-keyed, see register().
  private final HashMap<String, Map<S, K>> _namedSchemaCache;
  private final HashMap<K, S> _idBasedCache;
  // Cap on distinct schema object references cached per name, to bound memory if callers
  // keep creating new (but equal) schema instances.
  private final int _maxSchemaReferences;

  public CachingKafkaSchemaRegistry(KafkaSchemaRegistry<K, S> kafkaSchemaRegistry)
  {
    this(kafkaSchemaRegistry, DEFAULT_MAX_SCHEMA_REFERENCES);
  }

  /**
   * Create a caching schema registry.
   * @param kafkaSchemaRegistry: a schema registry that needs caching; must not already have an internal cache
   * @param maxSchemaReferences: the maximum number of unique references that can exist for a given schema.
   * @throws IllegalArgumentException if the wrapped registry is null or already caches internally
   */
  public CachingKafkaSchemaRegistry(KafkaSchemaRegistry<K, S> kafkaSchemaRegistry, int maxSchemaReferences)
  {
    Preconditions.checkArgument(kafkaSchemaRegistry!=null, "KafkaSchemaRegistry cannot be null");
    Preconditions.checkArgument(!kafkaSchemaRegistry.hasInternalCache(), "SchemaRegistry already has a cache.");
    _kafkaSchemaRegistry = kafkaSchemaRegistry;
    _namedSchemaCache = new HashMap<>();
    _idBasedCache = new HashMap<>();
    _maxSchemaReferences = maxSchemaReferences;
  }

  /**
   * Register the schema, consulting the cache first to avoid duplicate remote registrations
   * of the same schema object.
   *
   * @throws IllegalStateException if too many distinct schema objects have been cached for this name
   */
  @Override
  synchronized public K register(String name, S schema)
      throws IOException, SchemaRegistryException {
    Map<S, K> schemaIdMap;
    if (_namedSchemaCache.containsKey(name))
    {
      schemaIdMap = _namedSchemaCache.get(name);
    }
    else {
      // we really care about reference equality to de-dup using cache
      // when it comes to registering schemas, so use an IdentityHashMap here
      schemaIdMap = new IdentityHashMap<>();
      _namedSchemaCache.put(name, schemaIdMap);
    }
    if (schemaIdMap.containsKey(schema))
    {
      return schemaIdMap.get(schema);
    }
    else
    {
      // check if schemaIdMap is getting too full
      Preconditions.checkState(schemaIdMap.size() < _maxSchemaReferences, "Too many schema objects for " + name +". Cache is overfull.");
    }
    K id = _kafkaSchemaRegistry.register(name, schema);
    schemaIdMap.put(schema, id);
    _idBasedCache.put(id, schema);
    return id;
  }

  /**
   * Look up a schema by id, delegating to the wrapped registry on a cache miss and
   * caching the result.
   */
  @Override
  synchronized public S getById(K id)
      throws IOException, SchemaRegistryException {
    if (_idBasedCache.containsKey(id))
    {
      return _idBasedCache.get(id);
    }
    else
    {
      S schema = _kafkaSchemaRegistry.getById(id);
      _idBasedCache.put(id, schema);
      return schema;
    }
  }

  /**
   * This call is not cached because we never want to miss out on the latest schema.
   * {@inheritDoc}
   */
  @Override
  public S getLatestSchema(String name)
      throws IOException, SchemaRegistryException {
    return _kafkaSchemaRegistry.getLatestSchema(name);
  }

  @Override
  public boolean hasInternalCache() {
    return true;
  }
}
| 3,339 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/schemareg/KafkaSchemaRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.schemareg;
import java.io.IOException;
/**
 * An interface for a Kafka Schema Registry.
 * Classes implementing this interface will typically be constructed by a {@link KafkaSchemaRegistryFactory}
 * and should have a constructor that takes a {@link java.util.Properties} object as a parameter.
 *
 * @param <K> : the type of the schema identifier (e.g. int, string, md5, ...)
 * @param <S> : the type of the schema system in use (e.g. avro's Schema, ... )
 */
public interface KafkaSchemaRegistry<K, S> {

  /**
   * Register this schema under the provided name.
   *
   * @param name the name (typically a topic) to register the schema under
   * @param schema the schema to register
   * @return the schema identifier
   * @throws IOException on communication failure with the registry
   * @throws SchemaRegistryException on registry-level failure
   */
  K register(String name, S schema) throws IOException, SchemaRegistryException;

  /**
   * Get a schema given an id.
   *
   * @param id the schema identifier
   * @return the Schema
   * @throws IOException on communication failure with the registry
   * @throws SchemaRegistryException if no schema exists for the id
   */
  S getById(K id) throws IOException, SchemaRegistryException;

  /**
   * Get the latest schema that was registered under this name.
   *
   * @param name the name the schema was registered under
   * @return the latest Schema
   * @throws IOException on communication failure with the registry
   * @throws SchemaRegistryException if no schema exists for the name
   */
  S getLatestSchema(String name) throws IOException, SchemaRegistryException;

  /**
   * SchemaRegistry implementations that do not have an internal cache can set this to false
   * and Gobblin will supplement such registries with a cache on top (if enabled).
   *
   * @return whether this implementation of the schema registry has an internal cache
   */
  boolean hasInternalCache();
}
| 3,340 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/client/DecodeableKafkaRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
/**
 * A kafka record that provides getters for deserialized key and value. This record type can be used to wrap kafka records
 * consumed through new kafka-client consumer APIs (0.9 and above) which support serializers and deserializers.
 *
 * @param <K> Message key type. If this record does not have a key use - <b><code>?</code></b>
 * @param <V> Message value type
 */
public interface DecodeableKafkaRecord<K, V> extends KafkaConsumerRecord {

  /**
   * The deserialized key of this record; may be null when the record only carries a value.
   */
  K getKey();

  /**
   * The deserialized value of this record.
   */
  V getValue();
}
| 3,341 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/client/GobblinConsumerRebalanceListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import java.util.Collection;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
/**
 * A listener that is called when kafka partitions are re-assigned when a consumer leaves/joins a group
 *
 * For more details, See https://kafka.apache.org/10/javadoc/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.html
 */
public interface GobblinConsumerRebalanceListener {

  /** Invoked with the partitions that are being taken away from this consumer. */
  void onPartitionsRevoked(Collection<KafkaPartition> partitions);

  /** Invoked with the partitions newly assigned to this consumer. */
  void onPartitionsAssigned(Collection<KafkaPartition> partitions);
}
| 3,342 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/client/ByteArrayBasedKafkaRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
/**
 * A kafka record that provides getters for raw bytes of key and value . This record type can be used to wrap kafka
 * records consumed through old kafka-client consumer APIs (0.8 and below) which do NOT support serializers and deserializers.
 */
public interface ByteArrayBasedKafkaRecord extends KafkaConsumerRecord {

  /** Raw bytes of the message value. */
  byte[] getMessageBytes();

  /** Raw bytes of the message key. */
  byte[] getKeyBytes();
}
| 3,343 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/client/AbstractBaseKafkaConsumerClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.regex.Pattern;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import javax.annotation.Nonnull;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.DatasetFilterUtils;
/**
 * A base {@link GobblinKafkaConsumerClient} that sets configurations shared by all {@link GobblinKafkaConsumerClient}s
 */
@Slf4j
public abstract class AbstractBaseKafkaConsumerClient implements GobblinKafkaConsumerClient {

  public static final String CONFIG_NAMESPACE = "source.kafka";
  public static final String CONFIG_PREFIX = CONFIG_NAMESPACE + ".";
  public static final String CONSUMER_CONFIG = "consumerConfig";
  public static final String CONFIG_KAFKA_FETCH_TIMEOUT_VALUE = CONFIG_PREFIX + "fetchTimeoutMillis";
  public static final int CONFIG_KAFKA_FETCH_TIMEOUT_VALUE_DEFAULT = 1000; // 1 second
  public static final String CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES = CONFIG_PREFIX + "fetchMinBytes";
  private static final int CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES_DEFAULT = 1024;
  public static final String CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE = CONFIG_PREFIX + "socketTimeoutMillis";
  public static final int CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE_DEFAULT = 30000; // 30 seconds
  public static final String CONFIG_ENABLE_SCHEMA_CHECK = CONFIG_PREFIX + "enableSchemaCheck";
  public static final boolean ENABLE_SCHEMA_CHECK_DEFAULT = false;

  protected final List<String> brokers;
  protected final int fetchTimeoutMillis;
  protected final int fetchMinBytes;
  protected final int socketTimeoutMillis;
  protected final Config config;
  // Lazily initialized in isSchemaRegistryConfigured(); null until first use.
  protected Optional<KafkaSchemaRegistry> schemaRegistry;
  protected final boolean schemaCheckEnabled;

  /**
   * @param config source configuration; must list at least one Kafka broker, and the fetch timeout
   *               must be strictly smaller than the socket timeout
   * @throws IllegalArgumentException if no broker is configured or the timeouts are inconsistent
   */
  public AbstractBaseKafkaConsumerClient(Config config) {
    this.config = config;
    this.brokers = ConfigUtils.getStringList(config, ConfigurationKeys.KAFKA_BROKERS);
    if (this.brokers.isEmpty()) {
      throw new IllegalArgumentException("Need to specify at least one Kafka broker.");
    }

    this.socketTimeoutMillis =
        ConfigUtils.getInt(config, CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE, CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE_DEFAULT);
    this.fetchTimeoutMillis =
        ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_TIMEOUT_VALUE, CONFIG_KAFKA_FETCH_TIMEOUT_VALUE_DEFAULT);
    this.fetchMinBytes =
        ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES, CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES_DEFAULT);

    Preconditions.checkArgument((this.fetchTimeoutMillis < this.socketTimeoutMillis),
        "Kafka Source configuration error: FetchTimeout " + this.fetchTimeoutMillis
            + " must be smaller than SocketTimeout " + this.socketTimeoutMillis);
    this.schemaCheckEnabled = ConfigUtils.getBoolean(config, CONFIG_ENABLE_SCHEMA_CHECK, ENABLE_SCHEMA_CHECK_DEFAULT);
  }

  /**
   * Filter topics based on whitelist and blacklist patterns
   * and if {@link #schemaCheckEnabled}, also filter on whether schema is present in schema registry
   * @param blacklist - List of regex patterns that need to be blacklisted
   * @param whitelist - List of regex patterns that need to be whitelisted
   *
   * @return the topics that survive both the pattern filters and the optional schema check
   */
  @Override
  public List<KafkaTopic> getFilteredTopics(final List<Pattern> blacklist, final List<Pattern> whitelist) {
    return Lists.newArrayList(Iterables.filter(getTopics(), new Predicate<KafkaTopic>() {
      @Override
      public boolean apply(@Nonnull KafkaTopic kafkaTopic) {
        return DatasetFilterUtils.survived(kafkaTopic.getName(), blacklist, whitelist) && isSchemaPresent(kafkaTopic.getName());
      }
    }));
  }

  /**
   * Lazily initialize {@link #schemaRegistry} on first use; it is present only when both the
   * registry class and the registry URL are configured.
   * Synchronized to avoid racing initializations when topic filtering happens concurrently.
   */
  private synchronized boolean isSchemaRegistryConfigured() {
    if(this.schemaRegistry == null) {
      this.schemaRegistry = (config.hasPath(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS) && config.hasPath(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL)) ? Optional.of(KafkaSchemaRegistry.get(ConfigUtils.configToProperties(this.config))) : Optional.absent();
    }
    return this.schemaRegistry.isPresent();
  }

  /**
   * Accept the topic unless {@link #schemaCheckEnabled}, a schema registry is configured,
   * and the registry has no schema for the topic.
   * @param topic topic name to check
   * @return true if the topic should be kept
   */
  private boolean isSchemaPresent(String topic) {
    if(this.schemaCheckEnabled && isSchemaRegistryConfigured()) {
      try {
        if(this.schemaRegistry.get().getLatestSchemaByTopic(topic) == null) {
          log.warn(String.format("Schema not found for topic %s skipping.", topic));
          return false;
        }
      } catch (SchemaRegistryException e) {
        log.warn(String.format("Schema not found for topic %s skipping.", topic));
        return false;
      }
    }
    return true;
  }

  /**
   * A helper method that returns the canonical metric name for a kafka metric. A typical canonicalized metric name would
   * be of the following format: "{metric-group}_{client-id}_{metric-name}". This method is invoked in {@link GobblinKafkaConsumerClient#getMetrics()}
   * implementations to convert KafkaMetric names to a Coda Hale metric name. Note that the canonicalization is done on every invocation of the
   * {@link GobblinKafkaConsumerClient#getMetrics()} ()} API.
   * @param metricGroup the type of the Kafka metric e.g."consumer-fetch-manager-metrics", "consumer-coordinator-metrics" etc.
   * @param metricTags any tags associated with the Kafka metric, typically include the kafka client id, topic name, partition number etc.
   * @param metricName the name of the Kafka metric e.g. "records-lag-max", "fetch-throttle-time-max" etc.
   * @return the canonicalized metric name.
   */
  protected String canonicalMetricName(String metricGroup, Collection<String> metricTags, String metricName) {
    List<String> nameParts = new ArrayList<>();
    nameParts.add(metricGroup);
    nameParts.addAll(metricTags);
    nameParts.add(metricName);
    // Join the parts, then canonicalize: spaces and dots both become underscores, yielding the
    // documented "{metric-group}_{client-id}_{metric-name}" format.
    // NOTE(review): the original called String.replace("\\.", "_"), but String.replace is literal
    // (not regex), so it looked for a backslash-dot sequence that never occurs and left dots intact.
    return String.join(".", nameParts).replace(' ', '_').replace('.', '_');
  }

  /**
   * Get a list of all kafka topics
   */
  public abstract List<KafkaTopic> getTopics();

  /**
   * Get a list of {@link KafkaTopic} with the provided topic names.
   * The default implementation lists all the topics.
   * Implementations of this class can improve this method.
   */
  public Collection<KafkaTopic> getTopics(Collection<String> topics) {
    return getTopics();
  }
}
| 3,344 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/client/BaseKafkaConsumerRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.ToString;
/**
 * A base {@link KafkaConsumerRecord} that carries the offset, value size, topic and partition id
 * shared by all concrete record implementations.
 *
 * <p>NOTE: the Lombok {@code @AllArgsConstructor} generates a constructor whose parameter order
 * follows the field declaration order below — do not reorder the instance fields.</p>
 */
@AllArgsConstructor
@EqualsAndHashCode
@ToString
public abstract class BaseKafkaConsumerRecord implements KafkaConsumerRecord {

  private final long offset;
  private final long valueSizeInBytes;
  private final String topic;
  private final int partitionId;

  /** Sentinel returned by {@link #getValueSizeInBytes()} when the kafka-client cannot report a size. */
  public static final long VALUE_SIZE_UNAVAILABLE = -1L;

  @Override
  public long getOffset() {
    return this.offset;
  }

  @Override
  public long getNextOffset() {
    // The offset immediately following this record.
    return this.offset + 1L;
  }

  @Override
  public long getValueSizeInBytes() {
    return this.valueSizeInBytes;
  }

  @Override
  public int getPartition() {
    return this.partitionId;
  }

  @Override
  public String getTopic() {
    return this.topic;
  }
}
| 3,345 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/client/KafkaConsumerRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import java.util.concurrent.TimeUnit;
/**
 * A kafka message/record consumed from {@link GobblinKafkaConsumerClient}. This interface provides APIs to read message
 * metadata. Extension interfaces like {@link DecodeableKafkaRecord} or {@link ByteArrayBasedKafkaRecord} provide APIs
 * to read the actual message/record.
 */
public interface KafkaConsumerRecord {

  /**
   * Offset of this record.
   */
  long getOffset();

  /**
   * Next offset after this record.
   */
  long getNextOffset();

  /**
   * Size of the message in bytes. {@value BaseKafkaConsumerRecord#VALUE_SIZE_UNAVAILABLE} if kafka-client version
   * does not provide size (like Kafka 09 clients).
   */
  long getValueSizeInBytes();

  /**
   * @return the timestamp of the underlying ConsumerRecord; defaults to 0 when unavailable.
   */
  default long getTimestamp() {
    return 0;
  }

  /**
   * @return true if the timestamp in the ConsumerRecord is the timestamp when the record is written to Kafka.
   */
  default boolean isTimestampLogAppend() {
    return false;
  }

  /**
   * @return partition id for this record
   */
  int getPartition();

  /**
   * @return topic for this record
   */
  String getTopic();

  /**
   * @param fieldName the field name containing the record creation time.
   * @param timeUnit the timeunit for the timestamp field.
   * @return the record creation timestamp, if it is available. Defaults to 0.
   */
  default long getRecordCreationTimestamp(String fieldName, TimeUnit timeUnit) {
    return 0;
  }
}
| 3,346 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/client/GobblinKafkaConsumerClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import java.io.Closeable;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import com.codahale.metrics.Metric;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaOffsetRetrievalFailureException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.apache.gobblin.util.DatasetFilterUtils;
/**
 * A simplified, generic wrapper client to communicate with Kafka. This class is (AND MUST never) depend on classes
 * defined in kafka-clients library. The request and response objects are defined in gobblin. This allows gobblin
 * sources and extractors to work with different versions of kafka. Concrete classes implementing this interface use a
 * specific version of kafka-client library.
 *
 * <p>
 * This simplified client interface supports consumer operations required by gobblin to pull from kafka. Most of the APIs
 * are migrated from the legacy gobblin.source.extractor.extract.kafka.KafkaWrapper$KafkaAPI
 * </p>
 */
public interface GobblinKafkaConsumerClient extends Closeable {

  /**
   * Get a list of {@link KafkaTopic}s that satisfy the <code>blacklist</code> and <code>whitelist</code> patterns.
   *
   * @param blacklist List of regex patterns that need to be blacklisted
   * @param whitelist List of regex patterns that need to be whitelisted
   *
   * @see DatasetFilterUtils#survived(String, List, List)
   */
  List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist);

  /**
   * Get the earliest available offset for a <code>partition</code>.
   *
   * @param partition for which earliest offset is retrieved
   *
   * @throws UnsupportedOperationException - If the underlying kafka-client does not support getting earliest offset
   */
  long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException;

  /**
   * Get the latest available offset for a <code>partition</code>.
   *
   * @param partition for which latest offset is retrieved
   *
   * @throws UnsupportedOperationException - If the underlying kafka-client does not support getting latest offset
   */
  long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException;

  /**
   * Get the latest available offset for a {@link Collection} of {@link KafkaPartition}s. NOTE: The default implementation
   * is not efficient i.e. it will make a getLatestOffset() call for every {@link KafkaPartition}. Individual implementations
   * of {@link GobblinKafkaConsumerClient} should override this method to use more advanced APIs of the underlying KafkaConsumer
   * to retrieve the latest offsets for a collection of partitions.
   *
   * @param partitions for which latest offset is retrieved
   *
   * @throws KafkaOffsetRetrievalFailureException - If the underlying kafka-client does not support getting latest offset
   */
  default Map<KafkaPartition, Long> getLatestOffsets(Collection<KafkaPartition> partitions)
      throws KafkaOffsetRetrievalFailureException {
    Map<KafkaPartition, Long> offsetMap = Maps.newHashMap();
    for (KafkaPartition partition : partitions) {
      offsetMap.put(partition, getLatestOffset(partition));
    }
    return offsetMap;
  }

  /**
   * API to consume records from kafka starting from <code>nextOffset</code> till <code>maxOffset</code>.
   * If <code>nextOffset</code> is greater than <code>maxOffset</code>, returns a null.
   * <p>
   * <b>NOTE:</b> If the underlying kafka-client version does not support
   * reading message till an end offset, all available messages are read, <code>maxOffset</code> is ignored.
   * </p>
   *
   * @param partition whose messages are read
   * @param nextOffset to begin reading
   * @param maxOffset to stop reading (iff underlying client supports)
   *
   * @return An {@link Iterator} of {@link KafkaConsumerRecord}s
   */
  Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset);

  /**
   * API to consume records from kafka (after a prior {@link #subscribe(String)}).
   * The default implementation returns an empty iterator.
   */
  default Iterator<KafkaConsumerRecord> consume() {
    return Collections.emptyIterator();
  }

  /**
   * Subscribe to a topic. No-op by default.
   *
   * @param topic topic name to subscribe to
   */
  default void subscribe(String topic) {
  }

  /**
   * Subscribe to a topic along with a {@link GobblinConsumerRebalanceListener}. No-op by default.
   *
   * @param topic topic name to subscribe to
   * @param listener callback invoked on partition rebalances
   */
  default void subscribe(String topic, GobblinConsumerRebalanceListener listener) {
  }

  /**
   * API to return underlying Kafka consumer metrics. The individual implementations must translate
   * org.apache.kafka.common.Metric to Coda Hale Metrics. A typical use case for reporting the consumer metrics
   * will call this method inside a scheduled thread.
   *
   * @return metrics keyed by metric name; empty by default
   */
  default Map<String, Metric> getMetrics() {
    return Maps.newHashMap();
  }

  /**
   * Commit offsets manually to Kafka asynchronously. No-op by default.
   */
  default void commitOffsetsAsync(Map<KafkaPartition, Long> partitionOffsets) {
  }

  /**
   * Commit offsets manually to Kafka synchronously. No-op by default.
   */
  default void commitOffsetsSync(Map<KafkaPartition, Long> partitionOffsets) {
  }

  /**
   * Returns the last committed offset for a KafkaPartition.
   *
   * @param partition partition to query
   * @return last committed offset or -1 for invalid KafkaPartition
   */
  default long committed(KafkaPartition partition) {
    return -1L;
  }

  /**
   * Assign the consumer to the given partitions and seek each one to its watermark offset.
   * No-op by default.
   */
  default void assignAndSeek(List<KafkaPartition> topicPartitions, Map<KafkaPartition, LongWatermark> topicWatermarksMap) {
  }

  /**
   * A factory to create {@link GobblinKafkaConsumerClient}s.
   */
  interface GobblinKafkaConsumerClientFactory {
    /**
     * Creates a new {@link GobblinKafkaConsumerClient} for <code>config</code>.
     */
    GobblinKafkaConsumerClient create(Config config);
  }
}
| 3,347 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/jmh/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-common/src/jmh/java/org/apache/gobblin/source/extractor/extract/kafka/HdrHistogramPerformanceBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.concurrent.TimeUnit;
import java.util.stream.IntStream;
import org.HdrHistogram.Histogram;
import org.apache.commons.math3.random.RandomDataGenerator;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.ChainedOptionsBuilder;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import lombok.extern.slf4j.Slf4j;
/**
 * A micro-benchmark to measure the time taken to serialize a {@link Histogram} instance to its String representation. The
 * benchmark uses a Random number generator to generate values according to a Uniform Distribution, an adversarial pattern
 * for a Histogram that is likely to produce more count buckets in comparison with a skewed distribution. The benchmark
 * provides an upper bound on memory footprint of the histogram, serialization time, as well as the size of the
 * serialized representation.
 */
@Warmup (iterations = 3)
@Measurement (iterations = 10)
@BenchmarkMode (value = Mode.AverageTime)
@Fork (value = 1)
@OutputTimeUnit (TimeUnit.MILLISECONDS)
@Slf4j
public class HdrHistogramPerformanceBenchmark {

  @State (value = Scope.Benchmark)
  public static class HistogramState {
    // Value bounds for recorded samples: 1 ms up to 24 hours expressed in millis.
    private static final long MIN_VALUE = 1;
    private static final long MAX_VALUE = TimeUnit.HOURS.toMillis(24);

    private Histogram histogram1;
    private Histogram histogram2;
    private Histogram histogram3;
    private Histogram histogram4;
    private final RandomDataGenerator random = new RandomDataGenerator();

    /** Rebuild all histograms before each iteration so measurements start from a fresh state. */
    @Setup (value = Level.Iteration)
    public void setUp() {
      this.histogram1 = buildHistogram(1000000);
      this.histogram2 = buildHistogram(2000000);
      this.histogram3 = buildHistogram(4000000);
      this.histogram4 = buildHistogram(10000000);
    }

    /** Builds a histogram populated with {@code size} uniformly distributed samples. */
    private Histogram buildHistogram(int size) {
      Histogram histogram = new Histogram(MIN_VALUE, MAX_VALUE, 3);
      IntStream.range(0, size).mapToLong(i -> random.nextLong(MIN_VALUE, MAX_VALUE))
          .forEachOrdered(histogram::recordValue);
      // Reported for informational purposes; runs outside the measured benchmark methods.
      System.out.println("Estimated memory footprint of histogram is: " + histogram.getEstimatedFootprintInBytes());
      return histogram;
    }

    @TearDown (value = Level.Iteration)
    public void tearDown() {
      this.histogram1.reset();
      this.histogram2.reset();
      this.histogram3.reset();
      this.histogram4.reset();
    }
  }

  @Benchmark
  public String trackHistogram1MToStringConversion(HistogramState histogramState) {
    String histogramString = KafkaExtractorStatsTracker.convertHistogramToString(histogramState.histogram1);
    System.out.println("Histogram serialized string size: " + histogramString.length());
    return histogramString;
  }

  @Benchmark
  public String trackHistogram2MToStringConversion(HistogramState histogramState) {
    String histogramString = KafkaExtractorStatsTracker.convertHistogramToString(histogramState.histogram2);
    System.out.println("Histogram serialized string size: " + histogramString.length());
    return histogramString;
  }

  @Benchmark
  public String trackHistogram4MToStringConversion(HistogramState histogramState) {
    String histogramString = KafkaExtractorStatsTracker.convertHistogramToString(histogramState.histogram3);
    System.out.println("Histogram serialized string size: " + histogramString.length());
    return histogramString;
  }

  @Benchmark
  public String trackHistogram10MToStringConversion(HistogramState histogramState) {
    String histogramString = KafkaExtractorStatsTracker.convertHistogramToString(histogramState.histogram4);
    System.out.println("Histogram serialized string size: " + histogramString.length());
    return histogramString;
  }

  /** Measures the cost of merging four large histograms into a fresh one. */
  @Benchmark
  public Histogram trackMergeHistogram(HistogramState histogramState) {
    // Static bounds accessed via the class, not an instance reference.
    Histogram histogram = new Histogram(HistogramState.MIN_VALUE, HistogramState.MAX_VALUE, 3);
    histogram.add(histogramState.histogram1);
    histogram.add(histogramState.histogram2);
    histogram.add(histogramState.histogram3);
    histogram.add(histogramState.histogram4);
    return histogram;
  }

  /** Measures bare construction cost of an empty histogram. */
  @Benchmark
  public Histogram trackBuildHistogram(HistogramState histogramState) {
    Histogram histogram = new Histogram(HistogramState.MIN_VALUE, HistogramState.MAX_VALUE, 3);
    return histogram;
  }

  /** Measures the cost of resetting a 10M-sample histogram; the blackhole defeats dead-code elimination. */
  @Benchmark
  public void trackResetHistogram(HistogramState histogramState, Blackhole blackhole) {
    int dummyVal = 1;
    histogramState.histogram4.reset();
    blackhole.consume(dummyVal);
  }

  public static void main(String[] args) throws Exception {
    ChainedOptionsBuilder opt = new OptionsBuilder()
        .include(HdrHistogramPerformanceBenchmark.class.getSimpleName())
        .warmupIterations(3)
        .measurementIterations(10);
    new Runner(opt.build()).run();
  }
}
| 3,348 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin/writer/GobblinOrcWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.orc.OrcConf;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.io.Closer;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.workunit.WorkUnit;
import static org.apache.gobblin.writer.GenericRecordToOrcValueWriterTest.deserializeOrcRecords;
import static org.mockito.Mockito.when;
/**
* For running these tests in IDE, make sure all ORC libraries existed in the external library folder are specified
* with "nohive" classifier if they do (orc-core)
*/
public class GobblinOrcWriterTest {
/**
 * Deserializes all Avro {@link GenericRecord}s from a JSON-encoded classpath resource.
 *
 * <p>The JSON decoder signals end-of-input by throwing an {@link IOException}; that exception is
 * deliberately treated as the end-of-data marker rather than an error.</p>
 *
 * @param clazz class whose classloader is used to locate the resource
 * @param schema Avro schema describing the records
 * @param schemaPath classpath-relative path to the JSON data resource
 * @return all records read before end-of-input
 * @throws IOException if the decoder cannot be created
 */
public static final List<GenericRecord> deserializeAvroRecords(Class clazz, Schema schema, String schemaPath)
    throws IOException {
  List<GenericRecord> records = new ArrayList<>();
  GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
  // try-with-resources closes the stream on every path; the original leaked it on success.
  try (InputStream dataInputStream = clazz.getClassLoader().getResourceAsStream(schemaPath)) {
    Decoder decoder = DecoderFactory.get().jsonDecoder(schema, dataInputStream);
    try {
      GenericRecord recordContainer = reader.read(null, decoder);
      while (recordContainer != null) {
        records.add(recordContainer);
        recordContainer = reader.read(null, decoder);
      }
    } catch (IOException ioe) {
      // End of JSON input: the decoder raises IOException when no more records remain.
    }
  }
  return records;
}
/**
 * A basic unit test for trivial writer correctness: writes two records, verifies buffering
 * (nothing materialized before commit), the close-before-flush failure mode, and that a
 * double-close through the {@link Closer} is harmless.
 * TODO: A detailed test suite of ORC-writer for different sorts of schema:
 */
@Test
public void testWrite() throws Exception {
  Schema schema =
      new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("orc_writer_test/schema.avsc"));
  List<GenericRecord> recordList = deserializeAvroRecords(this.getClass(), schema, "orc_writer_test/data.json");
  // Mock WriterBuilder, bunch of mocking behaviors to work-around precondition checks in writer builder
  FsDataWriterBuilder<Schema, GenericRecord> mockBuilder =
      (FsDataWriterBuilder<Schema, GenericRecord>) Mockito.mock(FsDataWriterBuilder.class);
  when(mockBuilder.getSchema()).thenReturn(schema);
  State dummyState = new WorkUnit();
  String stagingDir = Files.createTempDir().getAbsolutePath();
  String outputDir = Files.createTempDir().getAbsolutePath();
  dummyState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir);
  dummyState.setProp(ConfigurationKeys.WRITER_FILE_PATH, "simple");
  dummyState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir);
  when(mockBuilder.getFileName(dummyState)).thenReturn("file");
  Path outputFilePath = new Path(outputDir, "simple/file");
  // Having a closer to manage the life-cycle of the writer object.
  // Will verify if scenarios like double-close could survive.
  Closer closer = Closer.create();
  GobblinOrcWriter orcWriter = closer.register(new GobblinOrcWriter(mockBuilder, dummyState));
  // Create one more writer to test fail-case: closing before any flush/commit.
  GobblinOrcWriter orcFailWriter = new GobblinOrcWriter(mockBuilder, dummyState);
  for (GenericRecord record : recordList) {
    orcWriter.write(record);
    orcFailWriter.write(record);
  }
  // Not yet flushed or reaching default batch size, no records should have been materialized.
  Assert.assertEquals(orcWriter.recordsWritten(), 0);
  Assert.assertEquals(orcFailWriter.recordsWritten(), 0);
  // Try close, should catch relevant CloseBeforeFlushException
  try {
    orcFailWriter.close();
  } catch (CloseBeforeFlushException e) {
    Assert.assertEquals(e.datasetName, schema.getName());
  }
  orcWriter.commit();
  Assert.assertEquals(orcWriter.recordsWritten(), 2);
  // Verify ORC file contains correct records.
  FileSystem fs = FileSystem.getLocal(new Configuration());
  Assert.assertTrue(fs.exists(outputFilePath));
  List<Writable> orcRecords = deserializeOrcRecords(outputFilePath, fs);
  Assert.assertEquals(orcRecords.size(), 2);
  // Double-close without protection of org.apache.gobblinGobblinOrcWriter#closed
  // leads to NPE within org.apache.orc.impl.PhysicalFsWriter.writeFileMetadata. Try removing protection condition
  // in close method implementation if want to verify.
  try {
    closer.close();
  } catch (NullPointerException npe) {
    Assert.fail();
  }
}
/**
 * Verifies that when self-tuning grows the batch size, records already buffered in the
 * rowBatch are flushed first so none are lost, and all of them land in the output file.
 */
@Test
public void testSelfTuneRowBatchSizeIncrease() throws Exception {
  Schema schema =
      new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("orc_writer_test/schema.avsc"));
  List<GenericRecord> recordList = deserializeAvroRecords(this.getClass(), schema, "orc_writer_test/data_multi.json");
  // Mock WriterBuilder, bunch of mocking behaviors to work-around precondition checks in writer builder
  FsDataWriterBuilder<Schema, GenericRecord> mockBuilder =
      (FsDataWriterBuilder<Schema, GenericRecord>) Mockito.mock(FsDataWriterBuilder.class);
  when(mockBuilder.getSchema()).thenReturn(schema);
  State dummyState = new WorkUnit();
  String stagingDir = Files.createTempDir().getAbsolutePath();
  String outputDir = Files.createTempDir().getAbsolutePath();
  dummyState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir);
  dummyState.setProp(ConfigurationKeys.WRITER_FILE_PATH, "selfTune");
  dummyState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir);
  dummyState.setProp(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_ENABLED, "true");
  when(mockBuilder.getFileName(dummyState)).thenReturn("file");
  Path outputFilePath = new Path(outputDir, "selfTune/file");
  // Having a closer to manage the life-cycle of the writer object.
  Closer closer = Closer.create();
  GobblinOrcWriter orcWriter = closer.register(new GobblinOrcWriter(mockBuilder, dummyState));
  // Initialize the rowBatch such that it should store all records
  orcWriter.rowBatch.ensureSize(5);
  orcWriter.batchSize = 5;
  for (GenericRecord record : recordList) {
    orcWriter.write(record);
  }
  // Force the batchSize to increase, lets ensure that the records are not lost in the rowBatch
  orcWriter.tuneBatchSize(1);
  Assert.assertFalse(orcWriter.batchSize == 5);
  Assert.assertTrue(orcWriter.rowBatch.size == 0, "Expected the row batch to be flushed to preserve data");
  // Not yet flushed in ORC
  Assert.assertEquals(orcWriter.recordsWritten(), 0);
  orcWriter.commit();
  Assert.assertEquals(orcWriter.recordsWritten(), 4);
  // Verify ORC file contains correct records.
  FileSystem fs = FileSystem.getLocal(new Configuration());
  Assert.assertTrue(fs.exists(outputFilePath));
  List<Writable> orcRecords = deserializeOrcRecords(outputFilePath, fs);
  Assert.assertEquals(orcRecords.size(), 4);
}
/**
 * Verifies that when self-tuning shrinks the batch size (simulated here by reporting a huge
 * estimated record size to tuneBatchSize), buffered records are flushed first and all of
 * them are preserved in the committed output.
 */
@Test
public void testSelfTuneRowBatchSizeDecrease() throws Exception {
  Schema schema =
      new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("orc_writer_test/schema.avsc"));
  List<GenericRecord> recordList = deserializeAvroRecords(this.getClass(), schema, "orc_writer_test/data_multi.json");
  // Mock WriterBuilder, bunch of mocking behaviors to work-around precondition checks in writer builder
  FsDataWriterBuilder<Schema, GenericRecord> mockBuilder =
      (FsDataWriterBuilder<Schema, GenericRecord>) Mockito.mock(FsDataWriterBuilder.class);
  when(mockBuilder.getSchema()).thenReturn(schema);
  State dummyState = new WorkUnit();
  String stagingDir = Files.createTempDir().getAbsolutePath();
  String outputDir = Files.createTempDir().getAbsolutePath();
  dummyState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir);
  dummyState.setProp(ConfigurationKeys.WRITER_FILE_PATH, "selfTune");
  dummyState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir);
  dummyState.setProp(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_ENABLED, "true");
  // Re-evaluate the batch size after every row.
  dummyState.setProp(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_ROWS_BETWEEN_CHECK, "1");
  when(mockBuilder.getFileName(dummyState)).thenReturn("file");
  Path outputFilePath = new Path(outputDir, "selfTune/file");
  // Having a closer to manage the life-cycle of the writer object.
  Closer closer = Closer.create();
  GobblinOrcWriter orcWriter = closer.register(new GobblinOrcWriter(mockBuilder, dummyState));
  // Force a larger initial batchSize that can be tuned down
  orcWriter.batchSize = 10;
  orcWriter.rowBatch.ensureSize(10);
  for (GenericRecord record : recordList) {
    orcWriter.write(record);
  }
  // Force the batchSize to decrease
  orcWriter.tuneBatchSize(1000000000);
  Assert.assertTrue(orcWriter.batchSize == 1);
  Assert.assertTrue(orcWriter.rowBatch.size == 0, "Expected the row batch to be flushed to preserve data");
  // Not yet flushed in ORC
  Assert.assertEquals(orcWriter.recordsWritten(), 0);
  orcWriter.commit();
  Assert.assertEquals(orcWriter.recordsWritten(), 4);
  // Verify ORC file contains correct records.
  FileSystem fs = FileSystem.getLocal(new Configuration());
  Assert.assertTrue(fs.exists(outputFilePath));
  List<Writable> orcRecords = deserializeOrcRecords(outputFilePath, fs);
  Assert.assertEquals(orcRecords.size(), 4);
}
/**
 * Exercises the batch-size calculation in tuneBatchSize under three memory scenarios:
 * plentiful memory (batch grows, damped by averaging with the previous size), scarce memory
 * (batch collapses to 1), and a large rowBatch that keeps the batch at 1 despite more memory.
 * No records are written; only the tuning arithmetic is checked.
 */
@Test
public void testSelfTuneRowBatchCalculation() throws Exception {
  Schema schema =
      new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("orc_writer_test/schema.avsc"));
  // Mock WriterBuilder, bunch of mocking behaviors to work-around precondition checks in writer builder
  FsDataWriterBuilder<Schema, GenericRecord> mockBuilder =
      (FsDataWriterBuilder<Schema, GenericRecord>) Mockito.mock(FsDataWriterBuilder.class);
  when(mockBuilder.getSchema()).thenReturn(schema);
  State dummyState = new WorkUnit();
  String stagingDir = Files.createTempDir().getAbsolutePath();
  String outputDir = Files.createTempDir().getAbsolutePath();
  dummyState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir);
  dummyState.setProp(ConfigurationKeys.WRITER_FILE_PATH, "selfTune");
  dummyState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir);
  dummyState.setProp(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_ENABLED, "true");
  dummyState.setProp(OrcConf.STRIPE_SIZE.getAttribute(), "100");
  when(mockBuilder.getFileName(dummyState)).thenReturn("file");
  // Having a closer to manage the life-cycle of the writer object.
  Closer closer = Closer.create();
  GobblinOrcWriter orcWriter = closer.register(new GobblinOrcWriter(mockBuilder, dummyState));
  // Force a larger initial batchSize that can be tuned down
  orcWriter.batchSize = 10;
  orcWriter.rowBatch.ensureSize(10);
  orcWriter.availableMemory = 100000000;
  // Given the amount of available memory and a low stripe size, and estimated rowBatchSize, the resulting batchsize should be maxed out
  orcWriter.tuneBatchSize(10);
  // Take into account that increases in batchsize are multiplied by a factor to prevent large jumps in batchsize
  Assert.assertTrue(orcWriter.batchSize == (GobblinOrcWriterConfigs.DEFAULT_ORC_WRITER_BATCH_SIZE + 10) / 2);
  orcWriter.availableMemory = 100;
  orcWriter.tuneBatchSize(10);
  // Given that the amount of available memory is low, the resulting batchsize should be 1
  Assert.assertTrue(orcWriter.batchSize == 1);
  orcWriter.availableMemory = 10000;
  orcWriter.rowBatch.ensureSize(10000);
  // Since the rowBatch is large, the resulting batchsize should still be 1 even with more memory
  orcWriter.tuneBatchSize(10);
  Assert.assertTrue(orcWriter.batchSize == 1);
}
/**
 * Verifies that after commit the writer persists its self-tuning runtime state (estimated
 * record size, previous batch size, converter memory estimate, native writer memory) into
 * the job {@link State} so a subsequent task can seed its tuning from it.
 */
@Test
public void testStatePersistenceWhenClosingWriter() throws IOException {
  Schema schema =
      new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("orc_writer_test/schema.avsc"));
  List<GenericRecord> recordList = deserializeAvroRecords(this.getClass(), schema, "orc_writer_test/data_multi.json");
  // Mock WriterBuilder, bunch of mocking behaviors to work-around precondition checks in writer builder
  FsDataWriterBuilder<Schema, GenericRecord> mockBuilder =
      (FsDataWriterBuilder<Schema, GenericRecord>) Mockito.mock(FsDataWriterBuilder.class);
  when(mockBuilder.getSchema()).thenReturn(schema);
  State dummyState = new WorkUnit();
  String stagingDir = Files.createTempDir().getAbsolutePath();
  String outputDir = Files.createTempDir().getAbsolutePath();
  dummyState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir);
  dummyState.setProp(ConfigurationKeys.WRITER_FILE_PATH, "selfTune");
  dummyState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir);
  dummyState.setProp(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_ENABLED, "true");
  dummyState.setProp(OrcConf.STRIPE_SIZE.getAttribute(), "100");
  when(mockBuilder.getFileName(dummyState)).thenReturn("file");
  // Having a closer to manage the life-cycle of the writer object.
  // NOTE(review): the closer is registered but never closed in this test — presumably relying
  // on commit() to flush; confirm whether an explicit closer.close() should be added.
  Closer closer = Closer.create();
  GobblinOrcWriter orcWriter = closer.register(new GobblinOrcWriter(mockBuilder, dummyState));
  for (GenericRecord record : recordList) {
    orcWriter.write(record);
  }
  // Hard code the batchsize here as tuning batch size is dependent on the runtime environment
  orcWriter.batchSize = 10;
  orcWriter.commit();
  Assert.assertEquals(dummyState.getProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_RECORD_SIZE), "9");
  Assert.assertEquals(dummyState.getProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_PREVIOUS_BATCH_SIZE), "10");
  Assert.assertEquals(dummyState.getProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_BYTES_ALLOCATED_CONVERTER_MEMORY), "18000");
  Assert.assertNotNull(dummyState.getProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_NATIVE_WRITER_MEMORY));
  // NOTE(review): this asserts a compile-time constant is non-null, which can never fail —
  // likely intended to assert on dummyState.getProp(OrcConf.ROWS_BETWEEN_CHECKS.getAttribute()).
  Assert.assertNotNull(OrcConf.ROWS_BETWEEN_CHECKS.getAttribute());
}
} | 3,349 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin/writer/OrcConverterMemoryManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.orc.TypeDescription;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.orc.AvroOrcSchemaConverter;
public class OrcConverterMemoryManagerTest {
  /** Builds a {@link State} with smart-array-enlarge enabled and the given tuning knobs. */
  private static State smartEnlargeState(String decayFactor, String maxFactor, String minFactor) {
    State state = new State();
    state.setProp(GobblinOrcWriterConfigs.ENABLE_SMART_ARRAY_ENLARGE, "true");
    state.setProp(GobblinOrcWriterConfigs.SMART_ARRAY_ENLARGE_DECAY_FACTOR, decayFactor);
    state.setProp(GobblinOrcWriterConfigs.SMART_ARRAY_ENLARGE_FACTOR_MAX, maxFactor);
    state.setProp(GobblinOrcWriterConfigs.SMART_ARRAY_ENLARGE_FACTOR_MIN, minFactor);
    return state;
  }
  @Test
  public void testBufferSizeCalculationResize()
      throws Exception {
    Schema avroSchema =
        new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("list_map_test/schema.avsc"));
    TypeDescription orcType = AvroOrcSchemaConverter.getOrcSchema(avroSchema);
    // A tiny batch guarantees the variable-length column buffers get enlarged while writing.
    VectorizedRowBatch batch = orcType.createRowBatch(10);
    OrcConverterMemoryManager manager = new OrcConverterMemoryManager(batch, new State());
    GenericRecordToOrcValueWriter writer = new GenericRecordToOrcValueWriter(orcType, avroSchema, manager);
    List<GenericRecord> records =
        GobblinOrcWriterTest.deserializeAvroRecords(this.getClass(), avroSchema, "list_map_test/data.json");
    Assert.assertEquals(records.size(), 6);
    for (GenericRecord record : records) {
      writer.write(record, batch);
    }
    // Expected size covers the list, map-key and map-value buffers after the resize. With 6 records
    // and at least 2 elements per array/map, each buffer exceeds the initial capacity of 10 and is
    // resized once to 12, so the tracked total is 12*3 slots apiece at 8 bytes per value (plus the
    // null-flag overhead of the fixed columns).
    int expectedSize = 36 * 9 + 36 * 9 + 36 * 9 + 10 * 2;
    Assert.assertEquals(manager.getConverterBufferTotalSize(), expectedSize);
  }
  @Test
  public void testBufferSizeCalculatedDeepNestedList() throws Exception {
    Schema avroSchema =
        new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("converter_memory_manager_nested_test/schema.avsc"));
    TypeDescription orcType = AvroOrcSchemaConverter.getOrcSchema(avroSchema);
    // Batch sized so only the deeply nested list triggers a resize.
    VectorizedRowBatch batch = orcType.createRowBatch(15);
    OrcConverterMemoryManager manager = new OrcConverterMemoryManager(batch, new State());
    GenericRecordToOrcValueWriter writer = new GenericRecordToOrcValueWriter(orcType, avroSchema, manager);
    List<GenericRecord> records =
        GobblinOrcWriterTest.deserializeAvroRecords(this.getClass(), avroSchema, "converter_memory_manager_nested_test/data.json");
    Assert.assertEquals(records.size(), 1);
    for (GenericRecord record : records) {
      writer.write(record, batch);
    }
    // The deeply nested list resizes once: it fills 30 elements (5+10+15) and grows to 90.
    // Maps and the top-level arrays stay at their original capacity; only their fixed
    // structural overhead is counted.
    int expectedSize = 30 * 3 * 9 + 30 * 9 + 15 * 4; // Deeply nested list + maps + other structure overhead
    Assert.assertEquals(manager.getConverterBufferTotalSize(), expectedSize);
  }
  @Test
  public void testBufferSmartResize() throws Exception {
    Schema avroSchema =
        new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("converter_memory_manager_nested_test/schema.avsc"));
    TypeDescription orcType = AvroOrcSchemaConverter.getOrcSchema(avroSchema);
    VectorizedRowBatch batch = orcType.createRowBatch(15);
    OrcConverterMemoryManager manager = new OrcConverterMemoryManager(batch, smartEnlargeState("0.5", "10", "1"));
    // On the first row the max enlarge factor dominates, so the requested size is multiplied by 10.
    int result = manager.resize(1, 1000);
    Assert.assertEquals(result, 10000);
    // After many rows the decayed factor bottoms out at the min factor, so the
    // result equals the requested size.
    result = manager.resize(100, 1000);
    Assert.assertEquals(result, 1000);
  }
  @Test
  public void testBufferSmartResizeParameters() throws Exception {
    Schema avroSchema =
        new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("converter_memory_manager_nested_test/schema.avsc"));
    TypeDescription orcType = AvroOrcSchemaConverter.getOrcSchema(avroSchema);
    VectorizedRowBatch batch = orcType.createRowBatch(15);
    // A max enlarge factor below 1.0 must be rejected.
    State badMaxFactor = smartEnlargeState("0.5", "0", "1");
    Assert.assertThrows(IllegalArgumentException.class, () -> new OrcConverterMemoryManager(batch, badMaxFactor));
    // A min enlarge factor below 1.0 must be rejected.
    State badMinFactor = smartEnlargeState("0.5", "1", "0");
    Assert.assertThrows(IllegalArgumentException.class, () -> new OrcConverterMemoryManager(batch, badMinFactor));
    // A decay factor outside (0, 1) must be rejected.
    State badDecayFactor = smartEnlargeState("1.5", "1", "1");
    Assert.assertThrows(IllegalArgumentException.class, () -> new OrcConverterMemoryManager(batch, badDecayFactor));
  }
}
| 3,350 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin/writer/GobblinBaseOrcWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.FileFormatException;
import org.apache.orc.OrcFile;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import static org.apache.gobblin.writer.GobblinBaseOrcWriter.CORRUPTED_ORC_FILE_DELETION_EVENT;
import static org.mockito.MockitoAnnotations.openMocks;
/**
 * Tests for the corrupted-ORC-file validation in {@code GobblinBaseOrcWriter}: a truncated
 * ORC file must fail {@code assertOrcFileIsValid} and a deletion event carrying the failure
 * details must be submitted to the metric context.
 */
public class GobblinBaseOrcWriterTest {
  Configuration conf;
  FileSystem fs;
  // Temp directory holding the deliberately corrupted ORC file written by each test.
  File tmpDir;
  File orcFile;
  Path orcFilePath;
  @Mock
  MetricContext mockContext;
  // Handle returned by openMocks(); closed in tearDown() to release mock resources.
  AutoCloseable closeable;
  @BeforeTest
  public void setup() throws IOException {
    this.closeable = openMocks(this);
    this.conf = new Configuration();
    this.fs = FileSystem.getLocal(conf);
    this.tmpDir = Files.createTempDir();
    this.orcFile = new File(tmpDir, "test.orc");
    this.orcFilePath = new Path(orcFile.getAbsolutePath());
  }
  @AfterTest
  public void tearDown()
      throws Exception {
    this.closeable.close();
  }
  @Test
  public void testOrcValidationOnlyHeader()
      throws IOException {
    try (FileWriter writer = new FileWriter(orcFile)) {
      // write a corrupted ORC file that only contains the header
      writer.write(OrcFile.MAGIC);
    }
    // A header-only file is not a valid ORC file; validation must throw and must also
    // emit a deletion event whose metadata matches the failure exactly.
    Assert.assertThrows(FileFormatException.class, () -> GobblinBaseOrcWriter.assertOrcFileIsValid(
        fs, orcFilePath, new OrcFile.ReaderOptions(conf), mockContext));
    GobblinEventBuilder eventBuilder = new GobblinEventBuilder(CORRUPTED_ORC_FILE_DELETION_EVENT, GobblinBaseOrcWriter.ORC_WRITER_NAMESPACE);
    eventBuilder.addMetadata("filePath", orcFilePath.toString());
    eventBuilder.addMetadata("exceptionType", "org.apache.orc.FileFormatException");
    eventBuilder.addMetadata("exceptionMessage", String.format("Not a valid ORC file %s (maxFileLength= 9223372036854775807)", orcFilePath));
    Mockito.verify(mockContext, Mockito.times(1)).submitEvent(eventBuilder.build());
  }
  @Test
  public void testOrcValidationWithContent() throws IOException {
    try (FileWriter writer = new FileWriter(orcFile)) {
      // write a corrupted ORC file that only contains the header and invalid protobuf content
      writer.write(OrcFile.MAGIC);
      writer.write("\n");
    }
    // The garbage byte after the magic trips the protobuf footer parser rather than
    // the ORC format check, so a different exception type is expected here.
    Assert.assertThrows(com.google.protobuf25.InvalidProtocolBufferException.class,
        () -> GobblinBaseOrcWriter.assertOrcFileIsValid(fs, orcFilePath, new OrcFile.ReaderOptions(conf), mockContext));
    GobblinEventBuilder eventBuilder = new GobblinEventBuilder(CORRUPTED_ORC_FILE_DELETION_EVENT, GobblinBaseOrcWriter.ORC_WRITER_NAMESPACE);
    eventBuilder.addMetadata("filePath", orcFilePath.toString());
    eventBuilder.addMetadata("exceptionType", "com.google.protobuf25.InvalidProtocolBufferException");
    eventBuilder.addMetadata("exceptionMessage", "Protocol message tag had invalid wire type.");
    Mockito.verify(mockContext, Mockito.times(1))
        .submitEvent(eventBuilder.build());
  }
}
| 3,351 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin/writer/GenericRecordToOrcValueWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.orc.OrcFile;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcUnion;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.orc.AvroOrcSchemaConverter;
import static org.apache.orc.mapred.OrcMapredRecordReader.nextValue;
@Slf4j
public class GenericRecordToOrcValueWriterTest {
@Test
public void testUnionRecordConversionWriter()
throws Exception {
Schema schema =
new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("union_test/schema.avsc"));
TypeDescription orcSchema = AvroOrcSchemaConverter.getOrcSchema(schema);
VectorizedRowBatch rowBatch = orcSchema.createRowBatch();
OrcConverterMemoryManager memoryManager = new OrcConverterMemoryManager(rowBatch, new State());
GenericRecordToOrcValueWriter valueWriter = new GenericRecordToOrcValueWriter(orcSchema, schema, memoryManager);
List<GenericRecord> recordList = GobblinOrcWriterTest
.deserializeAvroRecords(this.getClass(), schema, "union_test/data.json");
for (GenericRecord record : recordList) {
valueWriter.write(record, rowBatch);
}
// Flush RowBatch into disk.
File tempFile = new File(Files.createTempDir(), "orc");
tempFile.deleteOnExit();
Path filePath = new Path(tempFile.getAbsolutePath());
OrcFile.WriterOptions options = OrcFile.writerOptions(new Properties(), new Configuration());
options.setSchema(orcSchema);
Writer orcFileWriter = OrcFile.createWriter(filePath, options);
orcFileWriter.addRowBatch(rowBatch);
orcFileWriter.close();
// Load it back and compare.
FileSystem fs = FileSystem.get(new Configuration());
List<Writable> orcRecords = deserializeOrcRecords(filePath, fs);
Assert.assertEquals(orcRecords.size(), 5);
// Knowing all of them are OrcStruct<OrcUnion>, save the effort to recursively convert GenericRecord to OrcStruct
// for comprehensive comparison which is non-trivial,
// although it is also theoretically possible and optimal way for doing this unit test.
List<OrcUnion> unionList = orcRecords.stream().map(this::getUnionFieldFromStruct).collect(Collectors.toList());
// Constructing all OrcUnion and verify all of them appears in unionList.
TypeDescription unionSchema = orcSchema.getChildren().get(0);
OrcUnion union_0 = new OrcUnion(unionSchema);
union_0.set((byte) 0, new Text("urn:li:member:3"));
Assert.assertTrue(unionList.contains(union_0));
OrcUnion union_1 = new OrcUnion(unionSchema);
union_1.set((byte) 0, new Text("urn:li:member:4"));
Assert.assertTrue(unionList.contains(union_1));
OrcUnion union_2 = new OrcUnion(unionSchema);
union_2.set((byte) 1, new IntWritable(2));
Assert.assertTrue(unionList.contains(union_2));
OrcUnion union_3 = new OrcUnion(unionSchema);
union_3.set((byte) 1, new IntWritable(1));
Assert.assertTrue(unionList.contains(union_3));
OrcUnion union_4 = new OrcUnion(unionSchema);
union_4.set((byte) 1, new IntWritable(3));
Assert.assertTrue(unionList.contains(union_4));
}
@Test
public void testDecimalRecordConversionWriter()
throws Exception {
Schema schema =
new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("decimal_test/schema.avsc"));
TypeDescription orcSchema = AvroOrcSchemaConverter.getOrcSchema(schema);
VectorizedRowBatch rowBatch = orcSchema.createRowBatch();
OrcConverterMemoryManager memoryManager = new OrcConverterMemoryManager(rowBatch, new State());
GenericRecordToOrcValueWriter valueWriter = new GenericRecordToOrcValueWriter(orcSchema, schema, memoryManager);
List<GenericRecord> recordList = GobblinOrcWriterTest
.deserializeAvroRecords(this.getClass(), schema, "decimal_test/data.json");
for (GenericRecord record : recordList) {
valueWriter.write(record, rowBatch);
}
// Flush RowBatch into disk.
File tempFile = new File(Files.createTempDir(), "orc");
tempFile.deleteOnExit();
Path filePath = new Path(tempFile.getAbsolutePath());
OrcFile.WriterOptions options = OrcFile.writerOptions(new Properties(), new Configuration());
options.setSchema(orcSchema);
Writer orcFileWriter = OrcFile.createWriter(filePath, options);
orcFileWriter.addRowBatch(rowBatch);
orcFileWriter.close();
// Load it back and compare.
FileSystem fs = FileSystem.get(new Configuration());
List<Writable> orcRecords = deserializeOrcRecords(filePath, fs);
Assert.assertEquals(orcRecords.size(), 2);
Assert.assertEquals(orcRecords.get(0).toString(), "{3.4}");
Assert.assertEquals(orcRecords.get(1).toString(), "{5.97}");
}
@Test
public void testListResize()
throws Exception {
Schema schema =
new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("list_map_test/schema.avsc"));
TypeDescription orcSchema = AvroOrcSchemaConverter.getOrcSchema(schema);
// Make the batch size very small so that the enlarge behavior would easily be triggered.
// But this has to more than the number of records that we deserialized form data.json, as here we don't reset batch.
VectorizedRowBatch rowBatch = orcSchema.createRowBatch(10);
OrcConverterMemoryManager memoryManager = new OrcConverterMemoryManager(rowBatch, new State());
GenericRecordToOrcValueWriter valueWriter = new GenericRecordToOrcValueWriter(orcSchema, schema, memoryManager);
List<GenericRecord> recordList = GobblinOrcWriterTest
.deserializeAvroRecords(this.getClass(), schema, "list_map_test/data.json");
Assert.assertEquals(recordList.size(), 6);
for (GenericRecord record : recordList) {
valueWriter.write(record, rowBatch);
}
// Examining resize count, which should happen only once for map and list, so totally 2.
Assert.assertEquals(valueWriter.getResizeCount(), 2);
}
@Test
public void testConvertedBytesCalculation()
throws Exception {
Schema schema =
new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("list_map_test/schema.avsc"));
TypeDescription orcSchema = AvroOrcSchemaConverter.getOrcSchema(schema);
// Make the batch size very small so that the enlarge behavior would easily be triggered.
// But this has to more than the number of records that we deserialized form data.json, as here we don't reset batch.
VectorizedRowBatch rowBatch = orcSchema.createRowBatch(10);
OrcConverterMemoryManager memoryManager = new OrcConverterMemoryManager(rowBatch, new State());
GenericRecordToOrcValueWriter valueWriter = new GenericRecordToOrcValueWriter(orcSchema, schema, memoryManager);
List<GenericRecord> recordList = GobblinOrcWriterTest
.deserializeAvroRecords(this.getClass(), schema, "list_map_test/data.json");
Assert.assertEquals(recordList.size(), 6);
for (GenericRecord record : recordList) {
valueWriter.write(record, rowBatch);
}
// We want to add the sum of the sizes of the elements in the list and map, as well as any isNull values created by resizing the array
long byteSumOfIdList = 4 * 3 * 6;
// Sum of keys + values
long byteSumOfMaps = 1 * 2 * 6 + 4 * 2 * 6;
long expectedBytesConverted = byteSumOfIdList + byteSumOfMaps;
Assert.assertEquals(valueWriter.getTotalBytesConverted(), expectedBytesConverted);
Assert.assertEquals(valueWriter.getTotalRecordsConverted(), 6);
}
/**
* Accessing "fields" using reflection to work-around access modifiers.
*/
private OrcUnion getUnionFieldFromStruct(Writable struct) {
try {
OrcStruct orcStruct = (OrcStruct) struct;
Field objectArr = OrcStruct.class.getDeclaredField("fields");
objectArr.setAccessible(true);
return (OrcUnion) ((Object[]) objectArr.get(orcStruct))[0];
} catch (Exception e) {
throw new RuntimeException("Cannot access with reflection", e);
}
}
public static final List<Writable> deserializeOrcRecords(Path orcFilePath, FileSystem fs)
throws IOException {
org.apache.orc.Reader fileReader = OrcFile.createReader(orcFilePath, new OrcFile.ReaderOptions(new Configuration()));
RecordReader recordReader = fileReader.rows();
TypeDescription schema = fileReader.getSchema();
VectorizedRowBatch batch = schema.createRowBatch();
recordReader.nextBatch(batch);
int rowInBatch = 0;
// result container
List<Writable> orcRecords = new ArrayList<>();
long rowCount = fileReader.getNumberOfRows();
while (rowCount > 0) {
// Deserialize records using Mapreduce-like API
if (schema.getCategory() == TypeDescription.Category.STRUCT) {
OrcStruct result = (OrcStruct) OrcStruct.createValue(fileReader.getSchema());
List<TypeDescription> children = schema.getChildren();
int numberOfChildren = children.size();
for (int i = 0; i < numberOfChildren; ++i) {
result.setFieldValue(i, nextValue(batch.cols[i], rowInBatch, children.get(i), result.getFieldValue(i)));
}
orcRecords.add(result);
} else {
throw new UnsupportedOperationException("The serialized records have to be a struct in the outer-most layer.");
}
rowCount -= 1;
rowInBatch += 1;
}
return orcRecords;
}
} | 3,352 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/test/java/org/apache/gobblin/writer/RowBatchPoolTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.orc.TypeDescription;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import org.testng.Assert;
import org.testng.annotations.Test;
public class RowBatchPoolTest {
  /**
   * Verifies that pooled row batches are reused until they expire, and that new batches
   * are created once expired entries have been evicted from the pool.
   */
  @Test
  public void testExpiry() throws Exception {
    State state = WorkUnit.createEmpty();
    RowBatchPool instance = RowBatchPool.instance(state);
    TypeDescription schema = TypeDescription.fromString("struct<a:int,b:string>");
    VectorizedRowBatch rowBatch1 = instance.getRowBatch(schema, 1024);
    instance.recycle(schema, rowBatch1);
    VectorizedRowBatch rowBatch2 = instance.getRowBatch(schema, 1024);
    // existing rowbatch is fetched from pool
    Assert.assertEquals(rowBatch1, rowBatch2);
    // since the pool has no existing rowbatch, a new one is created
    VectorizedRowBatch rowBatch3 = instance.getRowBatch(schema, 1024);
    Assert.assertNotEquals(rowBatch1, rowBatch3);
    // recycle fetched rowbatches
    instance.recycle(schema, rowBatch2);
    instance.recycle(schema, rowBatch3);
    // Wait for their expiry. Sleep one second longer than the expiry interval: sleeping
    // exactly the interval races with the pool's eviction pass and makes the test flaky
    // on a loaded machine.
    Thread.sleep(RowBatchPool.DEFAULT_ROW_BATCH_EXPIRY_INTERVAL * 1000L + 1000L);
    VectorizedRowBatch rowBatch4 = instance.getRowBatch(schema, 1024);
    // new rowbatch is created, all old ones are expired
    Assert.assertNotEquals(rowBatch1, rowBatch4);
  }
}
| 3,353 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/GobblinOrcMemoryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.orc.impl.MemoryManagerImpl;
import lombok.extern.slf4j.Slf4j;
/**
 * A thin layer extending {@link MemoryManagerImpl} for logging and instrumentation purpose.
 * Behavior is fully delegated to the parent; this class only logs the shared memory pool at
 * construction and the allocation scale whenever a writer is registered or removed.
 */
@Slf4j
public class GobblinOrcMemoryManager extends MemoryManagerImpl {
  public GobblinOrcMemoryManager(Configuration conf) {
    super(conf);
    // Log the total pool once so operators can see the memory budget shared by all ORC writers.
    log.info("The pool reserved for memory manager is :{}", getTotalMemoryPool());
  }
  /**
   * Registers a writer with the parent manager, then logs the resulting allocation scale
   * (fraction of the requested allocation each writer actually receives).
   */
  @Override
  public synchronized void addWriter(Path path, long requestedAllocation, Callback callback)
      throws IOException {
    super.addWriter(path, requestedAllocation, callback);
    log.info("Adding writer for Path {}, Current allocation: {}", path.toString(), getAllocationScale());
  }
  /**
   * Deregisters a writer from the parent manager, then logs the resulting allocation scale.
   */
  @Override
  public synchronized void removeWriter(Path path)
      throws IOException {
    super.removeWriter(path);
    log.info("Closing writer for Path {}, Current allocation: {}", path.toString(), getAllocationScale());
  }
}
| 3,354 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/OrcConverterMemoryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.orc.storage.ql.exec.vector.BytesColumnVector;
import org.apache.orc.storage.ql.exec.vector.ColumnVector;
import org.apache.orc.storage.ql.exec.vector.DecimalColumnVector;
import org.apache.orc.storage.ql.exec.vector.DoubleColumnVector;
import org.apache.orc.storage.ql.exec.vector.ListColumnVector;
import org.apache.orc.storage.ql.exec.vector.LongColumnVector;
import org.apache.orc.storage.ql.exec.vector.MapColumnVector;
import org.apache.orc.storage.ql.exec.vector.StructColumnVector;
import org.apache.orc.storage.ql.exec.vector.UnionColumnVector;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import com.google.common.base.Preconditions;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
/**
 * A helper class to estimate the size in bytes of the array buffers backing a
 * {@link VectorizedRowBatch}, and to decide how much to enlarge a variable-length column
 * buffer when the converter needs more room. The estimate is based on the current capacity
 * of each variable-length column; since the resizing algorithm for each column can balloon,
 * this directly affects the likelihood of OOM.
 */
@Slf4j
public class OrcConverterMemoryManager {
  // Smart (decaying) enlargement is opt-in; the static enlargeFactor is used otherwise.
  private static final boolean DEFAULT_ENABLE_SMART_ARRAY_ENLARGE = false;
  private static final int DEFAULT_ENLARGE_FACTOR = 3;
  private static final double DEFAULT_SMART_ARRAY_ENLARGE_FACTOR_MAX = 5.0;
  // Needs to be greater than 1.0
  private static final double DEFAULT_SMART_ARRAY_ENLARGE_FACTOR_MIN = 1.2;
  // Given the defaults it will take around 500 records to reach the min enlarge factor - given that the default
  // batch size is 1000 this is a fairly conservative default
  private static final double DEFAULT_SMART_ARRAY_ENLARGE_DECAY_FACTOR = 0.003;
  private VectorizedRowBatch rowBatch;
  // Number of buffer enlargements performed by this manager within the current writer's lifetime.
  @Getter
  private int resizeCount = 0;
  private double smartArrayEnlargeFactorMax;
  private double smartArrayEnlargeFactorMin;
  private double smartArrayEnlargeDecayFactor;
  private boolean enabledSmartSizing = false;
  int enlargeFactor = 0;
  OrcConverterMemoryManager(VectorizedRowBatch rowBatch, State state) {
    this.rowBatch = rowBatch;
    this.enabledSmartSizing = state.getPropAsBoolean(GobblinOrcWriterConfigs.ENABLE_SMART_ARRAY_ENLARGE, DEFAULT_ENABLE_SMART_ARRAY_ENLARGE);
    this.enlargeFactor = state.getPropAsInt(GobblinOrcWriterConfigs.ENLARGE_FACTOR_KEY, DEFAULT_ENLARGE_FACTOR);
    this.smartArrayEnlargeFactorMax = state.getPropAsDouble(GobblinOrcWriterConfigs.SMART_ARRAY_ENLARGE_FACTOR_MAX, DEFAULT_SMART_ARRAY_ENLARGE_FACTOR_MAX);
    this.smartArrayEnlargeFactorMin = state.getPropAsDouble(GobblinOrcWriterConfigs.SMART_ARRAY_ENLARGE_FACTOR_MIN, DEFAULT_SMART_ARRAY_ENLARGE_FACTOR_MIN);
    this.smartArrayEnlargeDecayFactor = state.getPropAsDouble(GobblinOrcWriterConfigs.SMART_ARRAY_ENLARGE_DECAY_FACTOR, DEFAULT_SMART_ARRAY_ENLARGE_DECAY_FACTOR);
    if (enabledSmartSizing) {
      // Validate smart-sizing knobs eagerly so a misconfiguration fails at construction
      // rather than mid-conversion: both factors must be >= 1.0, decay strictly in (0, 1).
      Preconditions.checkArgument(this.smartArrayEnlargeFactorMax >= 1,
          String.format("Smart array enlarge factor needs to be larger than 1.0, provided value %s", this.smartArrayEnlargeFactorMax));
      Preconditions.checkArgument(this.smartArrayEnlargeFactorMin >= 1,
          String.format("Smart array enlarge factor needs to be larger than 1.0, provided value %s", this.smartArrayEnlargeFactorMin));
      Preconditions.checkArgument(this.smartArrayEnlargeDecayFactor > 0 && this.smartArrayEnlargeDecayFactor < 1,
          String.format("Smart array enlarge decay factor needs to be between 0 and 1, provided value %s", this.smartArrayEnlargeDecayFactor));
      log.info("Enabled smart resizing for rowBatch - smartArrayEnlargeFactorMax: {}, smartArrayEnlargeFactorMin: {}, smartArrayEnlargeDecayFactor: {}",
          smartArrayEnlargeFactorMax, smartArrayEnlargeFactorMin, smartArrayEnlargeDecayFactor);
    }
    log.info("Static enlargeFactor for rowBatch: {}", enlargeFactor);
  }
  /**
   * Estimates the approximate size in bytes of elements in a column, recursing into the
   * children of nested (list/map/struct/union) columns.
   * This takes into account the default null values of different ORC ColumnVectors and approximates their sizes.
   * Although it is a rough calculation, it can accurately depict the weight of resizes in a column for very
   * large arrays and maps, which tend to dominate the size of the buffer overall.
   * NOTE(review): the per-type constants are heuristics — BYTES counts 8 bytes per entry (the
   * reference slot, not the actual byte[] payload) and DECIMAL uses precision + 2 (digits plus
   * sign and decimal point, presumably); confirm against the intended accounting if exactness matters.
   *
   * @param col the column vector whose backing buffers are being sized
   * @return estimated byte size of the column's buffers, including its isNull flag array
   */
  public long calculateSizeOfColHelper(ColumnVector col) {
    long converterBufferColSize = 0;
    switch (col.type) {
      case LIST:
        ListColumnVector listColumnVector = (ListColumnVector) col;
        converterBufferColSize += calculateSizeOfColHelper(listColumnVector.child);
        break;
      case MAP:
        MapColumnVector mapColumnVector = (MapColumnVector) col;
        converterBufferColSize += calculateSizeOfColHelper(mapColumnVector.keys);
        converterBufferColSize += calculateSizeOfColHelper(mapColumnVector.values);
        break;
      case STRUCT:
        StructColumnVector structColumnVector = (StructColumnVector) col;
        for (int j = 0; j < structColumnVector.fields.length; j++) {
          converterBufferColSize += calculateSizeOfColHelper(structColumnVector.fields[j]);
        }
        break;
      case UNION:
        UnionColumnVector unionColumnVector = (UnionColumnVector) col;
        for (int j = 0; j < unionColumnVector.fields.length; j++) {
          converterBufferColSize += calculateSizeOfColHelper(unionColumnVector.fields[j]);
        }
        break;
      case BYTES:
        BytesColumnVector bytesColumnVector = (BytesColumnVector) col;
        converterBufferColSize += bytesColumnVector.vector.length * 8;
        break;
      case DECIMAL:
        DecimalColumnVector decimalColumnVector = (DecimalColumnVector) col;
        converterBufferColSize += decimalColumnVector.precision + 2;
        break;
      case DOUBLE:
        DoubleColumnVector doubleColumnVector = (DoubleColumnVector) col;
        converterBufferColSize += doubleColumnVector.vector.length * 8;
        break;
      case LONG:
        LongColumnVector longColumnVector = (LongColumnVector) col;
        converterBufferColSize += longColumnVector.vector.length * 8;
        break;
      default:
        // Should never reach here given the types used in GenericRecordToOrcValueWriter
    }
    // Calculate overhead of the column's own null reference
    converterBufferColSize += col.isNull.length;
    return converterBufferColSize;
  }
  /**
   * Returns the total estimated size of all columns' buffers in the managed
   * {@link VectorizedRowBatch}, summing {@link #calculateSizeOfColHelper(ColumnVector)} over
   * every top-level column.
   * TODO: Consider calculating this value on the fly everytime a resize is called
   *
   * @return estimated total byte size of the row batch's converter buffers
   */
  public long getConverterBufferTotalSize() {
    long converterBufferTotalSize = 0;
    ColumnVector[] cols = this.rowBatch.cols;
    for (int i = 0; i < cols.length; i++) {
      converterBufferTotalSize += calculateSizeOfColHelper(cols[i]);
    }
    return converterBufferTotalSize;
  }
  /**
   * Resize the child-array size based on configuration.
   * If smart resizing is enabled, it will use an exponential decay algorithm where it would resize the array by a smaller amount
   * the more records the converter has processed, as the fluctuation in record size becomes less likely to differ significantly by then.
   * Since the writer is closed and reset periodically, this is generally a safe assumption that should prevent large empty array buffers.
   *
   * @param rowsAdded appears to be the number of rows already written by the current writer —
   *                  the decayed factor shrinks toward the configured minimum as it grows
   * @param requestedSize the minimum capacity the caller needs
   * @return the new capacity to allocate (always >= requestedSize given factors >= 1.0)
   */
  public int resize(int rowsAdded, int requestedSize) {
    resizeCount += 1;
    log.info(String.format("It has been resized %s times in current writer", resizeCount));
    if (enabledSmartSizing) {
      // Exponential decay: factor starts at smartArrayEnlargeFactorMax for the first row and
      // decays by (1 - decayFactor) per row, floored at smartArrayEnlargeFactorMin.
      double decayingEnlargeFactor = this.smartArrayEnlargeFactorMax * Math.pow((1-this.smartArrayEnlargeDecayFactor), rowsAdded-1);
      return (int) Math.round(requestedSize * Math.max(decayingEnlargeFactor, this.smartArrayEnlargeFactorMin));
    }
    return enlargeFactor * requestedSize;
  }
}
| 3,355 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/GobblinOrcWriterConfigs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * Configuration keys for the Gobblin ORC Writer.
 *
 * <p>This class (and its nested {@link RuntimeStateConfigs}) is a pure constants holder and is
 * not meant to be instantiated.
 */
public class GobblinOrcWriterConfigs {
  public static final String ORC_WRITER_PREFIX = "orcWriter.";
  /**
   * Configuration for enabling validation of ORC file to detect malformation. If enabled, will throw exception and
   * delete malformed ORC file during commit
   */
  public static final String ORC_WRITER_VALIDATE_FILE_AFTER_CLOSE = ORC_WRITER_PREFIX + "validate.file.after.close";
  /**
   * Default buffer size in the ORC Writer before sending the records to the native ORC Writer
   */
  public static final String ORC_WRITER_BATCH_SIZE = ORC_WRITER_PREFIX + "batchSize";
  /**
   * Configuration for enabling Gobblin Avro -> ORC Self tuning writer, optimized for Kafka Streaming Ingestion
   */
  public static final String ORC_WRITER_AUTO_SELFTUNE_ENABLED = ORC_WRITER_PREFIX + "auto.selfTune.enabled";
  /**
   * Max buffer size of the Gobblin ORC Writer that it can be tuned to
   */
  public static final String ORC_WRITER_AUTO_SELFTUNE_MAX_BATCH_SIZE = ORC_WRITER_PREFIX + "auto.selfTune.max.batch.size";
  /**
   * How often should the Gobblin ORC Writer check for tuning
   */
  public static final String ORC_WRITER_AUTO_SELFTUNE_ROWS_BETWEEN_CHECK = ORC_WRITER_PREFIX + "auto.selfTune.rowsBetweenCheck";
  /**
   * What percentage of the JVM memory should be reserved for the buffers of the Gobblin ORC Writer, approximately
   */
  public static final String ORC_WRITER_ROWBATCH_MEMORY_USAGE_FACTOR = ORC_WRITER_PREFIX + "auto.selfTune.memory.usage.factor";
  /**
   * In the self tuning writer, the minimum buffer size that can be configured for the initialization of the native ORC Writer,
   * size measured in records before checking to flush if buffer memory size exceeds stripe size. Note that the Gobblin ORC Writer
   * will initialize the native ORC Writer just once in its lifecycle to prevent multiple small files.
   */
  public static final String ORC_WRITER_MIN_ROWCHECK = ORC_WRITER_PREFIX + "min.rows.between.memory.checks";
  /**
   * In the self tuning writer, the maximum buffer size that can be configured for the initialization of the native ORC Writer,
   * size measured in records before checking to flush if buffer memory size exceeds stripe size. Note that the Gobblin ORC Writer
   * will initialize the native ORC Writer just once in its lifecycle to prevent multiple small files.
   */
  public static final String ORC_WRITER_MAX_ROWCHECK = ORC_WRITER_PREFIX + "max.rows.between.memory.checks";
  /**
   * Configuration for enabling the event-emitting instrumented variant of the ORC writer
   */
  public static final String ORC_WRITER_INSTRUMENTED = ORC_WRITER_PREFIX + "instrumented";
  public static final int DEFAULT_ORC_WRITER_BATCH_SIZE = 1000;
  /**
   * This value gives an estimation on how many writers are buffering records at the same time in a container.
   * Since time-based partition scheme is a commonly used practice, plus the chances for late-arrival data,
   * usually there would be 2-3 writers running during the hourly boundary. 3 is chosen here for being conservative.
   */
  public static final int DEFAULT_CONCURRENT_WRITERS = 3;
  public static final double DEFAULT_ORC_WRITER_BATCHSIZE_MEMORY_USAGE_FACTOR = 0.3;
  /**
   * The ratio of native ORC Writer buffer size to Gobblin ORC Writer buffer size
   */
  public static final int DEFAULT_ORC_WRITER_BATCHSIZE_ROWCHECK_FACTOR = 5;
  public static final int DEFAULT_MAX_ORC_WRITER_BATCH_SIZE = DEFAULT_ORC_WRITER_BATCH_SIZE;
  public static final int DEFAULT_ORC_AUTO_SELFTUNE_ROWS_BETWEEN_CHECK = 500;
  /**
   * Tune iff the new batch size is 10% different from the current batch size
   */
  public static final double DEFAULT_ORC_WRITER_TUNE_BATCHSIZE_SENSITIVITY = 0.1;
  public static final int DEFAULT_MIN_ORC_WRITER_ROWCHECK = 150;
  public static final int DEFAULT_MAX_ORC_WRITER_ROWCHECK = 5000;
  /**
   * Avro to ORC converter configs
   */
  public static final String ENABLE_SMART_ARRAY_ENLARGE = ORC_WRITER_PREFIX + "smartArrayEnlargement.enabled";
  public static final String SMART_ARRAY_ENLARGE_FACTOR_MAX = ORC_WRITER_PREFIX + "smartArrayEnlargement.factor.max";
  public static final String SMART_ARRAY_ENLARGE_FACTOR_MIN = ORC_WRITER_PREFIX + "smartArrayEnlargement.factor.min";
  public static final String SMART_ARRAY_ENLARGE_DECAY_FACTOR = ORC_WRITER_PREFIX + "smartArrayEnlargement.factor.decay";
  public static final String ENLARGE_FACTOR_KEY = ORC_WRITER_PREFIX + "enlargeFactor";

  /**
   * Keys under which the self-tuning writer records its runtime state between tuning checks.
   */
  public static class RuntimeStateConfigs {
    /** Estimated size, in bytes, of a single record as observed at runtime */
    public static final String ORC_WRITER_ESTIMATED_RECORD_SIZE = ORC_WRITER_PREFIX + "estimated.recordSize";
    /** Estimated memory, in bytes, held by the native ORC writer */
    public static final String ORC_WRITER_NATIVE_WRITER_MEMORY = ORC_WRITER_PREFIX + "estimated.native.writer.memory";
    /** Batch size chosen by the previous tuning decision */
    public static final String ORC_WRITER_PREVIOUS_BATCH_SIZE = ORC_WRITER_PREFIX + "previous.batch.size";
    /** Number of concurrently-writing tasks observed in the container */
    public static final String ORC_WRITER_CONCURRENT_TASKS = ORC_WRITER_PREFIX + "auto.selfTune.concurrent.tasks";
    /** Estimated bytes allocated by the Avro -> ORC converter's buffers */
    public static final String ORC_WRITER_ESTIMATED_BYTES_ALLOCATED_CONVERTER_MEMORY = ORC_WRITER_PREFIX + "estimated.bytes.allocated.converter.memory";

    private RuntimeStateConfigs() {
      // Constants holder; prevent instantiation
    }
  }

  private GobblinOrcWriterConfigs() {
    // Constants holder; prevent instantiation
  }
}
| 3,356 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/CloseBeforeFlushException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * Thrown when a writer is asked to close while it still holds buffered data that has not
 * been flushed for the given dataset.
 */
public class CloseBeforeFlushException extends RuntimeException {
  // Name of the dataset whose writer was closed prematurely.
  // Fix: previously this field was declared but never assigned, so it was always null.
  String datasetName;

  public CloseBeforeFlushException(String datasetName) {
    super(String.format("Dataset %s has an attempt to close writer before buffered data to be flushed", datasetName));
    this.datasetName = datasetName;
  }

  public CloseBeforeFlushException(String datasetName, Throwable cause) {
    super(String.format("Dataset %s has an attempt to close writer before buffered data to be flushed", datasetName),
        cause);
    this.datasetName = datasetName;
  }

  /** @return the name of the dataset whose writer was closed before flushing */
  public String getDatasetName() {
    return this.datasetName;
  }
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/GobblinOrcWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.orc.TypeDescription;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.orc.AvroOrcSchemaConverter;
/**
 * An Avro-to-ORC writer built directly on the ORC core writer, avoiding a dependency on the
 * Hive SerDe library for the write path itself (SerDe types are used only to derive the
 * column properties).
 */
@Slf4j
public class GobblinOrcWriter extends GobblinBaseOrcWriter<Schema, GenericRecord> {

  public GobblinOrcWriter(FsDataWriterBuilder<Schema, GenericRecord> builder, State properties) throws IOException {
    super(builder, properties);
  }

  /** Derives the ORC schema from this writer's Avro input schema. */
  @Override
  protected TypeDescription getOrcSchema() {
    return AvroOrcSchemaConverter.getOrcSchema(this.inputSchema);
  }

  /** Builds the value writer that converts {@link GenericRecord}s into ORC column vectors. */
  @Override
  protected OrcValueWriter<GenericRecord> getOrcValueWriter(TypeDescription typeDescription, Schema inputSchema,
      State state) {
    return new GenericRecordToOrcValueWriter(typeDescription, this.inputSchema, this.converterMemoryManager);
  }

  /**
   * Exposes the Avro schema as Hive-style table properties (schema literal, column names and
   * column types) so downstream validation can interpret the written file.
   */
  @Override
  protected Properties getPropsWithOrcSchema() throws SerDeException {
    Properties schemaProps = new Properties();
    schemaProps.setProperty(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), this.inputSchema.toString());
    AvroObjectInspectorGenerator inspectorGenerator = new AvroObjectInspectorGenerator(this.inputSchema);
    schemaProps.setProperty("columns", StringUtils.join(inspectorGenerator.getColumnNames(), ","));
    schemaProps.setProperty("columns.types", StringUtils.join(inspectorGenerator.getColumnTypes(), ","));
    return schemaProps;
  }

  /** Safe for speculative attempts only when an attempt id is present and this is not a subclass. */
  @Override
  public boolean isSpeculativeAttemptSafe() {
    return this.writerAttemptIdOptional.isPresent() && this.getClass() == GobblinOrcWriter.class;
  }
}
| 3,358 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/GenericRecordToOrcValueWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericEnumSymbol;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.util.Utf8;
import org.apache.orc.TypeDescription;
import org.apache.orc.storage.common.type.HiveDecimal;
import org.apache.orc.storage.ql.exec.vector.BytesColumnVector;
import org.apache.orc.storage.ql.exec.vector.ColumnVector;
import org.apache.orc.storage.ql.exec.vector.DecimalColumnVector;
import org.apache.orc.storage.ql.exec.vector.DoubleColumnVector;
import org.apache.orc.storage.ql.exec.vector.ListColumnVector;
import org.apache.orc.storage.ql.exec.vector.LongColumnVector;
import org.apache.orc.storage.ql.exec.vector.MapColumnVector;
import org.apache.orc.storage.ql.exec.vector.StructColumnVector;
import org.apache.orc.storage.ql.exec.vector.UnionColumnVector;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.orc.AvroOrcSchemaConverter;
/**
* The converter for buffering rows and forming columnar batch.
* Additionally, records the estimated size of the data converted in bytes
* TODO: consider using the record size provided by the extractor instead of the converter as it may be more available and accurate
*/
@Slf4j
public class GenericRecordToOrcValueWriter implements OrcValueWriter<GenericRecord> {
private boolean enabledSmartSizing;
private int enlargeFactor;
private OrcConverterMemoryManager memoryManager;
@Getter
long totalBytesConverted = 0;
@Getter
long totalRecordsConverted = 0;
/**
* The interface for the conversion from GenericRecord to ORC's ColumnVectors.
*/
interface Converter {
/**
* Take a value from the Generic record data value and add it to the ORC output.
* @param rowId the row in the ColumnVector
* @param column either the column number or element number
* @param data Object which contains the data
* @param output the ColumnVector to put the value into
* @returns the number of elements converted by the converter, for tracking and estimation purposes
*/
long addValue(int rowId, int column, Object data, ColumnVector output);
}
private final Converter[] converters;
public GenericRecordToOrcValueWriter(TypeDescription typeDescription, Schema avroSchema, OrcConverterMemoryManager memoryManager) {
converters = buildConverters(typeDescription, avroSchema);
this.memoryManager = memoryManager;
}
/** Converts a record from the GenericRecord to the ORC ColumnVectors.
* Additionally, records the number of bytes converted and the number of records converted.
* @param value the data value to write.
* @param output the VectorizedRowBatch to which the output will be written.
* @throws IOException
*/
@Override
public void write(GenericRecord value, VectorizedRowBatch output)
throws IOException {
int row = output.size++;
long bytesConverted = 0;
for (int c = 0; c < converters.length; ++c) {
ColumnVector col = output.cols[c];
if (value.get(c) == null) {
col.noNulls = false;
col.isNull[row] = true;
} else {
col.isNull[row] = false;
bytesConverted += converters[c].addValue(row, c, value.get(c), col);
}
}
this.totalBytesConverted += bytesConverted;
this.totalRecordsConverted += 1;
}
static class BooleanConverter implements Converter {
private static final int MEMORY_SIZE_BYTES = 1;
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((LongColumnVector) output).vector[rowId] = (boolean) data ? 1 : 0;
return MEMORY_SIZE_BYTES;
}
}
static class ByteConverter implements Converter {
private static final int MEMORY_SIZE_BYTES = 1;
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((LongColumnVector) output).vector[rowId] = (byte) data;
return MEMORY_SIZE_BYTES;
}
}
static class ShortConverter implements Converter {
private static final int MEMORY_SIZE_BYTES = 4;
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((LongColumnVector) output).vector[rowId] = (short) data;
return MEMORY_SIZE_BYTES;
}
}
static class IntConverter implements Converter {
private static final int MEMORY_SIZE_BYTES = 4;
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((LongColumnVector) output).vector[rowId] = (int) data;
return MEMORY_SIZE_BYTES;
}
}
static class LongConverter implements Converter {
private static final int MEMORY_SIZE_BYTES = 8;
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((LongColumnVector) output).vector[rowId] = (long) data;
return MEMORY_SIZE_BYTES;
}
}
static class FloatConverter implements Converter {
private static final int MEMORY_SIZE_BYTES = 4;
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((DoubleColumnVector) output).vector[rowId] = (float) data;
return MEMORY_SIZE_BYTES;
}
}
static class DoubleConverter implements Converter {
private static final int MEMORY_SIZE_BYTES = 8;
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((DoubleColumnVector) output).vector[rowId] = (double) data;
return MEMORY_SIZE_BYTES;
}
}
static class StringConverter implements Converter {
public long addValue(int rowId, int column, Object data, ColumnVector output) {
final byte[] value;
if (data instanceof GenericEnumSymbol) {
value = data.toString().getBytes(StandardCharsets.UTF_8);
} else if (data instanceof Enum) {
value = ((Enum) data).name().getBytes(StandardCharsets.UTF_8);
} else if (data instanceof Utf8) {
value = ((Utf8) data).getBytes();
} else {
value = ((String) data).getBytes(StandardCharsets.UTF_8);
}
((BytesColumnVector) output).setRef(rowId, value, 0, value.length);
return value.length;
}
}
static class BytesConverter implements Converter {
public long addValue(int rowId, int column, Object data, ColumnVector output) {
final byte[] value;
if (data instanceof GenericFixed) {
value = ((GenericFixed) data).bytes();
} else if (data instanceof ByteBuffer) {
value = ((ByteBuffer) data).array();
} else {
value = (byte[]) data;
}
((BytesColumnVector) output).setRef(rowId, value, 0, value.length);
return value.length;
}
}
static class DecimalConverter implements Converter {
// This is a naive estimation
private static final int MEMORY_SIZE_BYTES = 17;
private final int scale;
public DecimalConverter(int scale) {
this.scale = scale;
}
public long addValue(int rowId, int column, Object data, ColumnVector output) {
((DecimalColumnVector) output).vector[rowId].set(getHiveDecimalFromByteBuffer((ByteBuffer) data));
return MEMORY_SIZE_BYTES;
}
/**
* Based on logic from org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils
*/
private byte[] getBytesFromByteBuffer(ByteBuffer byteBuffer) {
byteBuffer.rewind();
byte[] result = new byte[byteBuffer.limit()];
byteBuffer.get(result);
return result;
}
/**
* Based on logic from org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils
*/
private HiveDecimal getHiveDecimalFromByteBuffer(ByteBuffer byteBuffer) {
byte[] result = getBytesFromByteBuffer(byteBuffer);
return HiveDecimal.create(new BigInteger(result), this.scale);
}
}
class StructConverter implements Converter {
private final Converter[] children;
StructConverter(TypeDescription schema, Schema avroSchema) {
children = new Converter[schema.getChildren().size()];
for (int c = 0; c < children.length; ++c) {
children[c] = buildConverter(schema.getChildren().get(c), avroSchema.getFields().get(c).schema());
}
}
public long addValue(int rowId, int column, Object data, ColumnVector output) {
GenericRecord value = (GenericRecord) data;
StructColumnVector cv = (StructColumnVector) output;
long estimatedBytes = 0;
for (int c = 0; c < children.length; ++c) {
ColumnVector field = cv.fields[c];
if (value.get(c) == null) {
field.noNulls = false;
field.isNull[rowId] = true;
estimatedBytes += 1;
} else {
field.isNull[rowId] = false;
estimatedBytes += children[c].addValue(rowId, c, value.get(c), field);
}
}
return estimatedBytes;
}
}
class UnionConverter implements Converter {
private final Converter[] children;
private final Schema unionSchema;
UnionConverter(TypeDescription schema, Schema avroSchema) {
children = new Converter[schema.getChildren().size()];
for (int c = 0; c < children.length; ++c) {
children[c] = buildConverter(schema.getChildren().get(c), avroSchema.getTypes().get(c));
}
this.unionSchema = avroSchema;
}
/**
* @param data Object which contains the data, for Union, this data object is already the
* original data type without union wrapper.
*/
@Override
public long addValue(int rowId, int column, Object data, ColumnVector output) {
UnionColumnVector cv = (UnionColumnVector) output;
int tag = (data != null) ? GenericData.get().resolveUnion(unionSchema, data) : children.length;
long estimatedBytes = 0;
for (int c = 0; c < children.length; ++c) {
ColumnVector field = cv.fields[c];
// If c == tag that indicates data must not be null
if (c == tag) {
field.isNull[rowId] = false;
cv.tags[rowId] = c;
estimatedBytes += children[c].addValue(rowId, c, data, field);
} else {
field.noNulls = false;
field.isNull[rowId] = true;
estimatedBytes += 1;
}
}
return estimatedBytes;
}
}
class ListConverter implements Converter {
private final Converter children;
// Keep track of total number of rows being added to help calculate row's avg size.
private int rowsAdded;
ListConverter(TypeDescription schema, Schema avroSchema) {
children = buildConverter(schema.getChildren().get(0), avroSchema.getElementType());
rowsAdded = 0;
}
public long addValue(int rowId, int column, Object data, ColumnVector output) {
rowsAdded += 1;
List value = (List) data;
ListColumnVector cv = (ListColumnVector) output;
// record the length and start of the list elements
cv.lengths[rowId] = value.size();
cv.offsets[rowId] = cv.childCount;
cv.childCount += cv.lengths[rowId];
// make sure the child is big enough
// If seeing child array being saturated, will need to expand with a reasonable amount.
if (cv.childCount > cv.child.isNull.length) {
int resizedLength = memoryManager.resize(rowsAdded, cv.childCount);
log.info("Column vector: {}, resizing to: {}, child count: {}", cv.child, resizedLength, cv.childCount);
cv.child.ensureSize(resizedLength, true);
}
// Add the size of the empty space of the list
long estimatedBytes = 0;
// Add each element
for (int e = 0; e < cv.lengths[rowId]; ++e) {
int offset = (int) (e + cv.offsets[rowId]);
if (value.get(e) == null) {
cv.child.noNulls = false;
cv.child.isNull[offset] = true;
estimatedBytes += 1;
} else {
cv.child.isNull[offset] = false;
estimatedBytes += children.addValue(offset, e, value.get(e), cv.child);
}
}
return estimatedBytes;
}
}
class MapConverter implements Converter {
private final Converter keyConverter;
private final Converter valueConverter;
// Keep track of total number of rows being added to help calculate row's avg size.
private int rowsAdded;
MapConverter(TypeDescription schema, Schema avroSchema) {
keyConverter = buildConverter(schema.getChildren().get(0), SchemaBuilder.builder().stringType());
valueConverter = buildConverter(schema.getChildren().get(1), avroSchema.getValueType());
rowsAdded = 0;
}
public long addValue(int rowId, int column, Object data, ColumnVector output) {
rowsAdded += 1;
Map<Object, Object> map = (Map<Object, Object>) data;
Set<Map.Entry<Object, Object>> entries = map.entrySet();
MapColumnVector cv = (MapColumnVector) output;
// record the length and start of the list elements
cv.lengths[rowId] = entries.size();
cv.offsets[rowId] = cv.childCount;
cv.childCount += cv.lengths[rowId];
// make sure the child is big enough
if (cv.childCount > cv.keys.isNull.length) {
int resizedLength = memoryManager.resize(rowsAdded, cv.childCount);
log.info("Column vector: {}, resizing to: {}, child count: {}", cv.keys, resizedLength, cv.childCount);
cv.keys.ensureSize(resizedLength, true);
log.info("Column vector: {}, resizing to: {}, child count: {}", cv.values, resizedLength, cv.childCount);
cv.values.ensureSize(resizedLength, true);
}
// Add each element
int e = 0;
long estimatedBytes = 0;
for (Map.Entry entry : entries) {
int offset = (int) (e + cv.offsets[rowId]);
if (entry.getKey() == null) {
cv.keys.noNulls = false;
cv.keys.isNull[offset] = true;
estimatedBytes += 1;
} else {
cv.keys.isNull[offset] = false;
estimatedBytes += keyConverter.addValue(offset, e, entry.getKey(), cv.keys);
}
if (entry.getValue() == null) {
cv.values.noNulls = false;
cv.values.isNull[offset] = true;
estimatedBytes += 1;
} else {
cv.values.isNull[offset] = false;
estimatedBytes += valueConverter.addValue(offset, e, entry.getValue(), cv.values);
}
e++;
}
return estimatedBytes;
}
}
private Converter buildConverter(TypeDescription schema, Schema avroSchema) {
switch (schema.getCategory()) {
case BOOLEAN:
return new BooleanConverter();
case BYTE:
return new ByteConverter();
case SHORT:
return new ShortConverter();
case INT:
return new IntConverter();
case LONG:
return new LongConverter();
case FLOAT:
return new FloatConverter();
case DOUBLE:
return new DoubleConverter();
case BINARY:
return new BytesConverter();
case STRING:
case CHAR:
case VARCHAR:
return new StringConverter();
case DECIMAL:
return new DecimalConverter(schema.getScale());
case STRUCT:
return new StructConverter(schema, AvroOrcSchemaConverter.sanitizeNullableSchema(avroSchema));
case LIST:
return new ListConverter(schema, AvroOrcSchemaConverter.sanitizeNullableSchema(avroSchema));
case MAP:
return new MapConverter(schema, AvroOrcSchemaConverter.sanitizeNullableSchema(avroSchema));
case UNION:
return new UnionConverter(schema, AvroOrcSchemaConverter.sanitizeNullableSchema(avroSchema));
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
private Converter[] buildConverters(TypeDescription schema, Schema avroSchema) {
if (schema.getCategory() != TypeDescription.Category.STRUCT) {
throw new IllegalArgumentException("Top level must be a struct " + schema);
}
List<TypeDescription> children = schema.getChildren();
Converter[] result = new Converter[children.size()];
for (int c = 0; c < children.size(); ++c) {
result[c] = buildConverter(children.get(c), avroSchema.getFields().get(c).schema());
}
return result;
}
public int getResizeCount() {
return memoryManager.getResizeCount();
}
} | 3,359 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/RowBatchPool.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.orc.TypeDescription;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import java.util.LinkedList;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/***
* Maintains a pool of row batches per orc schema.
* Expires row batches which have not been accessed for {@code ROW_BATCH_EXPIRY_INTERVAL}
*/
@Slf4j
public class RowBatchPool {
static final String PREFIX = "orc.row.batch.";
static final String ENABLE_ROW_BATCH_POOL = PREFIX + "enable";
static final String ROW_BATCH_EXPIRY_INTERVAL = PREFIX + "expiry.interval.secs";
static final int DEFAULT_ROW_BATCH_EXPIRY_INTERVAL = 10;
static final String ROW_BATCH_EXPIRY_PERIOD = PREFIX + "expiry.period.secs";
static final int DEFAULT_ROW_BATCH_EXPIRY_PERIOD = 1;
private static RowBatchPool INSTANCE;
private final Map<TypeDescription, LinkedList<RowBatchHolder>> rowBatches;
private final ScheduledExecutorService rowBatchExpiryThread;
private final long rowBatchExpiryInterval;
private RowBatchPool(State properties) {
rowBatches = Maps.newHashMap();
rowBatchExpiryThread = Executors.newSingleThreadScheduledExecutor(
new ThreadFactoryBuilder().setDaemon(true).build());
// expire row batches older N secs
rowBatchExpiryInterval = properties.getPropAsLong(ROW_BATCH_EXPIRY_INTERVAL, DEFAULT_ROW_BATCH_EXPIRY_INTERVAL);
// check every N secs
long rowBatchExpiryPeriod = properties.getPropAsLong(ROW_BATCH_EXPIRY_PERIOD, DEFAULT_ROW_BATCH_EXPIRY_PERIOD);
rowBatchExpiryThread.scheduleAtFixedRate(
rowBatchExpiryFn(), rowBatchExpiryPeriod, rowBatchExpiryPeriod, TimeUnit.SECONDS);
}
private Runnable rowBatchExpiryFn() {
return () -> {
synchronized (rowBatches) {
for (Map.Entry<TypeDescription, LinkedList<RowBatchHolder>> e : rowBatches.entrySet()) {
LinkedList<RowBatchHolder> val = e.getValue();
val.removeIf(this::candidateForRemoval);
}
}
};
}
private boolean candidateForRemoval(RowBatchHolder batch) {
long expiryInterval = TimeUnit.SECONDS.toMillis(rowBatchExpiryInterval);
long interval = System.currentTimeMillis() - batch.lastUsed;
if (interval > expiryInterval) {
log.info("Expiring row batch {} as it has not been accessed since {} ms",
System.identityHashCode(batch.rowBatch), interval);
return true;
} else {
return false;
}
}
private static class RowBatchHolder {
long lastUsed;
VectorizedRowBatch rowBatch;
private RowBatchHolder(VectorizedRowBatch rowBatch, long currentTimeMillis) {
this.rowBatch = rowBatch;
this.lastUsed = currentTimeMillis;
}
}
public synchronized static RowBatchPool instance(State properties) {
if (INSTANCE == null) {
INSTANCE = new RowBatchPool(properties);
}
return INSTANCE;
}
public VectorizedRowBatch getRowBatch(TypeDescription schema, int batchSize) {
synchronized (rowBatches) {
LinkedList<RowBatchHolder> vals = rowBatches.get(schema);
VectorizedRowBatch rowBatch;
if (vals == null || vals.size() == 0) {
rowBatch = schema.createRowBatch(batchSize);
log.info("Creating new row batch {}", System.identityHashCode(rowBatch));
} else {
rowBatch = vals.removeLast().rowBatch;
log.info("Using existing row batch {}", System.identityHashCode(rowBatch));
}
return rowBatch;
}
}
/**
 * Returns {@code rowBatch} to the pool for {@code schema}, stamping it with the current time
 * so the expiry task can evict it later if it stays unused.
 */
public void recycle(TypeDescription schema, VectorizedRowBatch rowBatch) {
  log.info("Recycling row batch {}", System.identityHashCode(rowBatch));
  synchronized (rowBatches) {
    // computeIfAbsent already returns the (possibly freshly created) list; chaining on it
    // avoids the second map lookup the previous computeIfAbsent-then-get sequence performed.
    rowBatches.computeIfAbsent(schema, ignore -> Lists.newLinkedList())
        .add(new RowBatchHolder(rowBatch, System.currentTimeMillis()));
  }
}
}
| 3,360 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/InstrumentedGobblinOrcWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
/***
 * A {@link GobblinOrcWriter} that additionally emits a metrics event on close, reporting
 * bytes/records written, converter buffer resizes, and the final buffer size.
 */
@Slf4j
public class InstrumentedGobblinOrcWriter extends GobblinOrcWriter {
  public static final String METRICS_SCHEMA_NAME = "schemaName";
  public static final String METRICS_BYTES_WRITTEN = "bytesWritten";
  public static final String METRICS_RECORDS_WRITTEN = "recordsWritten";
  public static final String METRICS_BUFFER_RESIZES = "bufferResizes";
  public static final String METRICS_BUFFER_SIZE = "bufferSize";
  public static final String ORC_WRITER_METRICS_NAME = "OrcWriterMetrics";

  public InstrumentedGobblinOrcWriter(FsDataWriterBuilder<Schema, GenericRecord> builder, State properties) throws IOException {
    super(builder, properties);
  }

  @Override
  protected synchronized void closeInternal() throws IOException {
    // close() can be reached via both super.commit() and super.close(); the metrics event
    // must be emitted exactly once, on the first successful close.
    if (this.closed) {
      // Already closed: any data still sitting in the buffer can no longer be written out,
      // so fail loudly rather than silently dropping it.
      if (rowBatch.size > 0) {
        throw new CloseBeforeFlushException(this.inputSchema.toString());
      }
      return;
    }
    this.flush();
    this.orcFileWriter.close();
    this.closed = true;
    log.info("Emitting ORC event metrics");
    this.sendOrcWriterMetadataEvent();
    this.recycleRowBatchPool();
  }

  /** Packages writer statistics into a {@link GobblinEventBuilder} and submits it. */
  private void sendOrcWriterMetadataEvent() {
    Map<String, String> metadata = Maps.newHashMap();
    metadata.put(METRICS_SCHEMA_NAME, this.inputSchema.getName());
    metadata.put(METRICS_BYTES_WRITTEN, String.valueOf(this.bytesWritten()));
    metadata.put(METRICS_RECORDS_WRITTEN, String.valueOf(this.recordsWritten()));
    metadata.put(METRICS_BUFFER_RESIZES, String.valueOf(((GenericRecordToOrcValueWriter) this.valueWriter).getResizeCount()));
    metadata.put(METRICS_BUFFER_SIZE, String.valueOf(this.batchSize));
    GobblinEventBuilder eventBuilder = new GobblinEventBuilder(ORC_WRITER_METRICS_NAME, ORC_WRITER_NAMESPACE);
    eventBuilder.addAdditionalMetadata(metadata);
    EventSubmitter.submit(metricContext, eventBuilder);
  }
}
| 3,361 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/GobblinBaseOrcWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.state.ConstructState;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.JobConfigurationUtils;
/**
 * A wrapper for ORC-core writer without dependency on Hive SerDe library.
 *
 * Records are converted via an {@link OrcValueWriter} into a buffered {@link VectorizedRowBatch};
 * full batches are handed to the native ORC {@link Writer}. When self-tuning is enabled, the
 * batch size is adjusted at runtime from the observed per-record size and available JVM memory.
 */
@Slf4j
public abstract class GobblinBaseOrcWriter<S, D> extends FsDataWriter<D> {
  public static final String ORC_WRITER_NAMESPACE = "gobblin.orc.writer";
  public static final String CORRUPTED_ORC_FILE_DELETION_EVENT = "CorruptedOrcFileDeletion";

  protected final MetricContext metricContext;
  protected final OrcValueWriter<D> valueWriter;
  @VisibleForTesting
  VectorizedRowBatch rowBatch;
  private final TypeDescription typeDescription;
  protected Writer orcFileWriter;
  private final RowBatchPool rowBatchPool;
  private final boolean enableRowBatchPool;

  // the close method may be invoked multiple times, but the underlying writer only supports close being called once
  protected volatile boolean closed = false;

  protected int batchSize;
  protected final S inputSchema;
  private final boolean validateORCAfterClose;
  private final boolean selfTuningWriter;
  private int selfTuneRowsBetweenCheck;
  private double rowBatchMemoryUsageFactor;
  // Record count at which the next self-tuning pass runs.
  private int nextSelfTune;
  private boolean initialEstimatingRecordSizePhase = false;
  // Early checkpoints used to converge quickly when no prior record-size estimate exists.
  private Queue<Integer> initialSelfTuneCheckpoints = new LinkedList<>(Arrays.asList(10, 100, 500));
  private AtomicInteger recordCounter = new AtomicInteger(0);
  @VisibleForTesting
  long availableMemory = -1;
  private long currentOrcWriterMaxUnderlyingMemory = -1;
  private long prevOrcWriterMaxUnderlyingMemory = -1;
  private int orcFileWriterMaxRowsBetweenCheck;
  private int orcFileWriterMinRowsBetweenCheck;
  private int orcFileWriterRowsBetweenCheck;
  private long orcStripeSize;
  private int maxOrcBatchSize;
  private int concurrentWriterTasks;
  // Holds the maximum size of the previous run's maximum buffer or the max of the current run's maximum buffer
  private long estimatedBytesAllocatedConverterMemory = -1;
  protected OrcConverterMemoryManager converterMemoryManager;

  Configuration writerConfig;

  public GobblinBaseOrcWriter(FsDataWriterBuilder<S, D> builder, State properties)
      throws IOException {
    super(builder, properties);

    // Create value-writer which is essentially a record-by-record-converter with buffering in batch.
    this.inputSchema = builder.getSchema();
    this.typeDescription = getOrcSchema();
    this.selfTuningWriter = properties.getPropAsBoolean(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_ENABLED, false);
    this.validateORCAfterClose = properties.getPropAsBoolean(GobblinOrcWriterConfigs.ORC_WRITER_VALIDATE_FILE_AFTER_CLOSE, false);
    this.maxOrcBatchSize = properties.getPropAsInt(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_MAX_BATCH_SIZE,
        GobblinOrcWriterConfigs.DEFAULT_MAX_ORC_WRITER_BATCH_SIZE);
    // Self-tuning writers start from the batch size persisted by the previous run.
    this.batchSize = this.selfTuningWriter
        ? properties.getPropAsInt(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_PREVIOUS_BATCH_SIZE,
            GobblinOrcWriterConfigs.DEFAULT_MAX_ORC_WRITER_BATCH_SIZE)
        : properties.getPropAsInt(GobblinOrcWriterConfigs.ORC_WRITER_BATCH_SIZE,
            GobblinOrcWriterConfigs.DEFAULT_ORC_WRITER_BATCH_SIZE);
    this.rowBatchPool = RowBatchPool.instance(properties);
    this.enableRowBatchPool = properties.getPropAsBoolean(RowBatchPool.ENABLE_ROW_BATCH_POOL, false);
    this.selfTuneRowsBetweenCheck = properties.getPropAsInt(GobblinOrcWriterConfigs.ORC_WRITER_AUTO_SELFTUNE_ROWS_BETWEEN_CHECK,
        GobblinOrcWriterConfigs.DEFAULT_ORC_AUTO_SELFTUNE_ROWS_BETWEEN_CHECK);
    this.rowBatchMemoryUsageFactor = properties.getPropAsDouble(GobblinOrcWriterConfigs.ORC_WRITER_ROWBATCH_MEMORY_USAGE_FACTOR,
        GobblinOrcWriterConfigs.DEFAULT_ORC_WRITER_BATCHSIZE_MEMORY_USAGE_FACTOR);
    this.rowBatch = enableRowBatchPool ? rowBatchPool.getRowBatch(typeDescription, batchSize)
        : typeDescription.createRowBatch(batchSize);
    this.converterMemoryManager = new OrcConverterMemoryManager(this.rowBatch, properties);
    this.valueWriter = getOrcValueWriter(typeDescription, this.inputSchema, properties);
    this.metricContext = getMetricContext();
    // Track the number of other writer tasks from different datasets ingesting on the same container
    this.concurrentWriterTasks = properties.getPropAsInt(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_CONCURRENT_TASKS, 1);
    this.orcStripeSize = properties.getPropAsLong(OrcConf.STRIPE_SIZE.getAttribute(), (long) OrcConf.STRIPE_SIZE.getDefaultValue());
    this.orcFileWriterMinRowsBetweenCheck = properties.getPropAsInt(GobblinOrcWriterConfigs.ORC_WRITER_MIN_ROWCHECK,
        GobblinOrcWriterConfigs.DEFAULT_MIN_ORC_WRITER_ROWCHECK);
    this.orcFileWriterMaxRowsBetweenCheck = properties.getPropAsInt(GobblinOrcWriterConfigs.ORC_WRITER_MAX_ROWCHECK,
        GobblinOrcWriterConfigs.DEFAULT_MAX_ORC_WRITER_ROWCHECK);

    // Create file-writer
    this.writerConfig = new Configuration();
    // Populate job Configurations into Conf as well so that configurations related to ORC writer can be tuned easily.
    JobConfigurationUtils.putStateIntoConfiguration(properties, this.writerConfig);

    OrcFile.WriterOptions options = OrcFile.writerOptions(properties.getProperties(), this.writerConfig);
    options.setSchema(typeDescription);

    // Get the amount of allocated and future space available, divided among concurrent writer tasks.
    this.availableMemory = (Runtime.getRuntime().maxMemory()
        - (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory())) / this.concurrentWriterTasks;
    log.info("Available memory for ORC writer: {}", this.availableMemory);

    if (this.selfTuningWriter) {
      if (properties.contains(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_RECORD_SIZE)
          && properties.getPropAsLong(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_RECORD_SIZE) != -1) {
        long estimatedRecordSizeBytes =
            properties.getPropAsLong(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_RECORD_SIZE);
        this.estimatedBytesAllocatedConverterMemory = properties.getPropAsLong(
            GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_BYTES_ALLOCATED_CONVERTER_MEMORY, -1);
        // Use the last run's rows-between-check value for the underlying file writer, if it exists.
        // Otherwise it falls back to the ORC library default.
        this.orcFileWriterRowsBetweenCheck =
            properties.getPropAsInt(OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(), (int) OrcConf.ROWS_BETWEEN_CHECKS.getDefaultValue());
        this.prevOrcWriterMaxUnderlyingMemory = properties.getPropAsLong(
            GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_NATIVE_WRITER_MEMORY, this.orcStripeSize);
        log.info("Using previously stored properties to calculate new batch size, ORC Estimated Record size is : {},"
                + "estimated bytes converter allocated is : {}, ORC rows between check is {}, native ORC writer estimated size is {}",
            estimatedRecordSizeBytes, this.estimatedBytesAllocatedConverterMemory, this.orcFileWriterRowsBetweenCheck,
            this.prevOrcWriterMaxUnderlyingMemory);
        this.tuneBatchSize(estimatedRecordSizeBytes);
        log.info("Initialized batch size at {}", this.batchSize);
        this.nextSelfTune = this.selfTuneRowsBetweenCheck;
      } else {
        // We will need to incrementally tune the writer based on the first few records
        this.nextSelfTune = 5;
        this.initialEstimatingRecordSizePhase = true;
        this.prevOrcWriterMaxUnderlyingMemory = this.orcStripeSize;
      }
    } else {
      log.info("Created ORC writer, batch size: {}, {}: {}",
          this.batchSize, OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(),
          this.writerConfig.get(
              OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(),
              OrcConf.ROWS_BETWEEN_CHECKS.getDefaultValue().toString()));
      this.orcFileWriter = OrcFile.createWriter(this.stagingFile, options);
    }
  }

  /**
   * Get the ORC schema as a {@link TypeDescription}
   */
  protected abstract TypeDescription getOrcSchema();

  /**
   * Get an {@link OrcValueWriter} for the specified schema and configuration.
   */
  protected abstract OrcValueWriter<D> getOrcValueWriter(TypeDescription typeDescription, S inputSchema, State state);

  /**
   * Get the schema properties, including the following:
   * avro.schema.literal
   * columns
   * column_types
   */
  protected abstract Properties getPropsWithOrcSchema() throws SerDeException;

  /** Number of rows written to the underlying ORC file, or 0 before the file writer exists. */
  @Override
  public long recordsWritten() {
    return this.orcFileWriter != null ? this.orcFileWriter.getNumberOfRows() : 0;
  }

  /** Raw data size reported by the underlying ORC writer, or 0 before the file writer exists. */
  @Override
  public long bytesWritten() {
    return this.orcFileWriter != null ? this.orcFileWriter.getRawDataSize() : 0;
  }

  @Override
  public State getFinalState() {
    /**
     * Creating {@link ConstructState} to provide overwrite of {@link WorkUnitState} from constructs.
     */
    ConstructState state = new ConstructState(super.getFinalState());
    try {
      state.addOverwriteProperties(new State(getPropsWithOrcSchema()));
    } catch (SerDeException e) {
      throw new RuntimeException("Failure to set schema metadata in finalState properly which "
          + "could possible lead to incorrect data registration", e);
    }
    return state;
  }

  /** Drains the buffered row batch into the (lazily created) native ORC file writer. */
  @Override
  public void flush()
      throws IOException {
    if (rowBatch.size > 0) {
      // We only initialize the native ORC file writer once to avoid creating too many small files, as reconfiguring
      // rows between memory check requires one to close the writer and start a new file
      if (this.orcFileWriter == null) {
        initializeOrcFileWriter();
      }
      orcFileWriter.addRowBatch(rowBatch);
      // Depending on the orcFileWriter orc.rows.between.memory.check, this may be an underestimate depending on if it
      // flushed right after adding the rows or not. However, since the rowBatch is reset and that buffer is cleared,
      // this should still be safe to use as an estimate.
      if (this.selfTuningWriter) {
        this.currentOrcWriterMaxUnderlyingMemory =
            Math.max(this.currentOrcWriterMaxUnderlyingMemory, orcFileWriter.estimateMemory());
      }
      rowBatch.reset();
    }
  }

  /** Returns the row batch to the shared pool when pooling is enabled. */
  protected void recycleRowBatchPool() {
    if (enableRowBatchPool) {
      rowBatchPool.recycle(typeDescription, rowBatch);
    }
  }

  /**
   * Idempotent close: flushes and closes the underlying writer on first call; on subsequent
   * calls fails fast if data is still buffered (it could no longer be written out).
   */
  protected synchronized void closeInternal()
      throws IOException {
    if (!closed) {
      this.flush();
      this.orcFileWriter.close();
      this.closed = true;
      this.recycleRowBatchPool();
    } else {
      // Throw fatal exception if there's outstanding buffered data since there's risk losing data if proceeds.
      if (rowBatch.size > 0) {
        throw new CloseBeforeFlushException(this.inputSchema.toString());
      }
    }
  }

  @Override
  public void close()
      throws IOException {
    closeInternal();
    super.close();
  }

  /**
   * Extra careful about the fact: super.commit() invoke closer.close based on its semantics of "commit".
   * That means close can happen in both commit() and close().
   * When self-tuning, persists the tuning state (record size estimate, buffer sizes, native
   * writer memory) so the next run can start from it.
   */
  @Override
  public void commit()
      throws IOException {
    closeInternal();
    // Validate the ORC file after writer close. Default is false as it introduce more load to FS from an extra read call
    if (this.validateORCAfterClose) {
      assertOrcFileIsValid(this.fs, this.stagingFile, new OrcFile.ReaderOptions(conf), this.metricContext);
    }
    super.commit();
    if (this.selfTuningWriter) {
      properties.setProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_RECORD_SIZE, String.valueOf(getEstimatedRecordSizeBytes()));
      properties.setProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_ESTIMATED_BYTES_ALLOCATED_CONVERTER_MEMORY,
          String.valueOf(this.converterMemoryManager.getConverterBufferTotalSize()));
      properties.setProp(OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(), String.valueOf(this.orcFileWriterRowsBetweenCheck));
      properties.setProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_PREVIOUS_BATCH_SIZE, this.batchSize);
      properties.setProp(GobblinOrcWriterConfigs.RuntimeStateConfigs.ORC_WRITER_NATIVE_WRITER_MEMORY, this.currentOrcWriterMaxUnderlyingMemory);
    }
  }

  /**
   * Modifies the size of the writer buffer based on the average size of the records written so far, the amount of available memory during initialization, and the number of concurrent writers.
   * The new batch size is calculated as follows:
   * 1. Memory available = (available memory during startup)/(concurrent writers) - (memory used by ORCFile writer)
   * 2. Average file size, estimated during Avro -> ORC conversion
   * 3. Estimate of memory used by the converter lists, as during resize the internal buffer size can grow large
   * 4. New batch size = (Memory available - Estimated memory used by converter lists) / Average file size
   * Generally in this writer, the memory the converter uses for large arrays is the leading cause of OOM in streaming, along with the records stored in the rowBatch
   * Another potential approach is to also check the memory available before resizing the converter lists, and to flush the batch whenever a resize is needed.
   */
  void tuneBatchSize(long averageSizePerRecord) throws IOException {
    this.estimatedBytesAllocatedConverterMemory =
        Math.max(this.estimatedBytesAllocatedConverterMemory, this.converterMemoryManager.getConverterBufferTotalSize());
    int currentPartitionedWriters = this.properties.getPropAsInt(PartitionedDataWriter.CURRENT_PARTITIONED_WRITERS_COUNTER,
        GobblinOrcWriterConfigs.DEFAULT_CONCURRENT_WRITERS);
    // In the native ORC writer implementation, it will flush the writer if the internal memory exceeds the size of a
    // stripe after rows between check. Use the ORC Writer estimation API to get the max memory used by the underlying
    // ORC writer, but note that it is an overestimation as it includes memory allocated but not used.
    // More details in https://lists.apache.org/thread/g6yo7m46mr86ov1vkm9wnmshgw7hcl6b
    if (this.orcFileWriter != null) {
      this.currentOrcWriterMaxUnderlyingMemory =
          Math.max(this.currentOrcWriterMaxUnderlyingMemory, orcFileWriter.estimateMemory());
    }
    long maxMemoryInFileWriter = Math.max(currentOrcWriterMaxUnderlyingMemory, prevOrcWriterMaxUnderlyingMemory);

    int newBatchSize = (int) ((this.availableMemory * 1.0 / currentPartitionedWriters * this.rowBatchMemoryUsageFactor
        - maxMemoryInFileWriter - this.estimatedBytesAllocatedConverterMemory) / averageSizePerRecord);
    // Handle scenarios where new batch size can be 0 or less due to overestimating memory used by other components
    newBatchSize = Math.min(Math.max(1, newBatchSize), this.maxOrcBatchSize);

    if (Math.abs(newBatchSize - this.batchSize) > GobblinOrcWriterConfigs.DEFAULT_ORC_WRITER_TUNE_BATCHSIZE_SENSITIVITY * this.batchSize) {
      // Add a factor when tuning up the batch size to prevent large sudden increases in memory usage
      if (newBatchSize > this.batchSize) {
        newBatchSize = (newBatchSize - this.batchSize) / 2 + this.batchSize;
      }
      log.info("Tuning ORC writer batch size from {} to {} based on average byte size per record: {} with available memory {} and {} bytes "
              + "of allocated memory in converter buffers, native orc writer estimated memory {}, with {} partitioned writers",
          batchSize, newBatchSize, averageSizePerRecord, availableMemory,
          estimatedBytesAllocatedConverterMemory, maxMemoryInFileWriter, currentPartitionedWriters);
      this.batchSize = newBatchSize;
      // We need to always flush because ORC VectorizedRowBatch.ensureSize() does not provide an option to preserve data, refer to
      // https://orc.apache.org/api/hive-storage-api/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatch.html
      this.flush();
      this.rowBatch.ensureSize(this.batchSize);
    }
  }

  /**
   * Lazily creates the native ORC file writer, first deriving orc.rows.between.memory.checks
   * from the current batch size, clamped to the configured [min, max] row-check bounds.
   */
  void initializeOrcFileWriter() {
    try {
      this.orcFileWriterRowsBetweenCheck = Math.max(
          Math.min(this.batchSize * GobblinOrcWriterConfigs.DEFAULT_ORC_WRITER_BATCHSIZE_ROWCHECK_FACTOR, this.orcFileWriterMaxRowsBetweenCheck),
          this.orcFileWriterMinRowsBetweenCheck
      );
      this.writerConfig.set(OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(), String.valueOf(this.orcFileWriterRowsBetweenCheck));
      log.info("Created ORC writer, batch size: {}, {}: {}",
          this.batchSize, OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(),
          this.writerConfig.get(
              OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(),
              OrcConf.ROWS_BETWEEN_CHECKS.getDefaultValue().toString()));
      OrcFile.WriterOptions options = OrcFile.writerOptions(properties.getProperties(), this.writerConfig);
      options.setSchema(typeDescription);
      this.orcFileWriter = OrcFile.createWriter(this.stagingFile, options);
    } catch (IOException e) {
      // NOTE(review): orcFileWriter stays null here, so the caller (flush) will fail with an
      // NPE on addRowBatch — consider propagating instead of swallowing; verify callers first.
      log.error("Failed to create the underlying ORC file writer", e);
    }
  }

  /**
   * Average bytes per converted record so far, or -1 when no records have been converted yet.
   * The -1 sentinel matches the "unset" value the constructor checks for
   * ORC_WRITER_ESTIMATED_RECORD_SIZE, and guards the division against a zero record count
   * (e.g. an empty task committing with self-tuning enabled).
   */
  private long getEstimatedRecordSizeBytes() {
    long totalBytes = ((GenericRecordToOrcValueWriter) valueWriter).getTotalBytesConverted();
    long totalRecords = ((GenericRecordToOrcValueWriter) valueWriter).getTotalRecordsConverted();
    return totalRecords == 0 ? -1 : totalBytes / totalRecords;
  }

  /*
   * Note: orc.rows.between.memory.checks is the configuration available to tune memory-check sensitivity in ORC-Core
   * library. By default it is set to 5000. If the user-application is dealing with large-row Kafka topics for example,
   * one should consider lower this value to make memory-check more active.
   */
  @Override
  public void write(D record) throws IOException {
    Preconditions.checkState(!closed, "Writer already closed");
    this.valueWriter.write(record, this.rowBatch);
    int recordCount = this.recordCounter.incrementAndGet();
    if (this.selfTuningWriter && recordCount == this.nextSelfTune) {
      this.tuneBatchSize(this.getEstimatedRecordSizeBytes());
      // During the initial phase, tune at the early checkpoints (10, 100, 500) before settling
      // into the regular cadence.
      if (this.initialEstimatingRecordSizePhase && !initialSelfTuneCheckpoints.isEmpty()) {
        this.nextSelfTune = initialSelfTuneCheckpoints.poll();
      } else {
        this.nextSelfTune += this.selfTuneRowsBetweenCheck;
      }
    }
    if (rowBatch.size == this.batchSize) {
      this.flush();
    }
  }

  /** Metric context scoped to the concrete writer class. */
  protected MetricContext getMetricContext() {
    return Instrumented.getMetricContext(new State(properties), this.getClass());
  }

  /**
   * Opens the staging file with an ORC reader to validate it; on any failure deletes the
   * malformed file, emits a deletion event, and rethrows to abort the commit.
   */
  @VisibleForTesting
  @SuppressFBWarnings(value = "RCN_REDUNDANT_NULLCHECK_OF_NULL_VALUE",
      justification = "Find bugs believes the eventBuilder is always null and that there is a null check, "
          + "but both are not true.")
  static void assertOrcFileIsValid(FileSystem fs, Path filePath, OrcFile.ReaderOptions readerOptions, MetricContext metricContext) throws IOException {
    try (Reader ignored = OrcFile.createReader(filePath, readerOptions)) {
    } catch (Exception e) {
      log.error("Found error when validating staging ORC file {} during the commit phase. "
          + "Will delete the malformed file and abort the commit by throwing an exception", filePath, e);
      HadoopUtils.deletePath(fs, filePath, false);
      GobblinEventBuilder eventBuilder = new GobblinEventBuilder(CORRUPTED_ORC_FILE_DELETION_EVENT, GobblinBaseOrcWriter.ORC_WRITER_NAMESPACE);
      eventBuilder.addMetadata("filePath", filePath.toString());
      eventBuilder.addMetadata("exceptionType", e.getClass().getCanonicalName());
      eventBuilder.addMetadata("exceptionMessage", e.getMessage());
      EventSubmitter.submit(metricContext, eventBuilder);
      throw e;
    }
  }
}
| 3,362 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/OrcValueWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
/**
 * Converts and writes a single data value of type {@code T} into an ORC row batch.
 * Implementations are record-by-record converters; buffering happens in the supplied batch.
 * @param <T> the input record type, e.g. {@link org.apache.avro.generic.GenericRecord}.
 */
public interface OrcValueWriter<T> {
  /**
   * Converts {@code value} and appends it to {@code output}.
   * @param value the data value to write.
   * @param output the VectorizedRowBatch to which the converted value is appended.
   * @throws IOException if there's any IO error while writing the data value.
   */
  void write(T value, VectorizedRowBatch output)
      throws IOException;
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-orc/src/main/java/org/apache/gobblin/writer/GobblinOrcWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
/**
* The WriterBuilder extension to create {@link GobblinOrcWriter} on top of {@link FsDataWriterBuilder}
*/
public class GobblinOrcWriterBuilder extends FsDataWriterBuilder<Schema, GenericRecord> {
public GobblinOrcWriterBuilder() {
}
@Override
public DataWriter<GenericRecord> build()
throws IOException {
Preconditions.checkNotNull(this.destination);
Preconditions.checkArgument(!Strings.isNullOrEmpty(this.writerId));
Preconditions.checkNotNull(this.schema);
switch (this.destination.getType()) {
case HDFS:
if (this.destination.getProperties().getPropAsBoolean(GobblinOrcWriterConfigs.ORC_WRITER_INSTRUMENTED, false)) {
return new InstrumentedGobblinOrcWriter(this, this.destination.getProperties());
}
return new GobblinOrcWriter(this, this.destination.getProperties());
default:
throw new RuntimeException("Unknown destination type: " + this.destination.getType());
}
}
}
| 3,364 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/test/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/test/java/org/apache/gobblin/converter/avro/AvroToJsonStringConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.IOException;
import java.util.Iterator;
import org.apache.avro.generic.GenericRecord;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.test.TestUtils;
public class AvroToJsonStringConverterTest {
private AvroToJsonStringConverter converter;
private GenericRecord sampleRecord;
private WorkUnitState state;
@BeforeTest
public void setUp() throws SchemaConversionException {
sampleRecord = TestUtils.generateRandomAvroRecord();
state = new WorkUnitState();
converter = new AvroToJsonStringConverter();
converter.convertSchema(sampleRecord.getSchema(), state);
}
@Test
public void testRecord() throws DataConversionException, IOException {
Iterable<String> records = converter.convertRecord(null, sampleRecord, state);
Iterator<String> recordIt = records.iterator();
ObjectMapper objectMapper = new ObjectMapper();
String record = recordIt.next();
Assert.assertFalse(recordIt.hasNext());
JsonNode parsedRecord = objectMapper.readValue(record, JsonNode.class);
Assert.assertEquals(parsedRecord.get("field1").getTextValue(), sampleRecord.get("field1").toString());
}
}
| 3,365 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/test/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/test/java/org/apache/gobblin/converter/avro/AvroToJsonRecordWithMetadataConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import org.apache.avro.generic.GenericRecord;
import org.codehaus.jackson.JsonNode;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.type.RecordWithMetadata;
/**
 * Unit tests for {@link AvroToJsonRecordWithMetadataConverter}.
 */
public class AvroToJsonRecordWithMetadataConverterTest {
  private AvroToJsonRecordWithMetadataConverter converter;
  private WorkUnitState state;
  private GenericRecord sampleRecord;

  @BeforeTest
  public void setUp() throws SchemaConversionException {
    // Build a random Avro record and prime the converter with its schema.
    this.sampleRecord = TestUtils.generateRandomAvroRecord();
    this.state = new WorkUnitState();
    this.converter = new AvroToJsonRecordWithMetadataConverter();
    this.converter.convertSchema(this.sampleRecord.getSchema(), this.state);
  }

  @Test
  public void testRecord() throws DataConversionException {
    Iterable<RecordWithMetadata<JsonNode>> converted = this.converter.convertRecord(null, this.sampleRecord, this.state);
    RecordWithMetadata<JsonNode> recordWithMetadata = converted.iterator().next();
    // Content type is derived from the Avro schema's full name plus a "+json" suffix.
    Assert.assertEquals(recordWithMetadata.getMetadata().getGlobalMetadata().getContentType(), "test.name+json");
    Assert.assertEquals(recordWithMetadata.getRecord().get("field1").getTextValue(),
        this.sampleRecord.get("field1").toString());
  }
}
| 3,366 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter/avro/AvroToJsonBytesConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
/**
 * Converts an Avro {@link org.apache.avro.generic.GenericRecord} to its JSON representation
 * as raw UTF-8 encoded bytes.
 */
public class AvroToJsonBytesConverter extends AvroToJsonStringConverterBase<byte[]> {

  /** Returns the serialized JSON bytes unchanged; UTF-8 bytes are already the desired output. */
  @Override
  protected byte[] processUtf8Bytes(byte[] serializedJson) {
    return serializedJson;
  }
}
| 3,367 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter/avro/AvroToJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.util.Collections;
import java.util.Map;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.util.Utf8;
import com.google.common.collect.Maps;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
/**
* Converts Avro record to Json record
*
* @author nveeramr
*
*/
/**
 * Converts Avro record to Json record
 *
 * @author nveeramr
 *
 */
public class AvroToJsonConverter extends Converter<String, JsonArray, GenericRecord, JsonObject> {
  private Gson gson;

  @Override
  public Converter<String, JsonArray, GenericRecord, JsonObject> init(WorkUnitState workUnit) {
    this.gson = new GsonBuilder().create();
    return this;
  }

  /**
   * Parses the Avro schema (a JSON string) into its JSON field array.
   */
  @Override
  public JsonArray convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return new JsonParser().parse(inputSchema).getAsJsonArray();
  }

  /**
   * Converts a single {@link GenericRecord} into a {@link JsonObject} by copying every schema
   * field; Avro {@link Utf8} values are normalized to {@link String} so Gson serializes them
   * as plain JSON strings.
   */
  @Override
  public Iterable<JsonObject> convertRecord(JsonArray outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    Map<String, Object> record = Maps.newHashMap();
    for (Field field : inputRecord.getSchema().getFields()) {
      Object col = inputRecord.get(field.name());
      // instanceof is already null-safe, so the former explicit null check was redundant.
      if (col instanceof Utf8) {
        col = col.toString();
      }
      record.put(field.name(), col);
    }
    // fromJson(..., JsonObject.class) already yields a JsonObject; the extra getAsJsonObject()
    // self-cast was a no-op and has been dropped.
    return Collections.singleton(this.gson.fromJson(this.gson.toJson(record), JsonObject.class));
  }
}
| 3,368 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter/avro/AvroToJsonRecordWithMetadataConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.IOException;
import java.util.Collections;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.type.RecordWithMetadata;
/**
 * Converts an Avro {@link GenericRecord} to a {@link RecordWithMetadata} wrapping the parsed
 * JSON tree. The metadata content type is set to "&lt;schema full name&gt;+json".
 */
public class AvroToJsonRecordWithMetadataConverter extends Converter<Schema, String, GenericRecord, RecordWithMetadata<JsonNode>> {
  private static final ObjectMapper objectMapper = new ObjectMapper();

  private Metadata defaultMetadata;
  // Parameterized as <String> (the original used the raw type, producing unchecked calls below).
  private final AvroToJsonStringConverterBase<String> innerConverter;

  public AvroToJsonRecordWithMetadataConverter() {
    this.innerConverter = new AvroToJsonStringConverter();
  }

  /**
   * Records the schema-derived content type for later records and delegates schema conversion
   * to the inner string converter.
   */
  @Override
  public String convertSchema(final Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    this.defaultMetadata = new Metadata();
    this.defaultMetadata.getGlobalMetadata().setContentType(inputSchema.getFullName() + "+json");
    return this.innerConverter.convertSchema(inputSchema, workUnit);
  }

  /**
   * Serializes the record to a JSON string via the inner converter, parses it into a
   * {@link JsonNode}, and attaches the default metadata.
   *
   * @throws DataConversionException if the JSON string cannot be parsed
   */
  @Override
  public Iterable<RecordWithMetadata<JsonNode>> convertRecord(String outputSchema, GenericRecord inputRecord,
      WorkUnitState workUnit)
      throws DataConversionException {
    try {
      Iterable<String> innerRecordIterable = this.innerConverter.convertRecord(outputSchema, inputRecord, workUnit);
      // The inner converter emits exactly one record per input.
      String record = innerRecordIterable.iterator().next();
      JsonNode jsonRoot = objectMapper.readValue(record, JsonNode.class);
      return Collections.singleton(new RecordWithMetadata<JsonNode>(jsonRoot, this.defaultMetadata));
    } catch (IOException e) {
      throw new DataConversionException("Error converting to JSON", e);
    }
  }
}
| 3,369 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter/avro/AvroToJsonStringConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.nio.charset.StandardCharsets;
/**
 * Converts an Avro {@link org.apache.avro.generic.GenericRecord} to a JSON {@link String}.
 */
public class AvroToJsonStringConverter extends AvroToJsonStringConverterBase<String> {

  /** Decodes the serialized UTF-8 JSON bytes into a Java string. */
  @Override
  protected String processUtf8Bytes(byte[] serializedJson) {
    return new String(serializedJson, StandardCharsets.UTF_8);
  }
}
| 3,370 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter/avro/AvroToJsonBytesWithMetadataConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.MetadataConverterWrapper;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.metadata.types.Metadata;
/**
* Converts an Avro GenericRecord to a UTF-8 JSON encoded byte[]. Inserts the original recordname as content-type.
*/
/**
 * Converts an Avro GenericRecord to a UTF-8 JSON encoded byte[]. Inserts the original recordname as content-type.
 */
public class AvroToJsonBytesWithMetadataConverter extends MetadataConverterWrapper<Schema, String, GenericRecord, byte[]> {
  private String contentType = null;

  public AvroToJsonBytesWithMetadataConverter() {
    super(new AvroToJsonBytesConverter());
  }

  @Override
  public String convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    // Let the wrapped converter process the schema first, then remember the derived content type.
    String convertedSchema = super.convertSchema(inputSchema, workUnit);
    this.contentType = inputSchema.getFullName() + "+json";
    return convertedSchema;
  }

  @Override
  protected Metadata convertMetadata(Metadata metadata) {
    Metadata converted = super.convertMetadata(metadata);
    // Only fill in the content type when upstream metadata did not already provide one.
    if (converted.getGlobalMetadata().getContentType() == null) {
      converted.getGlobalMetadata().setContentType(this.contentType);
    }
    return converted;
  }
}
| 3,371 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-avro-json/src/main/java/org/apache/gobblin/converter/avro/AvroToJsonStringConverterBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.IOException;
import java.util.Collections;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
/**
* Converts an Avro record to a json string.
*/
/**
 * Converts an Avro record to a json string.
 *
 * <p>Subclasses decide the final output type via {@link #processUtf8Bytes(byte[])}.
 */
public abstract class AvroToJsonStringConverterBase<T> extends Converter<Schema, String, GenericRecord, T> {
  private Schema schema;

  // One serializer per thread: the Avro encoder and its backing buffer are reused across records.
  private final ThreadLocal<Serializer> serializer = new ThreadLocal<Serializer>() {
    @Override
    protected Serializer initialValue() {
      return new Serializer(AvroToJsonStringConverterBase.this.schema);
    }
  };

  /** Wraps an Avro JSON encoder together with its reusable output buffer. */
  private static class Serializer {
    private final Encoder encoder;
    private final GenericDatumWriter<GenericRecord> writer;
    private final ByteArrayOutputStream outputStream;

    public Serializer(Schema schema) {
      try {
        this.writer = new GenericDatumWriter<>(schema);
        this.outputStream = new ByteArrayOutputStream();
        this.encoder = EncoderFactory.get().jsonEncoder(schema, this.outputStream);
      } catch (IOException ioe) {
        // Fix: propagate the cause (the original dropped it, hiding the underlying failure).
        throw new RuntimeException("Could not initialize avro json encoder.", ioe);
      }
    }

    /** Serializes a record into UTF-8 JSON bytes, reusing the internal buffer. */
    public byte[] serialize(GenericRecord record) throws IOException {
      this.outputStream.reset();
      this.writer.write(record, this.encoder);
      this.encoder.flush();
      return this.outputStream.toByteArray();
    }
  }

  /**
   * Remembers the input schema (used lazily by per-thread serializers) and returns its
   * JSON string representation as the output schema.
   */
  @Override
  public String convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    this.schema = inputSchema;
    return this.schema.toString();
  }

  @Override
  public Iterable<T> convertRecord(String outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      byte[] utf8Bytes = this.serializer.get().serialize(inputRecord);
      return Collections.singleton(processUtf8Bytes(utf8Bytes));
    } catch (IOException ioe) {
      throw new DataConversionException(ioe);
    }
  }

  /** Transforms the serialized UTF-8 JSON bytes into the converter's output type. */
  protected abstract T processUtf8Bytes(byte[] utf8Bytes);
}
| 3,372 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin/azkaban/AzkabanJobLauncherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.runtime.JobLauncherFactory;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test
public class AzkabanJobLauncherTest {

  /**
   * Verifies that Hadoop token initialization (on by default, requiring keytab config) can be
   * disabled via {@link AzkabanJobLauncher#GOBBLIN_AZKABAN_INITIALIZE_HADOOP_TOKENS}.
   */
  @Test
  public void testDisableTokenInitialization() throws Exception {
    Properties props = new Properties();
    props.setProperty(ConfigurationKeys.JOB_NAME_KEY, "job1");
    props.setProperty(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY, JobLauncherFactory.JobLauncherType.LOCAL.name());
    props.setProperty(ConfigurationKeys.JOB_LOCK_ENABLED_KEY, "false");
    props.setProperty(ConfigurationKeys.STATE_STORE_ENABLED, "false");
    props.setProperty(ConfigurationKeys.SOURCE_CLASS_KEY, DummySource.class.getName());

    // Should get an error since tokens are initialized by default
    try {
      new AzkabanJobLauncher("test", props);
      Assert.fail();
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(e.getMessage().contains("Missing required property keytab.user"));
    }

    // No error expected since initialization is skipped
    props.setProperty(AzkabanJobLauncher.GOBBLIN_AZKABAN_INITIALIZE_HADOOP_TOKENS, "false");
    new AzkabanJobLauncher("test", props);
  }

  /**
   * A dummy implementation of {@link Source}.
   */
  public static class DummySource extends AbstractSource<String, Integer> {

    /** Returns no work units; this source exists only so the launcher can be constructed. */
    @Override
    public List<WorkUnit> getWorkunits(SourceState sourceState) {
      // Typed factory instead of the raw Collections.EMPTY_LIST constant (avoids unchecked assignment).
      return Collections.emptyList();
    }

    @Override
    public Extractor<String, Integer> getExtractor(WorkUnitState state) throws IOException {
      return null;
    }

    @Override
    public void shutdown(SourceState state) {
    }
  }
}
| 3,373 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin/service/modules/orchestration/AzkabanAjaxAPIClientTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import lombok.extern.slf4j.Slf4j;
import org.testng.Assert;
import org.testng.annotations.Test;
@Slf4j
@Test(groups = { "org.apache.gobblin.service.modules.orchestration" })
public class AzkabanAjaxAPIClientTest {
  // NOTE(review): both tests are disabled (enabled=false) and depend on the current wall-clock
  // hour, so results vary by when they run — confirm intent before re-enabling.

  @Test (enabled=false)
  public void testCurrentTimeWithinWindow()
      throws ParseException {
    // Generate a window encapsulating the current time
    int windowStartInHours = 2;
    int windowEndInHours = 5;
    int delayInMinutes = 0;
    // Get computed scheduled time
    String outputScheduledString =
        AzkabanAjaxAPIClient.getScheduledTimeInAzkabanFormat(windowStartInHours, windowEndInHours, delayInMinutes);
    // Verify that output schedule time is within window
    Assert.assertTrue(isWithinWindow(windowStartInHours, windowEndInHours, outputScheduledString));
  }

  @Test (enabled=false)
  public void testCurrentTimeOutsideWindow()
      throws ParseException {
    // Current hour
    Calendar now = Calendar.getInstance();
    int currentHour = now.get(Calendar.HOUR_OF_DAY);
    // Generate a window NOT encapsulating the current time
    // (the window is picked on the opposite side of the day from the current hour).
    int windowStartInHours = currentHour > 10 ? 1 : 11;
    int windowEndInHours = currentHour > 10 ? 6 : 16;
    int delayInMinutes = 0;
    // Get computed scheduled time
    String outputScheduledString =
        AzkabanAjaxAPIClient.getScheduledTimeInAzkabanFormat(windowStartInHours, windowEndInHours, delayInMinutes);
    // Verify that output schedule time is within window
    Assert.assertTrue(isWithinWindow(windowStartInHours, windowEndInHours, outputScheduledString));
  }

  /**
   * Returns true when the Azkaban-formatted schedule string (pattern "hh,mm,a,z") falls
   * strictly between today's windowStartInHours:00 and windowEndInHours:00.
   */
  private boolean isWithinWindow(int windowStartInHours, int windowEndInHours, String outputScheduledString)
      throws ParseException {
    Calendar windowStart = Calendar.getInstance();
    windowStart.set(Calendar.HOUR_OF_DAY, windowStartInHours);
    windowStart.set(Calendar.MINUTE, 0);
    windowStart.set(Calendar.SECOND, 0);
    Calendar windowEnd = Calendar.getInstance();
    windowEnd.set(Calendar.HOUR_OF_DAY, windowEndInHours);
    windowEnd.set(Calendar.MINUTE, 0);
    windowEnd.set(Calendar.SECOND, 0);
    // Parse the schedule string back into hour/minute using Azkaban's "hh,mm,a,z" format,
    // then project it onto today's date for comparison against the window.
    Date outputDate = new SimpleDateFormat("hh,mm,a,z").parse(outputScheduledString);
    Calendar receivedTime = Calendar.getInstance();
    receivedTime.set(Calendar.HOUR_OF_DAY, Integer.parseInt(new SimpleDateFormat("HH").format(outputDate)));
    receivedTime.set(Calendar.MINUTE, Integer.parseInt(new SimpleDateFormat("mm").format(outputDate)));
    log.info("Window start time is: " + new SimpleDateFormat("MM/dd/yyyy hh,mm,a,z").format(windowStart.getTime()));
    log.info("Window end time is: " + new SimpleDateFormat("MM/dd/yyyy hh,mm,a,z").format(windowEnd.getTime()));
    log.info("Output time is: " + new SimpleDateFormat("MM/dd/yyyy hh,mm,a,z").format(receivedTime.getTime()));
    return receivedTime.after(windowStart) && receivedTime.before(windowEnd);
  }
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin/service/modules/orchestration/AzkabanProjectConfigTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.net.URI;
import java.util.Collections;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.util.ConfigUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
@Slf4j
@Test(groups = { "org.apache.gobblin.service.modules.orchestration" })
public class AzkabanProjectConfigTest {

  /** Builds a minimal {@link JobSpec} for the given URI and properties. */
  private static JobSpec newJobSpec(String uri, Properties properties) throws Exception {
    return new JobSpec(new URI(uri), "0.0", "test job spec", ConfigUtils.propertiesToConfig(properties),
        properties, Optional.absent(), Optional.absent(), Collections.EMPTY_MAP);
  }

  @Test
  public void testProjectNameDefault() throws Exception {
    // With no prefix configured, the default "GobblinService" prefix is used.
    AzkabanProjectConfig config = new AzkabanProjectConfig(newJobSpec("uri", new Properties()));
    Assert.assertEquals(config.getAzkabanProjectName(), "GobblinService__uri");
  }

  @Test
  public void testProjectNameWithConfig() throws Exception {
    Properties properties = new Properties();
    properties.setProperty("gobblin.service.azkaban.project.namePrefix", "randomPrefix");
    AzkabanProjectConfig config = new AzkabanProjectConfig(newJobSpec("http://localhost:8000/context", properties));
    Assert.assertEquals(config.getAzkabanProjectName(), "randomPrefix_http___localhost_8000_context");
  }

  @Test
  public void testProjectNameWithReallyLongName() throws Exception {
    // Overly long names get truncated and suffixed with a digest to stay unique.
    Properties properties = new Properties();
    properties.setProperty("gobblin.service.azkaban.project.namePrefix", "randomPrefixWithReallyLongName");
    AzkabanProjectConfig config = new AzkabanProjectConfig(
        newJobSpec("http://localhost:8000/context/that-keeps-expanding-and-explanding", properties));
    Assert.assertEquals(config.getAzkabanProjectName(), "randomPrefixWithReallyLongName_http___localhost_8000__55490420");
  }

  @Test
  public void testProjectZipFileName() throws Exception {
    // The zip filename is simply the project name plus a ".zip" extension.
    Properties properties = new Properties();
    properties.setProperty("gobblin.service.azkaban.project.namePrefix", "randomPrefix");
    AzkabanProjectConfig config = new AzkabanProjectConfig(newJobSpec("http://localhost:8000/context", properties));
    Assert.assertEquals(config.getAzkabanProjectZipFilename(), "randomPrefix_http___localhost_8000_context.zip");
  }

  @Test
  public void testProjectZipFileNameForLongName() throws Exception {
    Properties properties = new Properties();
    properties.setProperty("gobblin.service.azkaban.project.namePrefix", "randomPrefixWithReallyLongName");
    AzkabanProjectConfig config = new AzkabanProjectConfig(
        newJobSpec("http://localhost:8000/context/that-keeps-expanding-and-explanding", properties));
    Assert.assertEquals(config.getAzkabanProjectZipFilename(),
        "randomPrefixWithReallyLongName_http___localhost_8000__55490420.zip");
  }
}
| 3,375 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/test/java/org/apache/gobblin/service/modules/orchestration/AzkabanClientTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.junit.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
* This test is disabled by default because it assumes the Azkaban-solo-server is setup on localhost:8081.
*
* Please check https://azkaban.github.io/azkaban/docs/latest/ for how to setup Azkaban-solo-server.
*/
@Slf4j
@Test(enabled = false)
public class AzkabanClientTest {
  // Shared client under test; built once in setup() and closed in cleanup().
  private AzkabanClient client = null;
  // Short session lifetime (minutes) so tests also exercise session re-authentication.
  private long sessionExpireInMin = 1;
  // Per-test project name/description, regenerated before each method for isolation.
  String projectName;
  String description;
@BeforeClass
public void setup() throws Exception {
Config azkConfig = ConfigFactory.load("local-azkaban-service.conf");
String userName = azkConfig.getString(ServiceAzkabanConfigKeys.AZKABAN_USERNAME_KEY);
String password = azkConfig.getString(ServiceAzkabanConfigKeys.AZKABAN_PASSWORD_KEY);
String url = azkConfig.getString(ServiceAzkabanConfigKeys.AZKABAN_SERVER_URL_KEY);
this.client = AzkabanClient.builder()
.username(userName)
.password(password)
.url(url)
.sessionExpireInMin(sessionExpireInMin)
.build();
}
@BeforeMethod
public void testSetup() {
projectName = "test-project-" + System.currentTimeMillis() + "-" + UUID.randomUUID().toString().substring(0, 4);
description = "This is test project.";
}
@AfterMethod
public void testCleanup() throws AzkabanClientException {
this.client.deleteProject(projectName);
}
@AfterClass
public void cleanup() throws IOException {
this.client.close();
}
private void ensureProjectExist(String projectName, String description) throws AzkabanClientException {
this.client.createProject(projectName, description);
}
  /**
   * Uploads a one-job flow, starts it, and verifies its execution log can be fetched.
   */
  public void testFetchLog() throws Exception {
    String flowName = "test-exec-flow";
    String jobId = "test-exec-flow";
    ensureProjectExist(projectName, description);
    File zipFile = createAzkabanZip(flowName);
    this.client.uploadProjectZip(projectName, zipFile);
    AzkabanExecuteFlowStatus execStatus = this.client.executeFlow(projectName, flowName, Maps.newHashMap());
    String execId = execStatus.getResponse().getExecId();
    ByteArrayOutputStream logStream = null;
    // Logs are not instantly available. Retrying several times until the job has started, and logs are present.
    int maxTries = 10;
    for (int i = 0; i < maxTries; i++) {
      // Fresh buffer each attempt so a partial failed read is discarded.
      logStream = new ByteArrayOutputStream();
      Thread.sleep(1000);
      try {
        this.client.fetchExecutionLog(execId, jobId, 0, 100000000, logStream);
        break;
      } catch (Exception ex) {
        // Rethrow only on the final attempt; earlier failures just trigger another retry.
        if (i == maxTries - 1) {
          throw ex;
        }
      }
    }
    Assert.assertTrue(logStream.size() > 0);
  }
public void testProjectCreateAndDelete() throws AzkabanClientException {
this.client.createProject(projectName, description);
this.client.deleteProject(projectName);
}
public void testProjectExistenceCheck() throws AzkabanClientException {
Assert.assertFalse(this.client.projectExists(projectName));
this.client.createProject(projectName, description);
Assert.assertTrue(this.client.projectExists(projectName));
this.client.deleteProject(projectName);
Assert.assertFalse(this.client.projectExists(projectName));
}
public void testUploadZip() throws IOException {
String flowName = "test-upload";
ensureProjectExist(projectName, description);
// upload Zip to project
File zipFile = createAzkabanZip(flowName);
this.client.uploadProjectZip(projectName, zipFile);
// upload Zip to an non-existed project
try {
this.client.uploadProjectZip("Non-existed-project", zipFile);
Assert.fail();
} catch (Exception e) {
log.info("Expected exception " + e.toString());
}
}
/**
 * Uploads a single-job flow and triggers one execution of it with no extra parameters.
 */
public void testExecuteFlow() throws IOException {
  final String flowName = "test-exec-flow";
  ensureProjectExist(projectName, description);
  // Build and upload a zip containing a single job for this flow.
  this.client.uploadProjectZip(projectName, createAzkabanZip(flowName));
  // Kick off the flow and record the execution id Azkaban assigned.
  AzkabanExecuteFlowStatus status = this.client.executeFlow(projectName, flowName, Maps.newHashMap());
  log.info("Execid: {}", status.getResponse().execId);
}
/**
 * Uploads a single-job flow and executes it with job-level override parameters
 * forwarded through the execute call.
 */
public void testExecuteFlowWithParams() throws IOException {
  final String flowName = "test-exec-flow-param";
  ensureProjectExist(projectName, description);
  // Build and upload the single-job flow zip.
  this.client.uploadProjectZip(projectName, createAzkabanZip(flowName));
  // Overrides handed to the job at execution time.
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put("gobblin.source", "DummySource");
  overrides.put("gobblin.dataset.pattern", "/data/tracking/MessageActionEvent/hourly/*/*/*/*");
  AzkabanExecuteFlowStatus status = this.client.executeFlow(projectName, flowName, overrides);
  log.info("Execid: {}", status.getResponse().execId);
}
/**
 * Uploads a single-job flow and executes it through the options-aware API,
 * passing empty option and parameter maps.
 */
public void testExecuteFlowWithOptions() throws IOException {
  final String flowName = "test-exec-flow-options";
  ensureProjectExist(projectName, description);
  // Build and upload the single-job flow zip.
  this.client.uploadProjectZip(projectName, createAzkabanZip(flowName));
  AzkabanExecuteFlowStatus status =
      this.client.executeFlowWithOptions(projectName, flowName, Maps.newHashMap(), Maps.newHashMap());
  log.info("Execid: {}", status.getResponse().execId);
}
/**
 * Executes a flow and then fetches its execution status, logging every field of
 * the returned status map.
 */
public void testFetchFlowExecution() throws Exception {
  String flowName = "test-fetch-flow-executions";
  ensureProjectExist(projectName, description);
  // upload Zip to project
  File zipFile = createAzkabanZip(flowName);
  this.client.uploadProjectZip(projectName, zipFile);
  Map<String, String> flowOptions = Maps.newHashMap();
  // execute a flow
  AzkabanExecuteFlowStatus execStatus =
      this.client.executeFlowWithOptions(projectName, flowName, flowOptions, Maps.newHashMap());
  log.info("Execid: {}", execStatus.getResponse().execId);
  // wait for the job started and failed
  Thread.sleep(3000);
  // job should fail
  AzkabanFetchExecuteFlowStatus fetchExecuteFlowStatus = this.client.fetchFlowExecution(execStatus.getResponse().execId);
  for (Map.Entry<String, String> entry : fetchExecuteFlowStatus.getResponse().getMap().entrySet()) {
    // Parameterized logging, consistent with the rest of this class; renders the same
    // "key -> value" text without eager string concatenation.
    log.info("{} -> {}", entry.getKey(), entry.getValue());
  }
}
// Disabled by default: sleeping through a full session-expiry period makes this test very slow.
@Test(enabled = false)
public void testSessionExpiration() throws Exception {
// Wait until the client's Azkaban session has expired, then issue a call;
// presumably the client is expected to re-authenticate transparently — confirm intended behavior.
Thread.sleep(sessionExpireInMin * 60 * 1000);
ensureProjectExist(projectName, description);
}
/**
 * Verifies that {@code fetchProjectFlows} reflects the flows contained in an
 * uploaded project zip: none before upload, exactly one (named after the flow) after.
 */
public void testGettingProjectFlows() throws IOException {
  String flowName = "test-exec-flow";
  ensureProjectExist(projectName, description);
  AzkabanProjectFlowsStatus status = this.client.fetchProjectFlows(projectName);
  // A freshly-created project has no flows yet.
  Assert.assertTrue(status.getResponse().getFlows().isEmpty());
  File zipFile = createAzkabanZip(flowName);
  this.client.uploadProjectZip(projectName, zipFile);
  status = this.client.fetchProjectFlows(projectName);
  List<AzkabanProjectFlowsStatus.Flow> flows = status.getResponse().getFlows();
  // TestNG's assertEquals signature is (actual, expected); the arguments were previously
  // swapped, producing misleading failure messages.
  Assert.assertEquals(flows.size(), 1);
  Assert.assertEquals(flows.get(0).flowId, flowName);
}
/**
 * Builds a minimal Azkaban project zip under /tmp containing a single job named
 * {@code flowName}, based on the azkakaban-job-basic.properties classpath resource.
 *
 * @param flowName name used for the job directory, the .job file and the resulting flow
 * @return the created zip file (/tmp/testAzkabanZip/&lt;flowName&gt;.zip)
 * @throws IOException on any filesystem or resource-loading failure
 */
private File createAzkabanZip(String flowName) throws IOException {
  Properties jobProps = new Properties();
  // try-with-resources: the classpath resource stream was previously never closed.
  try (java.io.InputStream propsIn = this.getClass().getClassLoader()
      .getResourceAsStream("azkakaban-job-basic.properties")) {
    jobProps.load(propsIn);
  }
  String basePath = "/tmp/testAzkabanZip";
  // Start from a clean directory so leftovers from earlier runs don't end up in the zip.
  FileUtils.deleteDirectory(new File(basePath));
  // create testAzkabanZip/<flowName> dir
  File jobDir = new File(basePath, flowName);
  Assert.assertTrue(jobDir.mkdirs());
  // create testAzkabanZip/<flowName>/<flowName>.job; the stream was previously leaked.
  File jobFile = new File(jobDir, flowName + ".job");
  try (OutputStream jobOut = new FileOutputStream(jobFile)) {
    jobProps.store(jobOut, "Writing a test job file.");
  }
  // create testAzkabanZip/<flowName>.zip; close streams even if zipping fails.
  try (FileOutputStream fos = new FileOutputStream(jobDir.getPath() + ".zip");
      ZipOutputStream zos = new ZipOutputStream(fos)) {
    addDirToZipArchive(zos, jobDir, null);
  }
  return new File(jobDir.getPath() + ".zip");
}
/**
 * Recursively adds a file or directory tree to an open zip stream.
 *
 * @param zos destination zip stream; left open for the caller to close
 * @param fileToZip file or directory to add; silently ignored if null or missing
 * @param parentDirectoryName zip-entry path prefix for this level, or null/empty at the root
 * @throws IOException if reading a file or writing an entry fails
 */
private static void addDirToZipArchive(ZipOutputStream zos, File fileToZip, String parentDirectoryName) throws IOException {
  if (fileToZip == null || !fileToZip.exists()) {
    return;
  }
  String zipEntryName = fileToZip.getName();
  if (parentDirectoryName != null && !parentDirectoryName.isEmpty()) {
    zipEntryName = parentDirectoryName + "/" + fileToZip.getName();
  }
  if (fileToZip.isDirectory()) {
    File[] children = fileToZip.listFiles();
    // listFiles() can return null on an I/O error; previously that would NPE.
    if (children == null) {
      return;
    }
    for (File file : children) {
      addDirToZipArchive(zos, file, zipEntryName);
    }
  } else {
    byte[] buffer = new byte[1024];
    // try-with-resources: the input stream was previously leaked if a write threw.
    try (FileInputStream fis = new FileInputStream(fileToZip)) {
      zos.putNextEntry(new ZipEntry(zipEntryName));
      int length;
      while ((length = fis.read(buffer)) > 0) {
        zos.write(buffer, 0, length);
      }
      zos.closeEntry();
    }
  }
}
}
| 3,376 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanGobblinDaemon.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.util.List;
import java.util.Properties;
import org.apache.log4j.Logger;
import com.google.common.collect.Lists;
import org.apache.gobblin.metrics.RootMetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.scheduler.SchedulerDaemon;
import azkaban.jobExecutor.AbstractJob;
/**
 * Azkaban entry point that wraps a {@link SchedulerDaemon}: constructing the job builds the
 * daemon, {@link #run()} starts it, and {@link #cancel()} stops it.
 */
public class AzkabanGobblinDaemon extends AbstractJob {
  private static final Logger LOG = Logger.getLogger(AzkabanGobblinDaemon.class);

  private final SchedulerDaemon daemon;

  public AzkabanGobblinDaemon(String jobId, Properties props) throws Exception {
    super(jobId, LOG);
    // Register Azkaban runtime tags on the root metric context before the daemon starts.
    List<Tag<?>> tags = Lists.newArrayList(Tag.fromMap(AzkabanTags.getAzkabanTags()));
    RootMetricContext.get(tags);
    this.daemon = new SchedulerDaemon(props);
  }

  @Override
  public void run() throws Exception {
    this.daemon.start();
  }

  @Override
  public void cancel() throws Exception {
    this.daemon.stop();
  }
}
| 3,377 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanCompactionJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import azkaban.jobExecutor.AbstractJob;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValue;
import org.apache.log4j.Logger;
import org.apache.gobblin.compaction.Compactor;
import org.apache.gobblin.compaction.CompactorFactory;
import org.apache.gobblin.compaction.CompactorCreationException;
import org.apache.gobblin.compaction.listeners.CompactorListener;
import org.apache.gobblin.compaction.listeners.CompactorListenerCreationException;
import org.apache.gobblin.compaction.listeners.CompactorListenerFactory;
import org.apache.gobblin.compaction.ReflectionCompactorFactory;
import org.apache.gobblin.compaction.listeners.ReflectionCompactorListenerFactory;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.DynamicConfigGeneratorFactory;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Azkaban entry point that launches a Gobblin MR compaction job: the constructor resolves
 * dynamic configuration and builds a {@link Compactor}; {@link #run()} performs the compaction.
 *
 * @deprecated use {@link AzkabanJobLauncher} and {@link org.apache.gobblin.compaction.source.CompactionSource}
 */
@Deprecated
public class AzkabanCompactionJobLauncher extends AbstractJob {
  private static final Logger LOG = Logger.getLogger(AzkabanCompactionJobLauncher.class);

  private final Properties properties;
  private final Compactor compactor;

  public AzkabanCompactionJobLauncher(String jobId, Properties props) {
    super(jobId, LOG);
    this.properties = new Properties();
    this.properties.putAll(props);
    // Resolve dynamically-generated configuration and merge it into the job properties.
    Config jobConfig = ConfigUtils.propertiesToConfig(props);
    DynamicConfigGenerator generator = DynamicConfigGeneratorFactory.createDynamicConfigGenerator(jobConfig);
    for (Map.Entry<String, ConfigValue> dynamicEntry : generator.generateDynamicConfig(jobConfig).entrySet()) {
      this.properties.put(dynamicEntry.getKey(), dynamicEntry.getValue().unwrapped().toString());
    }
    this.compactor = getCompactor(getCompactorFactory(), getCompactorListener(getCompactorListenerFactory()));
  }

  // Builds the compactor from the merged properties plus Azkaban runtime tags.
  private Compactor getCompactor(CompactorFactory compactorFactory, Optional<CompactorListener> compactorListener) {
    try {
      return compactorFactory.createCompactor(this.properties, Tag.fromMap(AzkabanTags.getAzkabanTags()),
          compactorListener);
    } catch (CompactorCreationException e) {
      throw new RuntimeException("Unable to create compactor", e);
    }
  }

  // Overridable hook: which factory produces the compactor.
  protected CompactorFactory getCompactorFactory() {
    return new ReflectionCompactorFactory();
  }

  // Builds the (optional) listener notified of compaction events.
  private Optional<CompactorListener> getCompactorListener(CompactorListenerFactory compactorListenerFactory) {
    try {
      return compactorListenerFactory.createCompactorListener(this.properties);
    } catch (CompactorListenerCreationException e) {
      throw new RuntimeException("Unable to create compactor listener", e);
    }
  }

  // Overridable hook: which factory produces the compactor listener.
  protected CompactorListenerFactory getCompactorListenerFactory() {
    return new ReflectionCompactorListenerFactory();
  }

  @Override
  public void run() throws Exception {
    this.compactor.compact();
  }

  @Override
  public void cancel() throws IOException {
    this.compactor.cancel();
  }
}
| 3,378 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/EmbeddedGobblinYarnAppLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.lang.reflect.Field;
import java.util.Map;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.testng.collections.Lists;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Closer;
import lombok.extern.slf4j.Slf4j;
/**
* Given a set up Azkaban job configuration, launch the Gobblin-on-Yarn job in a semi-embedded mode:
* - Uses external Kafka cluster and requires external Zookeeper(Non-embedded TestingServer) to be set up.
* The Kafka Cluster was intentionally set to be external due to the data availability. External ZK was unintentional
* as the helix version (0.9) being used cannot finish state transition in the Embedded ZK.
* TODO: Adding embedded Kafka cluster and set golden datasets for data-validation.
* - Uses MiniYARNCluster so YARN components don't have to be installed.
*/
@Slf4j
public class EmbeddedGobblinYarnAppLauncher extends AzkabanJobRunner {
// Relative path where dynamically-derived Gobblin cluster settings are written for the job configs.
public static final String DYNAMIC_CONF_PATH = "dynamic.conf";
// Relative path where the MiniYARNCluster's live configuration is serialized for other processes.
public static final String YARN_SITE_XML_PATH = "yarn-site.xml";
// Mutable static state: populated by setup() before the no-arg constructor runs via AzkabanJobRunner.doMain.
private static String zkString = "";
private static String fileAddress = "";
/**
 * One-time environment setup: parses the external ZooKeeper connection string from args,
 * starts a single-node MiniYARNCluster with small resource limits, waits for the RM to bind,
 * and writes out the dynamic Gobblin config plus the cluster's yarn-site.xml.
 */
private static void setup(String[] args)
throws Exception {
// Parsing zk-string
Preconditions.checkArgument(args.length == 1);
zkString = args[0];
// Initialize necessary external components: Yarn and Helix
// NOTE(review): closer is registered with the cluster but closer.close() is never invoked here —
// the cluster apparently stays up for the life of the process; confirm this is intentional.
Closer closer = Closer.create();
// Set java home in environment since it isn't set on some systems
String javaHome = System.getProperty("java.home");
setEnv("JAVA_HOME", javaHome);
// Small resource limits so the mini cluster fits on a developer machine.
final YarnConfiguration clusterConf = new YarnConfiguration();
clusterConf.set("yarn.resourcemanager.connect.max-wait.ms", "10000");
clusterConf.set("yarn.nodemanager.resource.memory-mb", "512");
clusterConf.set("yarn.scheduler.maximum-allocation-mb", "1024");
MiniYARNCluster miniYARNCluster = closer.register(new MiniYARNCluster("TestCluster", 1, 1, 1));
miniYARNCluster.init(clusterConf);
miniYARNCluster.start();
// YARN client should not be started before the Resource Manager is up
// (the RM address keeps port ":0" until it has actually bound a port).
AssertWithBackoff.create().logger(log).timeoutMs(10000).assertTrue(new Predicate<Void>() {
@Override
public boolean apply(Void input) {
return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
}
}, "Waiting for RM");
// Write the dynamic Gobblin settings (ZK connection string, dummy job-conf dir) for the job.
try (PrintWriter pw = new PrintWriter(DYNAMIC_CONF_PATH, "UTF-8")) {
File dir = new File("target/dummydir");
// dummy directory specified in configuration
if (!dir.mkdir()) {
log.error("The dummy folder's creation is not successful");
}
dir.deleteOnExit();
pw.println("gobblin.cluster.zk.connection.string=\"" + zkString + "\"");
pw.println("jobconf.fullyQualifiedPath=\"" + dir.getAbsolutePath() + "\"");
}
// YARN config is dynamic and needs to be passed to other processes
try (OutputStream os = new FileOutputStream(new File(YARN_SITE_XML_PATH))) {
clusterConf.writeXml(os);
}
/** Have to pass the same yarn-site.xml to the GobblinYarnAppLauncher to initialize Yarn Client. */
fileAddress = new File(YARN_SITE_XML_PATH).getAbsolutePath();
}
/**
 * Mutates this JVM's environment map via reflection (System.getenv() is normally read-only).
 * NOTE(review): relies on the internal "m" field of the unmodifiable-map wrapper; this may break
 * under newer JDKs with stronger encapsulation — confirm on the target JDK.
 */
static void setEnv(String key, String value) {
try {
Map<String, String> env = System.getenv();
Class<?> cl = env.getClass();
Field field = cl.getDeclaredField("m");
field.setAccessible(true);
Map<String, String> writableEnv = (Map<String, String>) field.get(env);
writableEnv.put(key, value);
} catch (Exception e) {
throw new IllegalStateException("Failed to set environment variable", e);
}
}
// Entry point: environment must be set up before doMain instantiates this class,
// because the constructor reads the static zkString/fileAddress fields.
public static void main(String[] args)
throws Exception {
setup(args);
AzkabanJobRunner.doMain(EmbeddedGobblinYarnAppLauncher.class, args);
}
// Wires the common/local properties, the streaming job file, and overrides pointing at the
// freshly-started mini cluster (ZK string and serialized yarn-site.xml path).
public EmbeddedGobblinYarnAppLauncher() {
super(Lists.newArrayList("gobblin-modules/gobblin-azkaban/src/main/resources/conf/properties/common.properties",
"gobblin-modules/gobblin-azkaban/src/main/resources/conf/properties/local.properties"),
Lists.newArrayList("gobblin-modules/gobblin-azkaban/src/main/resources/conf/jobs/kafka-streaming-on-yarn.job"),
ImmutableMap.of("yarn.resourcemanager.connect.max-wait.ms", "10000", "gobblin.cluster.zk.connection.string",
EmbeddedGobblinYarnAppLauncher.zkString, "gobblin.cluster.job.conf.path",
"gobblin-modules/gobblin-azkaban/src/main/resources/conf/gobblin_jobs", "gobblin.yarn.conf.dir",
"gobblin-modules/gobblin-azkaban/src/main/resources/conf/gobblin_conf", "yarn-site-address", fileAddress));
}
}
| 3,379 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanGobblinLocalYarnAppLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
 * An extension of {@link AzkabanGobblinYarnAppLauncher} for locally-running Azkaban instances since it provides
 * capability of changing yarn-resource related configuration in the way that could work with lighter hardware.
 */
public class AzkabanGobblinLocalYarnAppLauncher extends AzkabanGobblinYarnAppLauncher {

  /**
   * Property holding the filesystem path of a yarn-site.xml to load. Extracted to a constant so
   * the key appears once; EmbeddedGobblinYarnAppLauncher passes this same key when it serializes
   * its MiniYARNCluster configuration.
   */
  private static final String YARN_SITE_ADDRESS_KEY = "yarn-site-address";

  public AzkabanGobblinLocalYarnAppLauncher(String jobId, Properties gobblinProps)
      throws IOException {
    super(jobId, gobblinProps);
  }

  /**
   * Builds the YARN configuration for a local run: prefer an explicitly-provided yarn-site.xml,
   * otherwise fall back to resource limits small enough for a developer machine.
   */
  @Override
  protected YarnConfiguration initYarnConf(Properties gobblinProps) {
    YarnConfiguration yarnConfiguration = super.initYarnConf(gobblinProps);
    if (gobblinProps.containsKey(YARN_SITE_ADDRESS_KEY)) {
      yarnConfiguration.addResource(new Path(gobblinProps.getProperty(YARN_SITE_ADDRESS_KEY)));
    } else {
      yarnConfiguration.set("yarn.resourcemanager.connect.max-wait.ms", "10000");
      yarnConfiguration.set("yarn.nodemanager.resource.memory-mb", "512");
      yarnConfiguration.set("yarn.scheduler.maximum-allocation-mb", "1024");
    }
    return yarnConfiguration;
  }
}
| 3,380 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanStateStoreCleanerJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.IOException;
import java.util.Properties;
import org.apache.log4j.Logger;
import azkaban.jobExecutor.AbstractJob;
import org.apache.gobblin.metastore.util.StateStoreCleaner;
/**
 * A utility class for running the {@link StateStoreCleaner} on Azkaban.
 *
 * <p>Thin Azkaban {@link AbstractJob} wrapper: the constructor builds the cleaner from the
 * job properties and {@link #run()} delegates to it. No cancel override is provided.</p>
 *
 * @author Yinan Li
 */
public class AzkabanStateStoreCleanerJob extends AbstractJob {
private static final Logger LOGGER = Logger.getLogger(AzkabanStateStoreCleanerJob.class);
private final StateStoreCleaner stateStoreCleaner;
public AzkabanStateStoreCleanerJob(String jobId, Properties props) throws IOException {
super(jobId, LOGGER);
this.stateStoreCleaner = new StateStoreCleaner(props);
}
@Override
public void run()
throws Exception {
this.stateStoreCleaner.run();
}
}
| 3,381 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanJobRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import com.google.common.io.Files;
import azkaban.jobExecutor.AbstractJob;
import azkaban.utils.Props;
import azkaban.utils.PropsUtils;
import lombok.RequiredArgsConstructor;
/**
* Runs Azkaban jobs locally.
*
* Usage:
* Extend the class, in the constructor pass the list of relative paths to all common properties files, as well as list
* of job files to run.
*
* Execution:
* java -cp ... {@link AzkabanJobRunner} class-name root-directory
*
* Where class-name is the extension of {@link AzkabanJobRunner} that should be executed, and root-directory is the
* root directory of the repository.
*
* @author Issac Buenrostro
*/
@RequiredArgsConstructor
public class AzkabanJobRunner {
  // Repository root against which all property/job file paths are resolved.
  private File baseDirectory = new File(".");
  private final List<String> commonProps;
  private final List<String> jobProps;
  private final Map<String, String> overrides;

  /**
   * Instantiates the given runner subclass, optionally re-roots it at args[0], and runs it.
   */
  static void doMain(Class<? extends AzkabanJobRunner> klazz, String[] args)
      throws Exception {
    AzkabanJobRunner runner = klazz.newInstance();
    if (args.length >= 1) {
      runner.setBaseDirectory(new File(args[0]));
    }
    runner.run();
  }

  /** Creates a temporary directory (deleted on JVM exit) and returns its absolute path. */
  public static String getTempDirectory() {
    File tmpDirectory = Files.createTempDir();
    tmpDirectory.deleteOnExit();
    return tmpDirectory.getAbsolutePath();
  }

  private void setBaseDirectory(File baseDirectory) {
    this.baseDirectory = baseDirectory;
  }

  /**
   * Loads the common property files in order, then for each job file builds resolved job
   * properties (common + job file + overrides), constructs its job and runs it.
   *
   * @throws IOException if a property file cannot be read
   */
  public void run()
      throws IOException {
    // Renamed from "commonProps", which shadowed the this.commonProps field.
    Props mergedCommonProps = new Props();
    for (String commonPropsFile : this.commonProps) {
      // Later files take precedence over earlier ones.
      mergedCommonProps = new Props(mergedCommonProps, new File(baseDirectory, commonPropsFile));
    }
    for (String jobFile : this.jobProps) {
      File file = new File(baseDirectory, jobFile);
      Props jobProps = new Props(new Props(mergedCommonProps, file), this.overrides);
      jobProps = PropsUtils.resolveProps(jobProps);
      try {
        AbstractJob job = constructAbstractJob(file.getName(), jobProps);
        job.run();
      } catch (Throwable t) {
        throw new RuntimeException(t);
      }
    }
  }

  /**
   * Reflectively constructs the configured job class, first via its (String, Props)
   * constructor, then falling back to (String, Properties).
   */
  private AbstractJob constructAbstractJob(String name, Props jobProps) {
    try {
      return (AbstractJob) jobProps.getClass("job.class").getConstructor(String.class, Props.class)
          .newInstance(name, jobProps);
    } catch (ReflectiveOperationException propsCtorFailure) {
      try {
        return (AbstractJob) jobProps.getClass("job.class").getConstructor(String.class, Properties.class)
            .newInstance(name, propsToProperties(jobProps));
      } catch (ReflectiveOperationException exc) {
        RuntimeException failure = new RuntimeException(exc);
        // Keep the first (Props-constructor) failure for diagnosis instead of dropping it.
        failure.addSuppressed(propsCtorFailure);
        throw failure;
      }
    }
  }

  /** Flattens an Azkaban Props into a plain java.util.Properties. */
  private Properties propsToProperties(Props props) {
    Properties properties = new Properties();
    for (String key : props.getKeySet()) {
      properties.put(key, props.getString(key));
    }
    return properties;
  }
}
| 3,382 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanIntegrationTestLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.util.Properties;
import org.apache.log4j.Logger;
import azkaban.jobExecutor.AbstractJob;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.test.setup.config.TestHarnessLauncher;
/**
* This class launches the TestHarness framework using Azkaban
*
* Created by spyne on 6/8/15.
*/
public class AzkabanIntegrationTestLauncher extends AbstractJob {
  private static final Logger LOG = Logger.getLogger(AzkabanIntegrationTestLauncher.class);

  private final Properties properties;

  private TestHarnessLauncher launcher;

  public AzkabanIntegrationTestLauncher(String id, Properties properties) {
    super(id, LOG);
    this.properties = properties;
  }

  @Override
  public void run() throws Exception {
    // Get the test harness launcher instance
    this.launcher = createTestHarnessInstance();
    // Execute them
    this.launcher.launchTest();
  }

  /**
   * Reflectively instantiates the launcher class named by
   * {@link ConfigurationKeys#TEST_HARNESS_LAUNCHER_IMPL}.
   *
   * @throws ClassNotFoundException if the configured class cannot be found
   * @throws InstantiationException if it cannot be instantiated via its no-arg constructor
   * @throws IllegalAccessException if the no-arg constructor is not accessible
   */
  private TestHarnessLauncher createTestHarnessInstance()
      throws ClassNotFoundException, InstantiationException, IllegalAccessException {
    if (!this.properties.containsKey(ConfigurationKeys.TEST_HARNESS_LAUNCHER_IMPL)) {
      throw new RuntimeException("Unable to launch Test Harness. No implementation class found");
    }
    final String className = this.properties.getProperty(ConfigurationKeys.TEST_HARNESS_LAUNCHER_IMPL);
    // asSubclass replaces the previous unchecked cast: a misconfigured class now fails
    // immediately with a clear ClassCastException instead of polluting the heap.
    final Class<? extends TestHarnessLauncher> clazz =
        Class.forName(className).asSubclass(TestHarnessLauncher.class);
    return clazz.newInstance();
  }
}
| 3,383 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanTags.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
/**
 * Utility class for collecting metadata specific to an Azkaban runtime environment.
 *
 * <p>Empty subclass alias of {@link org.apache.gobblin.util.AzkabanTags}; it adds no behavior.
 * Presumably retained so existing references to this package keep resolving — confirm before removing.</p>
 */
public class AzkabanTags extends org.apache.gobblin.util.AzkabanTags {
}
| 3,384 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanGobblinYarnAppLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import azkaban.jobExecutor.AbstractJob;
import lombok.Getter;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.yarn.GobblinYarnAppLauncher;
import org.apache.gobblin.yarn.GobblinYarnConfigurationKeys;
/**
* A utility class for launching a Gobblin application on Yarn through Azkaban.
*
* <p>
* This class starts the driver of the Gobblin application on Yarn, which will be up running until the
* Azkaban job is killed/cancelled or the shutdown hook gets called and causes the driver to stop.
* </p>
*
* <p>
* See {@link GobblinYarnAppLauncher} for details information on the launcher/driver of the Gobblin
* application on Yarn.
* </p>
*
* @author Yinan Li
*/
public class AzkabanGobblinYarnAppLauncher extends AbstractJob {
  private static final Logger LOGGER = Logger.getLogger(AzkabanGobblinYarnAppLauncher.class);

  private final GobblinYarnAppLauncher gobblinYarnAppLauncher;

  @Getter
  protected final YarnConfiguration yarnConfiguration;

  public AzkabanGobblinYarnAppLauncher(String jobId, Properties gobblinProps)
      throws IOException {
    super(jobId, LOGGER);
    addRuntimeProperties(gobblinProps);
    Config gobblinConfig = ConfigUtils.propertiesToConfig(gobblinProps);
    // Suppress logs from classes that emit the Yarn application id that Azkaban uses to kill the application.
    setLogLevelForClasses(gobblinConfig);
    yarnConfiguration = initYarnConf(gobblinProps);
    // Flag the launcher as running under Azkaban.
    gobblinConfig = gobblinConfig.withValue(GobblinYarnAppLauncher.GOBBLIN_YARN_APP_LAUNCHER_MODE,
        ConfigValueFactory.fromAnyRef(GobblinYarnAppLauncher.AZKABAN_APP_LAUNCHER_MODE_KEY));
    this.gobblinYarnAppLauncher = getYarnAppLauncher(gobblinConfig);
  }

  /** Builds the underlying launcher and initializes its YARN clients; overridable for testing. */
  protected GobblinYarnAppLauncher getYarnAppLauncher(Config gobblinConfig)
      throws IOException {
    GobblinYarnAppLauncher gobblinYarnAppLauncher = new GobblinYarnAppLauncher(gobblinConfig, this.yarnConfiguration);
    gobblinYarnAppLauncher.initializeYarnClients(gobblinConfig);
    return gobblinYarnAppLauncher;
  }

  /**
   * Set Log Level for each class specified in the config. Class name and the corresponding log level can be specified
   * as "a:INFO,b:ERROR", where logs of class "a" are set to INFO and logs from class "b" are set to ERROR.
   * Malformed entries are skipped with a warning (previously they threw ArrayIndexOutOfBoundsException).
   * @param config job config carrying {@link GobblinYarnConfigurationKeys#GOBBLIN_YARN_AZKABAN_CLASS_LOG_LEVELS}
   */
  private void setLogLevelForClasses(Config config) {
    List<String> classLogLevels = ConfigUtils.getStringList(config, GobblinYarnConfigurationKeys.GOBBLIN_YARN_AZKABAN_CLASS_LOG_LEVELS);
    for (String classLogLevel : classLogLevels) {
      // Each entry is "<className>:<level>"; split once instead of twice.
      String[] parts = classLogLevel.split(":");
      if (parts.length != 2) {
        LOGGER.warn("Ignoring malformed class log level entry: " + classLogLevel);
        continue;
      }
      // Unrecognized level strings default to INFO (Level.toLevel contract).
      Logger.getLogger(parts[0]).setLevel(Level.toLevel(parts[1], Level.INFO));
    }
  }

  /**
   * Extended class can override this method by providing their own YARN configuration.
   */
  protected YarnConfiguration initYarnConf(Properties gobblinProps) {
    return new YarnConfiguration();
  }

  /**
   * Extended class can override this method to add some runtime properties.
   */
  protected void addRuntimeProperties(Properties gobblinProps) {
  }

  @Override
  public void run() throws Exception {
    this.gobblinYarnAppLauncher.launch();
    // Stop the launcher when the JVM exits so the Yarn app is not orphaned.
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        try {
          AzkabanGobblinYarnAppLauncher.this.gobblinYarnAppLauncher.stop();
        } catch (IOException ioe) {
          LOGGER.error("Failed to shutdown the " + GobblinYarnAppLauncher.class.getSimpleName(), ioe);
        } catch (TimeoutException te) {
          LOGGER.error("Timed out in shutting down the " + GobblinYarnAppLauncher.class.getSimpleName(), te);
        }
      }
    });
  }

  @Override
  public void cancel() throws Exception {
    try {
      this.gobblinYarnAppLauncher.stop();
    } finally {
      super.cancel();
    }
  }
}
| 3,385 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/azkaban/AzkabanJobLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.azkaban;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValue;
import azkaban.jobExecutor.AbstractJob;
import javax.annotation.Nullable;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.DynamicConfigGenerator;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.RootMetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.DynamicConfigGeneratorFactory;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.JobLauncherFactory;
import org.apache.gobblin.runtime.app.ApplicationException;
import org.apache.gobblin.runtime.app.ApplicationLauncher;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.runtime.listeners.CompositeJobListener;
import org.apache.gobblin.runtime.listeners.EmailNotificationJobListener;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.runtime.services.MetricsReportingService;
import org.apache.gobblin.service.modules.orchestration.AzkabanProjectConfig;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.PropertiesUtils;
import org.apache.gobblin.util.TimeRangeChecker;
import org.apache.gobblin.util.hadoop.TokenUtils;
import org.apache.gobblin.util.logs.Log4jConfigurationHelper;
import static org.apache.gobblin.runtime.AbstractJobLauncher.resolveGobblinJobTemplateIfNecessary;
import static org.apache.hadoop.security.UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
/**
* A utility class for launching a Gobblin Hadoop MR job through Azkaban.
*
* <p>
* By default, this class will use the {@link org.apache.gobblin.runtime.mapreduce.MRJobLauncher} to launch and run
* the Gobblin job unless a different job launcher type is explicitly specified in the job configuration
* using {@link ConfigurationKeys#JOB_LAUNCHER_TYPE_KEY}.
* </p>
*
* <p>
* The launcher will use Hadoop token provided in environment variable
* {@link org.apache.hadoop.security.UserGroupInformation#HADOOP_TOKEN_FILE_LOCATION}.
* If it is missing, the launcher will get a token using {@link TokenUtils#getHadoopTokens}.
* </p>
*
* @author Yinan Li
*/
public class AzkabanJobLauncher extends AbstractJob implements ApplicationLauncher, JobLauncher {
  private static final Logger LOG = Logger.getLogger(AzkabanJobLauncher.class);

  public static final String GOBBLIN_LOG_LEVEL_KEY = "gobblin.log.levelOverride";
  public static final String LOG_LEVEL_OVERRIDE_MAP = "log.levelOverride.map";
  public static final String GOBBLIN_CUSTOM_JOB_LISTENERS = "gobblin.custom.job.listeners";
  private static final String HADOOP_FS_DEFAULT_NAME = "fs.default.name";
  private static final String AZKABAN_LINK_JOBEXEC_URL = "azkaban.link.jobexec.url";
  private static final String AZKABAN_LINK_JOBEXEC_PROXY_URL = "azkaban.link.jobexec.proxyUrl";
  private static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
  private static final String MAPREDUCE_JOB_CREDENTIALS_BINARY = "mapreduce.job.credentials.binary";
  private static final String AZKABAN_GOBBLIN_JOB_SLA_IN_SECONDS = "gobblin.azkaban.SLAInSeconds";
  private static final String DEFAULT_AZKABAN_GOBBLIN_JOB_SLA_IN_SECONDS = "-1"; // No SLA.
  public static final String GOBBLIN_AZKABAN_INITIALIZE_HADOOP_TOKENS = "gobblin.azkaban.initializeHadoopTokens";
  public static final String DEFAULT_GOBBLIN_AZKABAN_INITIALIZE_HADOOP_TOKENS = "true";

  private final Closer closer = Closer.create();
  private final JobLauncher jobLauncher;
  private final JobListener jobListener;
  private final Properties props;
  private final ApplicationLauncher applicationLauncher;
  // Gobblin-defined SLA in seconds; <= 0 means no SLA is enforced.
  private final long ownAzkabanSla;

  /**
   * Prepares the Gobblin job for launching from Azkaban: configures metrics, log levels, dynamic
   * configuration, filesystem URIs, Hadoop delegation tokens, and the underlying
   * {@link JobLauncher} / {@link ApplicationLauncher}.
   *
   * @param jobId the Azkaban job id
   * @param props merged Azkaban .job and .properties configuration
   * @throws Exception if any part of the setup (e.g. token negotiation) fails
   */
  public AzkabanJobLauncher(String jobId, Properties props)
      throws Exception {
    super(jobId, LOG);
    HadoopUtils.addGobblinSite();

    // Configure root metric context
    List<Tag<?>> tags = Lists.newArrayList();
    tags.addAll(Tag.fromMap(AzkabanTags.getAzkabanTags()));
    RootMetricContext.get(tags);

    // Optional override of the log level for the whole org.apache.gobblin namespace.
    if (props.containsKey(GOBBLIN_LOG_LEVEL_KEY)) {
      Level logLevel = Level.toLevel(props.getProperty(GOBBLIN_LOG_LEVEL_KEY), Level.INFO);
      Logger.getLogger("org.apache.gobblin").setLevel(logLevel);
    }
    Log4jConfigurationHelper.setLogLevel(
        PropertiesUtils.getPropAsList(props, Log4jConfigurationHelper.LOG_LEVEL_OVERRIDE_MAP, ""));

    this.props = new Properties();
    this.props.putAll(props);

    // initialize job listeners after properties has been initialized
    this.jobListener = initJobListener();

    // load dynamic configuration and add them to the job properties
    Config propsAsConfig = ConfigUtils.propertiesToConfig(props);
    DynamicConfigGenerator dynamicConfigGenerator =
        DynamicConfigGeneratorFactory.createDynamicConfigGenerator(propsAsConfig);
    Config dynamicConfig = dynamicConfigGenerator.generateDynamicConfig(propsAsConfig);

    // add the dynamic config to the job config
    for (Map.Entry<String, ConfigValue> entry : dynamicConfig.entrySet()) {
      this.props.put(entry.getKey(), entry.getValue().unwrapped().toString());
    }

    // Default the Gobblin FS / state-store URIs to the cluster's default FS unless set explicitly.
    Configuration conf = new Configuration();
    String fsUri = conf.get(HADOOP_FS_DEFAULT_NAME);
    if (!Strings.isNullOrEmpty(fsUri)) {
      if (!this.props.containsKey(ConfigurationKeys.FS_URI_KEY)) {
        this.props.setProperty(ConfigurationKeys.FS_URI_KEY, fsUri);
      }
      if (!this.props.containsKey(ConfigurationKeys.STATE_STORE_FS_URI_KEY)) {
        this.props.setProperty(ConfigurationKeys.STATE_STORE_FS_URI_KEY, fsUri);
      }
    }

    // Set the job tracking URL to point to the Azkaban job execution link URL
    this.props
        .setProperty(ConfigurationKeys.JOB_TRACKING_URL_KEY, Strings.nullToEmpty(conf.get(AZKABAN_LINK_JOBEXEC_URL)));

    if (Boolean.parseBoolean(this.props.getProperty(GOBBLIN_AZKABAN_INITIALIZE_HADOOP_TOKENS,
        DEFAULT_GOBBLIN_AZKABAN_INITIALIZE_HADOOP_TOKENS))) {
      if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
        // Reuse the Hadoop token Azkaban already provided in the environment.
        LOG.info("Job type " + props.getProperty(JOB_TYPE) + " provided Hadoop token in the environment variable "
            + HADOOP_TOKEN_FILE_LOCATION);
        this.props.setProperty(MAPREDUCE_JOB_CREDENTIALS_BINARY, System.getenv(HADOOP_TOKEN_FILE_LOCATION));
      } else {
        // see javadoc for more information
        LOG.info("Job type " + props.getProperty(JOB_TYPE) + " did not provide Hadoop token in the environment variable "
            + HADOOP_TOKEN_FILE_LOCATION + ". Negotiating Hadoop tokens.");
        File tokenFile = Files.createTempFile("mr-azkaban", ".token").toFile();
        TokenUtils.getHadoopTokens(new State(props), Optional.of(tokenFile), new Credentials());

        System.setProperty(HADOOP_TOKEN_FILE_LOCATION, tokenFile.getAbsolutePath());
        System.setProperty(MAPREDUCE_JOB_CREDENTIALS_BINARY, tokenFile.getAbsolutePath());
        this.props.setProperty(MAPREDUCE_JOB_CREDENTIALS_BINARY, tokenFile.getAbsolutePath());
        this.props.setProperty("env." + HADOOP_TOKEN_FILE_LOCATION, tokenFile.getAbsolutePath());
      }
    }

    Properties jobProps = this.props;
    resolveGobblinJobTemplateIfNecessary(jobProps);
    GobblinMetrics.addCustomTagsToProperties(jobProps, tags);

    // If the job launcher type is not specified in the job configuration,
    // override the default to use the MAPREDUCE launcher.
    if (!jobProps.containsKey(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY)) {
      jobProps.setProperty(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY,
          JobLauncherFactory.JobLauncherType.MAPREDUCE.toString());
    }

    this.ownAzkabanSla = Long.parseLong(
        jobProps.getProperty(AZKABAN_GOBBLIN_JOB_SLA_IN_SECONDS, DEFAULT_AZKABAN_GOBBLIN_JOB_SLA_IN_SECONDS));

    List<? extends Tag<?>> metadataTags = Lists.newArrayList();
    //Is the job triggered using Gobblin-as-a-Service? If so, add additional tags needed for tracking
    //the job execution.
    if (jobProps.containsKey(ConfigurationKeys.FLOW_NAME_KEY)) {
      metadataTags = addAdditionalMetadataTags(jobProps);
    }

    // Create a JobLauncher instance depending on the configuration. The same properties object is
    // used for both system and job configuration properties because Azkaban puts configuration
    // properties in the .job file and in the .properties file into the same Properties object.
    this.jobLauncher = this.closer.register(JobLauncherFactory.newJobLauncher(jobProps, jobProps, null, metadataTags));

    // Since Java classes cannot extend multiple classes and Azkaban jobs must extend AbstractJob, we must use composition
    // verses extending ServiceBasedAppLauncher
    boolean isMetricReportingFailureFatal = PropertiesUtils
        .getPropAsBoolean(jobProps, ConfigurationKeys.GOBBLIN_JOB_METRIC_REPORTING_FAILURE_FATAL,
            Boolean.toString(ConfigurationKeys.DEFAULT_GOBBLIN_JOB_METRIC_REPORTING_FAILURE_FATAL));
    boolean isEventReportingFailureFatal = PropertiesUtils
        .getPropAsBoolean(jobProps, ConfigurationKeys.GOBBLIN_JOB_EVENT_REPORTING_FAILURE_FATAL,
            Boolean.toString(ConfigurationKeys.DEFAULT_GOBBLIN_JOB_EVENT_REPORTING_FAILURE_FATAL));

    jobProps.setProperty(MetricsReportingService.METRICS_REPORTING_FAILURE_FATAL_KEY,
        Boolean.toString(isMetricReportingFailureFatal));
    jobProps.setProperty(MetricsReportingService.EVENT_REPORTING_FAILURE_FATAL_KEY,
        Boolean.toString(isEventReportingFailureFatal));

    this.applicationLauncher =
        this.closer.register(new ServiceBasedAppLauncher(jobProps, "Azkaban-" + UUID.randomUUID()));
  }

  /**
   * Builds the composite {@link JobListener} from the aliases configured under
   * {@link #GOBBLIN_CUSTOM_JOB_LISTENERS}; defaults to {@link EmailNotificationJobListener}.
   *
   * @return a {@link CompositeJobListener} wrapping all configured listeners
   * @throws IllegalArgumentException if a listener alias cannot be resolved or instantiated
   */
  protected JobListener initJobListener() {
    CompositeJobListener compositeJobListener = new CompositeJobListener();
    List<String> listeners = new State(props)
        .getPropAsList(GOBBLIN_CUSTOM_JOB_LISTENERS, EmailNotificationJobListener.class.getSimpleName());
    try {
      // The resolver is loop-invariant; create it once rather than per listener alias.
      ClassAliasResolver<JobListener> listenerAliasResolver = new ClassAliasResolver<>(JobListener.class);
      for (String listenerAlias : listeners) {
        compositeJobListener.addJobListener(listenerAliasResolver.resolveClass(listenerAlias).newInstance());
      }
    } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) {
      throw new IllegalArgumentException(e);
    }
    return compositeJobListener;
  }

  /**
   * Runs the Gobblin job if the current time is within the configured execution window, enforcing
   * the optional Gobblin-defined SLA by cancelling and failing the Azkaban job on timeout.
   */
  @Override
  public void run()
      throws Exception {
    if (isCurrentTimeInRange()) {
      if (this.ownAzkabanSla > 0) {
        LOG.info("Found gobblin defined SLA: " + this.ownAzkabanSla);
        final ExecutorService service = Executors.newSingleThreadExecutor();
        boolean isCancelled = false;
        Future<Void> future = service.submit(new Callable<Void>() {
          @Override
          public Void call()
              throws Exception {
            runRealJob();
            return null;
          }
        });

        try {
          future.get(this.ownAzkabanSla, TimeUnit.SECONDS);
        } catch (final TimeoutException e) {
          LOG.info("Cancelling job since SLA is reached: " + this.ownAzkabanSla);
          future.cancel(true);
          isCancelled = true;
          this.cancelJob(jobListener);
        } finally {
          service.shutdown();
          if (isCancelled) {
            this.cancel();
            // Need to fail the Azkaban job.
            throw new RuntimeException("Job failed because it reaches SLA limit: " + this.ownAzkabanSla);
          }
        }
      } else {
        runRealJob();
      }
    }
  }

  /** Starts the application services, launches the job, and always stops and closes afterwards. */
  private void runRealJob()
      throws Exception {
    try {
      start();
      launchJob(jobListener);
    } finally {
      try {
        stop();
      } finally {
        close();
      }
    }
  }

  /** Stops the application services and releases all registered resources. */
  @Override
  public void cancel()
      throws Exception {
    try {
      stop();
    } finally {
      close();
    }
  }

  @Override
  public void start()
      throws ApplicationException {
    this.applicationLauncher.start();
  }

  @Override
  public void stop()
      throws ApplicationException {
    this.applicationLauncher.stop();
  }

  @Override
  public void launchJob(@Nullable JobListener jobListener)
      throws JobException {
    this.jobLauncher.launchJob(jobListener);
  }

  @Override
  public void cancelJob(@Nullable JobListener jobListener)
      throws JobException {
    this.jobLauncher.cancelJob(jobListener);
  }

  @Override
  public void close()
      throws IOException {
    this.closer.close();
  }

  /**
   * Uses the properties {@link ConfigurationKeys#AZKABAN_EXECUTION_DAYS_LIST},
   * {@link ConfigurationKeys#AZKABAN_EXECUTION_TIME_RANGE}, and
   * {@link TimeRangeChecker#isTimeInRange(List, String, String, DateTime)} to determine if the current job should
   * continue its execution based on the extra scheduled parameters defined in the config.
   *
   * @return true if this job should be launched, false otherwise.
   */
  private boolean isCurrentTimeInRange() {
    Splitter splitter = Splitter.on(",").omitEmptyStrings().trimResults();

    // BUGFIX: Properties.contains() (inherited from Hashtable) searches *values*, not keys, so the
    // original check never matched and the execution window was silently ignored. containsKey() is
    // the correct lookup here.
    if (this.props.containsKey(ConfigurationKeys.AZKABAN_EXECUTION_DAYS_LIST) && this.props
        .containsKey(ConfigurationKeys.AZKABAN_EXECUTION_TIME_RANGE)) {

      List<String> executionTimeRange =
          splitter.splitToList(this.props.getProperty(ConfigurationKeys.AZKABAN_EXECUTION_TIME_RANGE));
      List<String> executionDays =
          splitter.splitToList(this.props.getProperty(ConfigurationKeys.AZKABAN_EXECUTION_DAYS_LIST));
      // The failure message previously named AZKABAN_EXECUTION_DAYS_LIST; the check validates the
      // time-range property, so the message now references the correct key.
      Preconditions.checkArgument(executionTimeRange.size() == 2,
          "The property " + ConfigurationKeys.AZKABAN_EXECUTION_TIME_RANGE
              + " should be a comma separated list of two entries");
      return TimeRangeChecker.isTimeInRange(executionDays, executionTimeRange.get(0), executionTimeRange.get(1),
          new DateTime(DateTimeZone.forID(ConfigurationKeys.PST_TIMEZONE_NAME)));
    }
    return true;
  }

  /**
   * Add additional properties such as flow.group, flow.name, executionUrl. Useful for tracking
   * job executions on Azkaban triggered by Gobblin-as-a-Service (GaaS).
   * @param jobProps job properties
   * @return a list of tags uniquely identifying a job execution on Azkaban.
   */
  private static List<? extends Tag<?>> addAdditionalMetadataTags(Properties jobProps) {
    List<Tag<?>> metadataTags = Lists.newArrayList();
    String jobExecutionId = jobProps.getProperty(AZKABAN_FLOW_EXEC_ID, "");

    // Display the proxy URL in the metadata tag if it exists
    String jobExecutionUrl =
        jobProps.getProperty(AZKABAN_LINK_JOBEXEC_PROXY_URL, jobProps.getProperty(AZKABAN_LINK_JOBEXEC_URL, ""));

    metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD,
        jobProps.getProperty(ConfigurationKeys.FLOW_GROUP_KEY, "")));
    metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD,
        jobProps.getProperty(ConfigurationKeys.FLOW_NAME_KEY)));

    if (jobProps.containsKey(ConfigurationKeys.JOB_CURRENT_ATTEMPTS)) {
      metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD,
          jobProps.getProperty(ConfigurationKeys.JOB_CURRENT_ATTEMPTS, "1")));
      metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.CURRENT_GENERATION_FIELD,
          jobProps.getProperty(ConfigurationKeys.JOB_CURRENT_GENERATION, "1")));
      metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD,
          "false"));
    }

    // use job execution id if flow execution id is not present
    metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD,
        jobProps.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, jobExecutionId)));

    //Use azkaban.flow.execid as the jobExecutionId
    metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.JOB_EXECUTION_ID_FIELD, jobExecutionId));

    metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD,
        jobProps.getProperty(ConfigurationKeys.JOB_GROUP_KEY, "")));
    metadataTags.add(new Tag<>(TimingEvent.FlowEventConstants.JOB_NAME_FIELD,
        jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY, "")));

    metadataTags.add(new Tag<>(TimingEvent.METADATA_MESSAGE, jobExecutionUrl));

    metadataTags.add(new Tag<>(AzkabanProjectConfig.USER_TO_PROXY,
        jobProps.getProperty(AzkabanProjectConfig.USER_TO_PROXY, "")));

    LOG.debug(String.format("AzkabanJobLauncher.addAdditionalMetadataTags: metadataTags %s", metadataTags));

    return metadataTags;
  }
}
| 3,386 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/UnreachableStatementException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
/**
* Used by {@link AzkabanClient} to indicate an unreachable code block.
*/
public class UnreachableStatementException extends AzkabanClientException {

  /**
   * Creates an exception carrying only a descriptive message.
   *
   * @param message description of the statement that should have been unreachable
   */
  public UnreachableStatementException(String message) {
    super(message);
  }

  /**
   * Creates an exception carrying a descriptive message and the underlying cause.
   *
   * @param message description of the statement that should have been unreachable
   * @param e the exception that surfaced at the unreachable point
   */
  public UnreachableStatementException(String message, Exception e) {
    super(message, e);
  }
}
| 3,387 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanJobHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import lombok.Cleanup;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.archivers.ArchiveStreamFactory;
import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import com.google.common.collect.Lists;
@Slf4j
public class AzkabanJobHelper {
/***
* Checks if an Azkaban project exists by name.
* @param sessionId Session Id.
* @param azkabanProjectConfig Azkaban Project Config that contains project name.
* @return true if project exists else false.
* @throws IOException
*/
public static boolean isAzkabanJobPresent(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Checking if Azkaban project: " + azkabanProjectConfig.getAzkabanProjectName() + " exists");
try {
// NOTE: hacky way to determine if project already exists because Azkaban does not provides a way to
// .. check if the project already exists or not
boolean isPresent = StringUtils.isNotBlank(AzkabanAjaxAPIClient.getProjectId(sessionId, azkabanProjectConfig));
log.info("Project exists: " + isPresent);
return isPresent;
} catch (IOException e) {
// Project doesn't exists
if (String.format("Project %s doesn't exist.", azkabanProjectConfig.getAzkabanProjectName())
.equalsIgnoreCase(e.getMessage())) {
log.info("Project does not exists.");
return false;
}
// Project exists but with no read access to current user
if ("Permission denied. Need READ access.".equalsIgnoreCase(e.getMessage())) {
log.info("Project exists, but current user does not has READ access.");
return true;
}
// Some other error
log.error("Issue in checking if project is present", e);
throw e;
}
}
/***
* Get Project Id by an Azkaban Project Name.
* @param sessionId Session Id.
* @param azkabanProjectConfig Azkaban Project Config that contains project Name.
* @return Project Id.
* @throws IOException
*/
public static String getProjectId(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Getting project Id for project: " + azkabanProjectConfig.getAzkabanProjectName());
String projectId = AzkabanAjaxAPIClient.getProjectId(sessionId, azkabanProjectConfig);
log.info("Project id: " + projectId);
return projectId;
}
/***
* Create project on Azkaban based on Azkaban config. This includes preparing the zip file and uploading it to
* Azkaban, setting permissions and schedule.
* @param sessionId Session Id.
* @param azkabanProjectConfig Azkaban Project Config.
* @return Project Id.
* @throws IOException
*/
public static String createAzkabanJob(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Creating Azkaban project for: " + azkabanProjectConfig.getAzkabanProjectName());
// Create zip file
String zipFilePath = createAzkabanJobZip(azkabanProjectConfig);
log.info("Zip file path: " + zipFilePath);
// Upload zip file to Azkaban
String projectId = AzkabanAjaxAPIClient.createAzkabanProject(sessionId, zipFilePath, azkabanProjectConfig);
log.info("Project Id: " + projectId);
return projectId;
}
/***
* Delete project on Azkaban based on Azkaban config.
* @param sessionId Session Id.
* @param azkabanProjectConfig Azkaban Project Config.
* @throws IOException
*/
public static void deleteAzkabanJob(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Deleting Azkaban project for: " + azkabanProjectConfig.getAzkabanProjectName());
// Delete project
AzkabanAjaxAPIClient.deleteAzkabanProject(sessionId, azkabanProjectConfig);
}
/***
* Replace project on Azkaban based on Azkaban config. This includes preparing the zip file and uploading it to
* Azkaban, setting permissions and schedule.
* @param sessionId Session Id.
* @param azkabanProjectId Project Id.
* @param azkabanProjectConfig Azkaban Project Config.
* @return Project Id.
* @throws IOException
*/
public static String replaceAzkabanJob(String sessionId, String azkabanProjectId,
AzkabanProjectConfig azkabanProjectConfig) throws IOException {
log.info("Replacing zip for Azkaban project: " + azkabanProjectConfig.getAzkabanProjectName());
// Create zip file
String zipFilePath = createAzkabanJobZip(azkabanProjectConfig);
log.info("Zip file path: " + zipFilePath);
// Replace the zip file on Azkaban
String projectId = AzkabanAjaxAPIClient.replaceAzkabanProject(sessionId, zipFilePath, azkabanProjectConfig);
log.info("Project Id: " + projectId);
return projectId;
}
/***
* Schedule an already created Azkaban project.
* @param sessionId Session Id.
* @param azkabanProjectId Project Id.
* @param azkabanProjectConfig Azkaban Project Config that contains schedule information.
* @throws IOException
*/
public static void scheduleJob(String sessionId, String azkabanProjectId,
AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Scheduling Azkaban project: " + azkabanProjectConfig.getAzkabanProjectName());
AzkabanAjaxAPIClient.scheduleAzkabanProject(sessionId, azkabanProjectId, azkabanProjectConfig);
}
/***
* Change the schedule of an already created Azkaban project.
* @param sessionId Session Id.
* @param azkabanProjectId Project Id.
* @param azkabanProjectConfig Azkaban Project Config that contains schedule information.
* @throws IOException
*/
public static void changeJobSchedule(String sessionId, String azkabanProjectId,
AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Changing schedule for Azkaban project: " + azkabanProjectConfig.getAzkabanProjectName());
AzkabanAjaxAPIClient.scheduleAzkabanProject(sessionId, azkabanProjectId, azkabanProjectConfig);
}
/***
* Execute an already created Azkaban project.
* @param sessionId Session Id.
* @param azkabanProjectId Project Id.
* @param azkabanProjectConfig Azkaban Project Config that contains schedule information.
* @throws IOException
*/
public static void executeJob(String sessionId, String azkabanProjectId,
AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Executing Azkaban project: " + azkabanProjectConfig.getAzkabanProjectName());
AzkabanAjaxAPIClient.executeAzkabanProject(sessionId, azkabanProjectId, azkabanProjectConfig);
}
/***
* Create Azkaban project zip file.
* @param azkabanProjectConfig Azkaban Project Config that contains information about what to include in
* zip file.
* @return Zip file path.
* @throws IOException
*/
private static String createAzkabanJobZip(AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
log.info("Creating Azkaban job zip file for project: " + azkabanProjectConfig.getAzkabanProjectName());
String workDir = azkabanProjectConfig.getWorkDir();
Optional<String> jarUrlTemplate = azkabanProjectConfig.getAzkabanZipJarUrlTemplate();
Optional<List<String>> jarNames = azkabanProjectConfig.getAzkabanZipJarNames();
Optional<String> jarVersion = azkabanProjectConfig.getAzkabanZipJarVersion();
Optional<List<String>> additionalFiles = azkabanProjectConfig.getAzkabanZipAdditionalFiles();
boolean failIfJarNotFound = azkabanProjectConfig.getFailIfJarNotFound();
String jobFlowName = azkabanProjectConfig.getAzkabanProjectFlowName();
String zipFilename = azkabanProjectConfig.getAzkabanProjectZipFilename();
// Download the job jars
List<File> filesToAdd = Lists.newArrayList();
if (jarNames.isPresent() && jarUrlTemplate.isPresent() && jarVersion.isPresent()) {
String urlTemplate = jarUrlTemplate.get();
String version = jarVersion.get();
for (String jarName : jarNames.get()) {
String jobJarUrl = urlTemplate.replaceAll("<module-version>", version).replaceAll("<module-name>", jarName);
log.info("Downloading job jar from: " + jobJarUrl + " to: " + workDir);
File jobJarFile = null;
try {
jobJarFile = downloadAzkabanJobJar(workDir, jobJarUrl);
filesToAdd.add(jobJarFile);
} catch (IOException e) {
if (failIfJarNotFound) {
throw e;
}
log.warn("Could not download: " + jobJarFile);
}
}
}
// Download additional files
if (additionalFiles.isPresent()) {
List<String> files = additionalFiles.get();
for (String fileName : files) {
log.info("Downloading additional file from: " + fileName + " to: " + workDir);
File additionalFile = null;
try {
additionalFile = downloadAzkabanJobJar(workDir, fileName);
filesToAdd.add(additionalFile);
} catch (IOException e) {
if(failIfJarNotFound) {
throw e;
}
log.warn("Could not download: " + additionalFile);
}
}
}
// Write the config files
log.info("Writing Azkaban config files");
File [] jobConfigFile = writeAzkabanConfigFiles(workDir, jobFlowName, azkabanProjectConfig);
filesToAdd.add(jobConfigFile[0]);
// Create the zip file
log.info("Writing zip file");
String zipfile = createZipFile(workDir, zipFilename, filesToAdd);
log.info("Wrote zip file: " + zipfile);
return zipfile;
}
private static String createZipFile(String directory, String zipFilename, List<File> filesToAdd)
throws IOException {
// Determine final zip file path
String zipFilePath = String.format("%s/%s", directory, zipFilename);
File zipFile = new File(zipFilePath);
if (zipFile.exists()) {
if (zipFile.delete()) {
log.info("Zipfile existed and was deleted: " + zipFilePath);
} else {
log.warn("Zipfile exists but was not deleted: " + zipFilePath);
}
}
// Create and add files to zip file
addFilesToZip(zipFile, filesToAdd);
return zipFilePath;
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value = "OBL_UNSATISFIED_OBLIGATION",
justification = "Lombok construct of @Cleanup is handing this, but not detected by FindBugs")
private static void addFilesToZip(File zipFile, List<File> filesToAdd) throws IOException {
try {
@Cleanup
OutputStream archiveStream = new FileOutputStream(zipFile);
@Cleanup
ArchiveOutputStream archive =
new ArchiveStreamFactory().createArchiveOutputStream(ArchiveStreamFactory.ZIP, archiveStream);
for (File fileToAdd : filesToAdd) {
ZipArchiveEntry entry = new ZipArchiveEntry(fileToAdd.getName());
archive.putArchiveEntry(entry);
@Cleanup
BufferedInputStream input = new BufferedInputStream(new FileInputStream(fileToAdd));
IOUtils.copy(input, archive);
archive.closeArchiveEntry();
}
archive.finish();
} catch (ArchiveException e) {
throw new IOException("Issue with creating archive", e);
}
}
private static File[] writeAzkabanConfigFiles(String workDir, String flowName, AzkabanProjectConfig azkabanProjectConfig)
throws IOException {
// Determine final config file path
String jobFilePath = String.format("%s/%s.job", workDir, flowName);
File jobFile = new File(jobFilePath);
if (jobFile.exists()) {
if (jobFile.delete()) {
log.info("JobFile existed and was deleted: " + jobFilePath);
} else {
log.warn("JobFile exists but was not deleted: " + jobFilePath);
}
}
StringBuilder propertyFileContent = new StringBuilder();
for (Map.Entry entry : azkabanProjectConfig.getJobSpec().getConfigAsProperties().entrySet()) {
propertyFileContent.append(String.format("%s=%s", entry.getKey(), entry.getValue())).append("\n");
}
// Write the job file
FileUtils.writeStringToFile(jobFile, propertyFileContent.toString(), Charset.forName("UTF-8"),true);
return new File[] {jobFile};
}
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value = "OBL_UNSATISFIED_OBLIGATION",
      justification = "Lombok construct of @Cleanup is handing this, but not detected by FindBugs")
  private static File downloadAzkabanJobJar(String workDir, String jobJarUrl)
      throws IOException {
    // Determine final jar file path: the local name is the last path segment of the URL.
    String[] jobJarUrlParts = jobJarUrl.trim().split("/");
    String jobJarName = jobJarUrlParts[jobJarUrlParts.length-1];
    String jobJarFilePath = String.format("%s/%s", workDir, jobJarName);
    File jobJarFile = new File(jobJarFilePath);
    // Remove any previously downloaded copy so the fresh download replaces it.
    if (jobJarFile.exists()) {
      if (jobJarFile.delete()) {
        log.info("JobJarFilePath existed and was deleted: " + jobJarFilePath);
      } else {
        log.warn("JobJarFilePath exists but was not deleted: " + jobJarFilePath);
      }
    }
    // Create work directory if not already exists
    FileUtils.forceMkdir(new File(workDir));
    // Download jar file from artifactory
    // NOTE(review): URL.openStream() uses default connect/read timeouts
    // (potentially unbounded) — confirm this is acceptable for the deployment.
    @Cleanup InputStream jobJarInputStream = new URL(jobJarUrl).openStream();
    @Cleanup OutputStream jobJarOutputStream = new FileOutputStream(jobJarFile);
    IOUtils.copy(jobJarInputStream, jobJarOutputStream);
    // TODO: compare checksum
    return jobJarFile;
  }
}
| 3,388 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanFetchExecuteFlowStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* This status captures execution details returned by {@link AzkabanClient#fetchFlowExecution(String)}
*
* The execution details are captured by {@link Execution}
*/
public class AzkabanFetchExecuteFlowStatus extends AzkabanClientStatus<AzkabanFetchExecuteFlowStatus.Execution> {
public AzkabanFetchExecuteFlowStatus(AzkabanFetchExecuteFlowStatus.Execution exec) {
super(exec);
}
@Getter
@AllArgsConstructor
public static class Execution {
Map<String, String> map;
}
}
| 3,389 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/ServiceAzkabanConfigKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
/**
 * Configuration keys used by the Gobblin service when orchestrating flows on Azkaban.
 * All keys share the {@link #GOBBLIN_SERVICE_AZKABAN_PREFIX} prefix except the
 * system-environment and classpath-resource constants at the bottom.
 */
public class ServiceAzkabanConfigKeys {
  public static final String GOBBLIN_SERVICE_AZKABAN_PREFIX = "gobblin.service.azkaban.";
  // Azkaban Session Specifics
  public static final String AZKABAN_USERNAME_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "username";
  public static final String AZKABAN_PASSWORD_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "password";
  public static final String AZKABAN_SERVER_URL_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "server.url";
  public static final String AZKABAN_PROJECT_NAME_PREFIX_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.namePrefix";
  public static final String AZKABAN_PROJECT_DESCRIPTION_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.description";
  public static final String AZKABAN_PROJECT_USER_TO_PROXY_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.userToProxy";
  public static final String AZKABAN_PROJECT_FLOW_NAME_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.flowName";
  public static final String AZKABAN_PROJECT_GROUP_ADMINS_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.groupAdmins";
  public static final String AZKABAN_PROJECT_ZIP_JAR_URL_TEMPLATE_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.zip.jarUrlTemplate";
  public static final String AZKABAN_PROJECT_ZIP_JAR_NAMES_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.zip.jarNames";
  public static final String AZKABAN_PROJECT_ZIP_JAR_VERSION_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.zip.jarVersion";
  public static final String AZKABAN_PROJECT_ZIP_FAIL_IF_JARNOTFOUND_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.zip.failIfJarNotFound";
  public static final String AZKABAN_PROJECT_ZIP_ADDITIONAL_FILE_URLS_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.zip.additionalFilesUrl";
  public static final String AZKABAN_PROJECT_OVERWRITE_IF_EXISTS_KEY = GOBBLIN_SERVICE_AZKABAN_PREFIX + "project.overwriteIfExists";
  // Azkaban System Environment
  public static final String AZKABAN_PASSWORD_SYSTEM_KEY = "GOBBLIN_SERVICE_AZKABAN_PASSWORD";
  public static final String DEFAULT_AZKABAN_PROJECT_CONFIG_FILE = "default-service-azkaban.conf";
  public static final String AZKABAN_PRODUCER_CLASS = GOBBLIN_SERVICE_AZKABAN_PREFIX + "producer.class";

  /** Constants holder; never instantiated. */
  private ServiceAzkabanConfigKeys() {
  }
}
| 3,390 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanGetProxyUsersStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* Status to return response from {@link AzkabanClient#getProxyUsers(String)}
*/
public class AzkabanGetProxyUsersStatus extends AzkabanClientStatus<AzkabanGetProxyUsersStatus.ProxyUsers> {
public AzkabanGetProxyUsersStatus(AzkabanGetProxyUsersStatus.ProxyUsers proxyUsers) {
super(proxyUsers);
}
@Getter
@AllArgsConstructor
public static class ProxyUsers {
Map<String, String> map;
}
}
| 3,391 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanExecuteFlowStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* This status captures execution id returned by {@link AzkabanClient#executeFlowWithOptions}
*/
public class AzkabanExecuteFlowStatus extends AzkabanClientStatus<AzkabanExecuteFlowStatus.ExecuteId> {
public AzkabanExecuteFlowStatus(ExecuteId executeId) {
super(executeId);
}
@Getter
@AllArgsConstructor
public static class ExecuteId {
String execId;
}
}
| 3,392 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanClientStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import lombok.Getter;
/**
 * Base type for results returned by Azkaban client operations; carries an
 * optional typed response payload.
 *
 * @param <RS> type of the response payload
 */
public abstract class AzkabanClientStatus<RS> {
  private RS response = null;

  public AzkabanClientStatus() {
  }

  public AzkabanClientStatus(RS response) {
    this.response = response;
  }

  /** Returns the response payload, or {@code null} if none was set. */
  public RS getResponse() {
    return this.response;
  }
}
| 3,393 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanSessionManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import org.apache.http.impl.client.CloseableHttpClient;
/**
* A {@link SessionManager} that implements session refreshing logic
* used by {@link AzkabanClient}.
*/
public class AzkabanSessionManager implements SessionManager {
  // All state is set once at construction; final fields make the class
  // immutable and safe to share.
  private final CloseableHttpClient httpClient;
  private final String url;
  private final String username;
  private final String password;

  /**
   * @param httpClient client used to talk to the Azkaban server
   * @param url base URL of the Azkaban server
   * @param username login user
   * @param password login password
   */
  public AzkabanSessionManager(CloseableHttpClient httpClient,
                               String url,
                               String username,
                               String password) {
    this.httpClient = httpClient;
    this.username = username;
    this.password = password;
    this.url = url;
  }

  /**
   * Fetch a session id that can be used in the future to communicate with Azkaban server.
   *
   * @return session id
   * @throws AzkabanClientException if the login request fails
   */
  public String fetchSession() throws AzkabanClientException {
    return SessionHelper.getSessionId(this.httpClient, this.url, this.username, this.password);
  }
}
| 3,394 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanMultiCallables.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import com.google.common.io.Closer;
import lombok.Builder;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.mime.MultipartEntityBuilder;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicNameValuePair;
import java.io.File;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
/**
* This class encapsulates all the operations an {@link AzkabanClient} can do.
*/
class AzkabanMultiCallables {
  /**
   * This class can never be instantiated; it only hosts the callable types.
   */
  private AzkabanMultiCallables() {
  }
  /**
   * A callable that creates a project on Azkaban via a POST to
   * {@code /manager} with {@code action=create}.
   */
  @Builder
  static class CreateProjectCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String projectName;
    private String description;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanClientStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        // Re-login only if a previous attempt flagged the session as invalid.
        client.refreshSession(this.invalidSession);
        HttpPost httpPost = new HttpPost(client.url + "/manager");
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.ACTION, "create"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.NAME, projectName));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.DESCRIPTION, description));
        httpPost.setEntity(new UrlEncodedFormEntity(nvps));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        httpPost.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpPost);
        // Register with the closer so the response is closed even on failure.
        closer.register(response);
        AzkabanClient.handleResponse(response);
        return new AzkabanSuccess();
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow. NOTE(review): assumes the caller
        // retries this callable so the flag takes effect — confirm.
        this.invalidSession = true;
        throw e;
      } catch (Throwable e) {
        throw new AzkabanClientException("Azkaban client cannot create project = "
            + projectName, e);
      }
    }
  }
  /**
   * A callable that deletes a project on Azkaban via a GET to
   * {@code /manager} with {@code delete=true}.
   */
  @Builder
  static class DeleteProjectCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String projectName;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanClientStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.DELETE, "true"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.PROJECT, projectName));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        HttpGet httpGet = new HttpGet(client.url + "/manager?" + URLEncodedUtils.format(nvps, "UTF-8"));
        httpGet.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpGet);
        closer.register(response);
        // Unlike the other callables this only checks the HTTP status code,
        // presumably because the delete endpoint does not return a JSON body — confirm.
        AzkabanClient.verifyStatusCode(response);
        return new AzkabanSuccess();
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Throwable e) {
        throw new AzkabanClientException("Azkaban client cannot delete project = "
            + projectName, e);
      }
    }
  }
  /**
   * A callable that uploads a project zip to Azkaban via a multipart POST
   * to {@code /manager} with {@code ajax=upload}.
   */
  @Builder
  static class UploadProjectCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String projectName;
    private File zipFile;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanClientStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        HttpPost httpPost = new HttpPost(client.url + "/manager");
        // The zip is sent as a binary multipart part alongside the form fields.
        HttpEntity entity = MultipartEntityBuilder.create()
            .addTextBody(AzkabanClientParams.SESSION_ID, client.sessionId)
            .addTextBody(AzkabanClientParams.AJAX, "upload")
            .addTextBody(AzkabanClientParams.PROJECT, projectName)
            .addBinaryBody("file", zipFile,
                ContentType.create("application/zip"), zipFile.getName())
            .build();
        httpPost.setEntity(entity);
        CloseableHttpResponse response = client.httpClient.execute(httpPost);
        closer.register(response);
        AzkabanClient.handleResponse(response);
        return new AzkabanSuccess();
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Throwable e) {
        throw new AzkabanClientException("Azkaban client cannot upload zip to project = "
            + projectName, e);
      }
    }
  }
  /**
   * A callable that triggers execution of a flow on Azkaban via a POST to
   * {@code /executor} with {@code ajax=executeFlow}; returns the execution id.
   */
  @Builder
  static class ExecuteFlowCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String projectName;
    private String flowName;
    // Options are sent as-is; parameters are wrapped as flowOverride[key]=value.
    private Map<String, String> flowOptions;
    private Map<String, String> flowParameters;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanExecuteFlowStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        HttpPost httpPost = new HttpPost(client.url + "/executor");
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.AJAX, "executeFlow"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.PROJECT, projectName));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.FLOW, flowName));
        // "ignore" skips this launch if the same flow is already running.
        nvps.add(new BasicNameValuePair(AzkabanClientParams.CONCURRENT_OPTION, "ignore"));
        addFlowOptions(nvps, flowOptions);
        addFlowParameters(nvps, flowParameters);
        httpPost.setEntity(new UrlEncodedFormEntity(nvps));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        httpPost.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpPost);
        closer.register(response);
        Map<String, String> map = AzkabanClient.handleResponse(response);
        return new AzkabanExecuteFlowStatus(
            new AzkabanExecuteFlowStatus.ExecuteId(map.get(AzkabanClientParams.EXECID)));
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Exception e) {
        throw new AzkabanClientException("Azkaban client cannot execute flow = "
            + flowName, e);
      }
    }
    // Adds flow parameters as flowOverride[key]=value pairs; blank keys/values are skipped.
    private void addFlowParameters(List<NameValuePair> nvps, Map<String, String> flowParams) {
      if (flowParams != null) {
        for (Map.Entry<String, String> entry : flowParams.entrySet()) {
          String key = entry.getKey();
          String value = entry.getValue();
          if (StringUtils.isNotBlank(key) && StringUtils.isNotBlank(value)) {
            nvps.add(new BasicNameValuePair("flowOverride[" + key + "]", value));
          }
        }
      }
    }
    // Adds flow options verbatim (no blank filtering, unlike parameters).
    private void addFlowOptions(List<NameValuePair> nvps, Map<String, String> flowOptions) {
      if (flowOptions != null) {
        for (Map.Entry<String, String> option : flowOptions.entrySet()) {
          nvps.add(new BasicNameValuePair(option.getKey(), option.getValue()));
        }
      }
    }
  }
/**
* A callable that will cancel a flow on Azkaban.
*/
@Builder
static class CancelFlowCallable implements Callable<AzkabanClientStatus> {
private AzkabanClient client;
private String execId;
private boolean invalidSession = false;
@Override
public AzkabanClientStatus call()
throws AzkabanClientException {
try (Closer closer = Closer.create()) {
client.refreshSession(this.invalidSession);
List<NameValuePair> nvps = new ArrayList<>();
nvps.add(new BasicNameValuePair(AzkabanClientParams.AJAX, "cancelFlow"));
nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
nvps.add(new BasicNameValuePair(AzkabanClientParams.EXECID, String.valueOf(execId)));
Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
HttpGet httpGet = new HttpGet(client.url + "/executor?" + URLEncodedUtils.format(nvps, "UTF-8"));
httpGet.setHeaders(new Header[]{contentType, requestType});
CloseableHttpResponse response = client.httpClient.execute(httpGet);
closer.register(response);
AzkabanClient.handleResponse(response);
return new AzkabanSuccess();
} catch (InvalidSessionException e) {
this.invalidSession = true;
throw e;
} catch (Exception e) {
throw new AzkabanClientException("Azkaban client cannot cancel flow execId = "
+ execId, e);
}
}
}
  /**
   * A callable that fetches the status of a flow execution via a GET to
   * {@code /executor} with {@code ajax=fetchexecflow}.
   */
  @Builder
  static class FetchFlowExecCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String execId;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanFetchExecuteFlowStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.AJAX, "fetchexecflow"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.EXECID, execId));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        HttpGet httpGet = new HttpGet(client.url + "/executor?" + URLEncodedUtils.format(nvps, "UTF-8"));
        httpGet.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpGet);
        closer.register(response);
        // The parsed response map is returned verbatim inside an Execution wrapper.
        Map<String, String> map = AzkabanClient.handleResponse(response);
        return new AzkabanFetchExecuteFlowStatus(new AzkabanFetchExecuteFlowStatus.Execution(map));
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Exception e) {
        throw new AzkabanClientException("Azkaban client cannot "
            + "fetch execId " + execId, e);
      }
    }
  }
  /**
   * A callable that fetches the list of flows in a project via a GET to
   * {@code /manager} with {@code ajax=fetchprojectflows}.
   */
  @Builder
  static class FetchProjectFlowsCallable implements Callable<AzkabanProjectFlowsStatus> {
    private AzkabanClient client;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    private String projectName;
    @Override
    public AzkabanProjectFlowsStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.AJAX, "fetchprojectflows"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.PROJECT, projectName));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        HttpGet httpGet = new HttpGet(client.url + "/manager?" + URLEncodedUtils.format(nvps, "UTF-8"));
        httpGet.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpGet);
        closer.register(response);
        // Unlike the other callables, the JSON body is deserialized into a typed Project.
        AzkabanProjectFlowsStatus.Project project =
            AzkabanClient.handleResponse(response, AzkabanProjectFlowsStatus.Project.class);
        return new AzkabanProjectFlowsStatus(project);
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Exception e) {
        throw new AzkabanClientException("Azkaban client cannot fetch project flows", e);
      }
    }
  }
  /**
   * A callable that fetches a segment of a job's execution log via a GET to
   * {@code /executor} with {@code ajax=fetchExecJobLogs} and writes it to the
   * supplied output stream. Note: the output stream is closed by this callable.
   */
  @Builder
  static class FetchExecLogCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String execId;
    private String jobId;
    // Byte offset and length of the log segment to fetch.
    private long offset;
    private long length;
    // Destination for the log text; closed when the write completes.
    private OutputStream output;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanClientStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.AJAX, "fetchExecJobLogs"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.EXECID, execId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.JOBID, jobId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.OFFSET, String.valueOf(offset)));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.LENGTH, String.valueOf(length)));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        HttpGet httpGet = new HttpGet(client.url + "/executor?" + URLEncodedUtils.format(nvps, "UTF-8"));
        httpGet.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpGet);
        closer.register(response);
        Map<String, String> map = AzkabanClient.handleResponse(response);
        // Closing the writer also closes the caller-supplied 'output' stream.
        // NOTE(review): if the response lacks a DATA field this writes null and
        // throws NPE — confirm handleResponse guarantees the field.
        try (Writer logWriter = new OutputStreamWriter(output, StandardCharsets.UTF_8)) {
          logWriter.write(map.get(AzkabanClientParams.DATA));
        }
        return new AzkabanSuccess();
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Exception e) {
        throw new AzkabanClientException("Azkaban client cannot "
            + "fetch execId " + execId, e);
      }
    }
  }
  /**
   * A callable that adds a proxy user to a project via a GET to
   * {@code /manager} with {@code ajax=addProxyUser}.
   */
  @Builder
  static class AddProxyUserCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String projectName;
    private String proxyUserName;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanClientStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.AJAX, "addProxyUser"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.PROJECT, projectName));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.NAME, proxyUserName));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        HttpGet httpGet = new HttpGet(client.url + "/manager?" + URLEncodedUtils.format(nvps, "UTF-8"));
        httpGet.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpGet);
        closer.register(response);
        AzkabanClient.handleResponse(response);
        return new AzkabanSuccess();
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Exception e) {
        throw new AzkabanClientException("Azkaban client cannot add proxy user " + proxyUserName, e);
      }
    }
  }
  /**
   * A callable that retrieves the proxy users of a project via a GET to
   * {@code /manager} with {@code ajax=getProxyUsers}.
   */
  @Builder
  static class GetProxyUserCallable implements Callable<AzkabanClientStatus> {
    private AzkabanClient client;
    private String projectName;
    // Set when the server rejects our session id; the next call() then forces a re-login.
    private boolean invalidSession = false;
    @Override
    public AzkabanClientStatus call()
        throws AzkabanClientException {
      try (Closer closer = Closer.create()) {
        client.refreshSession(this.invalidSession);
        List<NameValuePair> nvps = new ArrayList<>();
        nvps.add(new BasicNameValuePair(AzkabanClientParams.AJAX, "getProxyUsers"));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.SESSION_ID, client.sessionId));
        nvps.add(new BasicNameValuePair(AzkabanClientParams.PROJECT, projectName));
        Header contentType = new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/x-www-form-urlencoded");
        Header requestType = new BasicHeader("X-Requested-With", "XMLHttpRequest");
        HttpGet httpGet = new HttpGet(client.url + "/manager?" + URLEncodedUtils.format(nvps, "UTF-8"));
        httpGet.setHeaders(new Header[]{contentType, requestType});
        CloseableHttpResponse response = client.httpClient.execute(httpGet);
        closer.register(response);
        // The raw response map is wrapped without further parsing.
        Map<String, String> map = AzkabanClient.handleResponse(response);
        return new AzkabanGetProxyUsersStatus(new AzkabanGetProxyUsersStatus.ProxyUsers(map));
      } catch (InvalidSessionException e) {
        // Flag the stale session and rethrow so a retry re-authenticates.
        this.invalidSession = true;
        throw e;
      } catch (Exception e) {
        throw new AzkabanClientException(String.format("Azkaban client failed to get proxy users for %s", client.url), e);
      }
    }
  }
}
| 3,395 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanSpecExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.util.concurrent.Future;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.runtime.spec_executorInstance.AbstractSpecExecutor;
import org.apache.gobblin.util.CompletedFuture;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * A {@link AbstractSpecExecutor} that executes {@link Spec}s by handing them to an
 * Azkaban-backed {@link SpecProducer}. The producer implementation is configurable via
 * {@link ServiceAzkabanConfigKeys#AZKABAN_PRODUCER_CLASS} and defaults to
 * {@link AzkabanSpecProducer}.
 */
public class AzkabanSpecExecutor extends AbstractSpecExecutor {

  // Effective configuration: caller-supplied config with the default Azkaban
  // project config file as fallback.
  protected final Config _config;
  private final SpecProducer<Spec> azkabanSpecProducer;

  public AzkabanSpecExecutor(Config config) {
    this(config, Optional.absent());
  }

  /**
   * @param config executor configuration; merged with the default Azkaban project config file
   * @param log optional logger forwarded to the parent executor
   * @throws RuntimeException if the configured producer class cannot be loaded or instantiated
   */
  public AzkabanSpecExecutor(Config config, Optional<Logger> log) {
    super(config, log);
    Config defaultConfig = ConfigFactory.load(ServiceAzkabanConfigKeys.DEFAULT_AZKABAN_PROJECT_CONFIG_FILE);
    _config = config.withFallback(defaultConfig);

    try {
      Class<?> producerClass = Class.forName(ConfigUtils.getString(_config,
          ServiceAzkabanConfigKeys.AZKABAN_PRODUCER_CLASS,
          AzkabanSpecProducer.class.getName()));
      // Cast is unavoidable with reflective construction; a misconfigured class that does not
      // implement SpecProducer<Spec> surfaces as a ClassCastException on first use.
      @SuppressWarnings("unchecked")
      SpecProducer<Spec> producer = (SpecProducer<Spec>) GobblinConstructorUtils
          .invokeLongestConstructor(producerClass, _config);
      this.azkabanSpecProducer = producer;
    } catch (ReflectiveOperationException e) {
      // Prefer the underlying cause (e.g. the exception thrown by the producer's
      // constructor) so the real failure is at the top of the stack trace.
      if (e.getCause() != null) {
        throw new RuntimeException("Could not instantiate spec producer", e.getCause());
      } else {
        throw new RuntimeException("Could not instantiate spec producer", e);
      }
    }
  }

  @Override
  public Future<String> getDescription() {
    // Fixed copy-paste from SimpleSpecExecutorInstance: describe this executor accurately.
    return new CompletedFuture<>("AzkabanSpecExecutor with URI: " + specExecutorInstanceUri, null);
  }

  @Override
  public Future<? extends SpecProducer<Spec>> getProducer() {
    return new CompletedFuture<>(this.azkabanSpecProducer, null);
  }

  @Override
  public Future<Config> getConfig() {
    return new CompletedFuture<>(_config, null);
  }

  @Override
  public Future<String> getHealth() {
    return new CompletedFuture<>("Healthy", null);
  }

  @Override
  protected void startUp() throws Exception {
    // nothing to do in default implementation
  }

  @Override
  protected void shutDown() throws Exception {
    // nothing to do in default implementation
  }
}
| 3,396 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanProjectFlowsStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.util.List;
import lombok.AllArgsConstructor;
import lombok.Getter;
public class AzkabanProjectFlowsStatus extends AzkabanClientStatus<AzkabanProjectFlowsStatus.Project> {
public AzkabanProjectFlowsStatus(AzkabanProjectFlowsStatus.Project project) {
super(project);
}
// Those classes represent Azkaban API response
// For more details, see: https://azkaban.readthedocs.io/en/latest/ajaxApi.html#fetch-flows-of-a-project
@Getter
@AllArgsConstructor
public static class Project {
long projectId;
List<Flow> flows;
}
@Getter
@AllArgsConstructor
public static class Flow {
String flowId;
}
} | 3,397 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanSuccess.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
/**
* A successful status for {@link AzkabanClient}.
*/
/**
 * Represents a successful {@link AzkabanClient} operation, optionally carrying the
 * raw response body returned by Azkaban.
 */
public class AzkabanSuccess extends AzkabanClientStatus<String> {

  /** Creates a success status with an empty response payload. */
  public AzkabanSuccess() {
    super("");
  }

  /** Creates a success status carrying {@code response} as the payload. */
  public AzkabanSuccess(String response) {
    super(response);
  }
}
| 3,398 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/SessionHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import com.google.gson.JsonObject;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpEntity;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * A helper class which can get a session id using the Azkaban authentication mechanism.
 *
 * @see <a href="https://azkaban.github.io/azkaban/docs/latest/#api-authenticate">
 *   https://azkaban.github.io/azkaban/docs/latest/#api-authenticate
 * </a>
 */
public class SessionHelper {

  // Utility class: not meant to be instantiated.
  private SessionHelper() {
  }

  /**
   * <p>Use the Azkaban ajax api to fetch the session id. Required http request parameters are:
   * <br>action=login The fixed parameter indicating the login action.
   * <br>username The Azkaban username.
   * <br>password The corresponding password.
   * </p>
   *
   * @param httpClient An apache http client
   * @param url Azkaban ajax endpoint
   * @param username username for Azkaban login
   * @param password password for Azkaban login
   *
   * @return session id
   * @throws AzkabanClientException if the login request fails or the response cannot be parsed
   */
  public static String getSessionId(CloseableHttpClient httpClient, String url, String username, String password)
      throws AzkabanClientException {
    HttpPost httpPost = new HttpPost(url);
    List<NameValuePair> nvps = new ArrayList<>();
    nvps.add(new BasicNameValuePair(AzkabanClientParams.ACTION, "login"));
    nvps.add(new BasicNameValuePair(AzkabanClientParams.USERNAME, username));
    nvps.add(new BasicNameValuePair(AzkabanClientParams.PASSWORD, password));

    try {
      httpPost.setEntity(new UrlEncodedFormEntity(nvps));
      // try-with-resources guarantees the response is closed on every path,
      // replacing the manual finally block of the original implementation.
      try (CloseableHttpResponse response = httpClient.execute(httpPost)) {
        try {
          HttpEntity entity = response.getEntity();
          // retrieve session id from entity
          String jsonResponseString = IOUtils.toString(entity.getContent(), "UTF-8");
          JsonObject jsonObject = AzkabanClient.parseResponse(jsonResponseString);
          Map<String, String> responseMap = AzkabanClient.getFlatMap(jsonObject);
          String sessionId = responseMap.get(AzkabanClientParams.SESSION_ID);
          EntityUtils.consume(entity);
          return sessionId;
        } catch (Exception e) {
          throw new AzkabanClientException("Azkaban client cannot consume session response.", e);
        }
      }
    } catch (AzkabanClientException e) {
      // Already carries the specific failure context; do not double-wrap it under the
      // generic "cannot fetch session" message (the original code re-wrapped it here).
      throw e;
    } catch (Exception e) {
      throw new AzkabanClientException("Azkaban client cannot fetch session.", e);
    }
  }
}
| 3,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.