index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/objectstore/ObjectStoreConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.objectstore;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.writer.objectstore.ObjectStoreOperation;
/**
 * A converter of {@link ObjectStoreOperation}s. The output record of any subclasses is of type {@link ObjectStoreOperation}
 *
 * @param <SI> Type of input record schema
 * @param <DI> Input record type
 * @param <DO> Type of {@link ObjectStoreOperation}
 */
@Alpha
public abstract class ObjectStoreConverter<SI, DI, DO extends ObjectStoreOperation<?>> extends Converter<SI, Class<?>, DI, DO> {
// No per-instance setup is needed here; subclasses may override to read work-unit config.
public ObjectStoreConverter<SI, DI, DO> init(WorkUnitState workUnit) {
return this;
}
/**
 * Convert schema is not used by this converter, hence return the {@link Class} of the input schema as a place holder.
 * {@inheritDoc}
 * @see org.apache.gobblin.converter.Converter#convertSchema(java.lang.Object, org.apache.gobblin.configuration.WorkUnitState)
 */
@Override
public Class<?> convertSchema(SI inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
return ObjectStoreOperation.class;
}
}
| 2,900 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/json/JsonToStringConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.json;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
/**
 * A {@link Converter} to transform {@link JsonObject} records into their JSON string form.
 * The schema (a string) is passed through unchanged.
 */
public class JsonToStringConverter extends Converter<String, String, JsonObject, String> {
  // Gson is thread-safe and stateless for toJson(); keep a single immutable instance.
  // (Was non-final, which allowed accidental reassignment of a shared static.)
  private static final Gson GSON = new Gson();

  /** Pass-through: the output schema is the input schema string. */
  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return inputSchema;
  }

  /**
   * Serializes the input {@link JsonObject} to a JSON string.
   * @return a single-element iterable containing the serialized record
   */
  @Override
  public Iterable<String> convertRecord(String outputSchema, JsonObject inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    return Lists.newArrayList(GSON.toJson(inputRecord));
  }
}
| 2,901 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/json/BytesToJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.json;
import com.google.common.base.Charsets;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
/**
 * Converter that takes a UTF-8 encoded JSON byte array and parses it into a {@link JsonObject}.
 * The schema (a string) is passed through unchanged.
 */
public class BytesToJsonConverter extends Converter<String, String, byte[], JsonObject> {
  /** Pass-through: the output schema is the input schema string. */
  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit) {
    return inputSchema;
  }

  /**
   * Decodes the bytes as UTF-8 and parses the result as a JSON object.
   * @throws DataConversionException if the input record is null
   */
  @Override
  public Iterable<JsonObject> convertRecord(String outputSchema, byte[] inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    if (inputRecord == null) {
      throw new DataConversionException("Input record is null");
    }
    String decoded = new String(inputRecord, Charsets.UTF_8);
    JsonObject parsedRecord = new JsonParser().parse(decoded).getAsJsonObject();
    return new SingleRecordIterable<>(parsedRecord);
  }
}
| 2,902 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/json/JsonSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.json;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type;
import org.apache.gobblin.source.extractor.schema.Schema;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.ENUM;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.FIXED;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.RECORD;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.UNION;
import static org.apache.gobblin.converter.json.JsonSchema.SchemaType.CHILD;
import static org.apache.gobblin.converter.json.JsonSchema.SchemaType.ROOT;
/**
* Represents a source schema declared in the configuration with {@link ConfigurationKeys#SOURCE_SCHEMA}.
* The source schema is represented by a {@link JsonArray}.
* @author tilakpatidar
*/
public class JsonSchema extends Schema {
public static final String RECORD_FIELDS_KEY = "values";
public static final String TYPE_KEY = "type";
public static final String NAME_KEY = "name";
public static final String SIZE_KEY = "size";
public static final String ENUM_SYMBOLS_KEY = "symbols";
public static final String COLUMN_NAME_KEY = "columnName";
public static final String DATA_TYPE_KEY = "dataType";
public static final String COMMENT_KEY = "comment";
public static final String DEFAULT_VALUE_KEY = "defaultValue";
public static final String IS_NULLABLE_KEY = "isNullable";
public static final String DEFAULT_RECORD_COLUMN_NAME = "root";
public static final String DEFAULT_VALUE_FOR_OPTIONAL_PROPERTY = "";
public static final String ARRAY_ITEMS_KEY = "items";
public static final String MAP_ITEMS_KEY = "values";
public static final String SOURCE_TYPE = "source.type";
private final Type type;
private final JsonObject json;
private final SchemaType schemaNestedLevel;
private JsonSchema secondType;
private JsonSchema firstType;
private JsonArray jsonArray;
public enum SchemaType {
ROOT, CHILD
}
/**
* Build a {@link JsonSchema} using {@link JsonArray}
* This will create a {@link SchemaType} of {@link SchemaType#ROOT}
* @param jsonArray
*/
public JsonSchema(JsonArray jsonArray) {
JsonObject jsonObject = new JsonObject();
JsonObject dataType = new JsonObject();
jsonObject.addProperty(COLUMN_NAME_KEY, DEFAULT_RECORD_COLUMN_NAME);
dataType.addProperty(TYPE_KEY, RECORD.toString());
dataType.add(RECORD_FIELDS_KEY, jsonArray);
jsonObject.add(DATA_TYPE_KEY, dataType);
setJsonSchemaProperties(jsonObject);
this.type = RECORD;
this.json = jsonObject;
this.jsonArray = jsonArray;
this.schemaNestedLevel = ROOT;
}
/**
* Build a {@link JsonSchema} using {@link JsonArray}
* This will create a {@link SchemaType} of {@link SchemaType#CHILD}
* @param jsonObject
*/
public JsonSchema(JsonObject jsonObject) {
JsonObject root = new JsonObject();
if (!jsonObject.has(COLUMN_NAME_KEY) && !jsonObject.has(DATA_TYPE_KEY)) {
root.addProperty(COLUMN_NAME_KEY, DEFAULT_RECORD_COLUMN_NAME);
root.add(DATA_TYPE_KEY, jsonObject);
jsonObject = root;
}
if (!jsonObject.has(COLUMN_NAME_KEY) && jsonObject.has(DATA_TYPE_KEY)) {
jsonObject.addProperty(COLUMN_NAME_KEY, DEFAULT_RECORD_COLUMN_NAME);
}
setJsonSchemaProperties(jsonObject);
JsonElement typeElement = getDataType().get(TYPE_KEY);
if (typeElement.isJsonPrimitive()) {
this.type = Type.valueOf(typeElement.getAsString().toUpperCase());
} else if (typeElement.isJsonArray()) {
JsonArray jsonArray = typeElement.getAsJsonArray();
if (jsonArray.size() != 2) {
throw new RuntimeException("Invalid " + TYPE_KEY + "property in schema for union types");
}
this.type = UNION;
JsonElement type1 = jsonArray.get(0);
JsonElement type2 = jsonArray.get(1);
if (type1.isJsonPrimitive()) {
this.firstType = buildBaseSchema(Type.valueOf(type1.getAsString().toUpperCase()));
}
if (type2.isJsonPrimitive()) {
this.secondType = buildBaseSchema(Type.valueOf(type2.getAsString().toUpperCase()));
}
if (type1.isJsonObject()) {
this.firstType = buildBaseSchema(type1.getAsJsonObject());
}
if (type2.isJsonObject()) {
this.secondType = buildBaseSchema(type2.getAsJsonObject());
}
} else {
throw new RuntimeException("Invalid " + TYPE_KEY + "property in schema");
}
this.json = jsonObject;
JsonArray jsonArray = new JsonArray();
jsonArray.add(jsonObject);
this.jsonArray = jsonArray;
this.schemaNestedLevel = CHILD;
}
/**
* Get symbols for a {@link Type#ENUM} type.
* @return
*/
public JsonArray getSymbols() {
if (this.type.equals(ENUM)) {
return getDataType().get(ENUM_SYMBOLS_KEY).getAsJsonArray();
}
return new JsonArray();
}
/**
* Get {@link Type} for this {@link JsonSchema}.
* @return
*/
public Type getType() {
return type;
}
/**
* Builds a {@link JsonSchema} object for a given {@link Type} object.
* @param type
* @return
*/
public static JsonSchema buildBaseSchema(Type type) {
JsonObject jsonObject = new JsonObject();
JsonObject dataType = new JsonObject();
jsonObject.addProperty(COLUMN_NAME_KEY, DEFAULT_RECORD_COLUMN_NAME);
dataType.addProperty(TYPE_KEY, type.toString());
jsonObject.add(DATA_TYPE_KEY, dataType);
return new JsonSchema(jsonObject);
}
/**
* Builds a {@link JsonSchema} object for a given {@link Type} object.
* @return
*/
public static JsonSchema buildBaseSchema(JsonObject root) {
root.addProperty(COLUMN_NAME_KEY, DEFAULT_RECORD_COLUMN_NAME);
return new JsonSchema(root);
}
/**
* Get optional property from a {@link JsonObject} for a {@link String} key.
* If key does'nt exists returns {@link #DEFAULT_VALUE_FOR_OPTIONAL_PROPERTY}.
* @param jsonObject
* @param key
* @return
*/
public static String getOptionalProperty(JsonObject jsonObject, String key) {
return jsonObject.has(key) ? jsonObject.get(key).getAsString() : DEFAULT_VALUE_FOR_OPTIONAL_PROPERTY;
}
/**
* Fetches dataType.values from the JsonObject
* @return
*/
public JsonSchema getValuesWithinDataType() {
JsonElement element = this.getDataType().get(MAP_ITEMS_KEY);
if (element.isJsonObject()) {
return new JsonSchema(element.getAsJsonObject());
}
if (element.isJsonArray()) {
return new JsonSchema(element.getAsJsonArray());
}
if (element.isJsonPrimitive()) {
return buildBaseSchema(Type.valueOf(element.getAsString().toUpperCase()));
}
throw new UnsupportedOperationException(
"Map values can only be defined using JsonObject, JsonArray or JsonPrimitive.");
}
/**
* Gets size for fixed type viz dataType.size from the JsonObject
* @return
*/
public int getSizeOfFixedData() {
if (this.type.equals(FIXED)) {
return this.getDataType().get(SIZE_KEY).getAsInt();
}
return 0;
}
public boolean isType(Type type) {
return this.type.equals(type);
}
/**
* Fetches the nested or primitive array items type from schema.
* @return
* @throws DataConversionException
*/
public Type getTypeOfArrayItems()
throws DataConversionException {
JsonSchema arrayValues = getItemsWithinDataType();
if (arrayValues == null) {
throw new DataConversionException("Array types only allow values as primitive, null or JsonObject");
}
return arrayValues.getType();
}
public JsonSchema getItemsWithinDataType() {
JsonElement element = this.getDataType().get(ARRAY_ITEMS_KEY);
if (element.isJsonObject()) {
return new JsonSchema(element.getAsJsonObject());
}
if (element.isJsonPrimitive()) {
return buildBaseSchema(Type.valueOf(element.getAsString().toUpperCase()));
}
throw new UnsupportedOperationException("Array items can only be defined using JsonObject or JsonPrimitive.");
}
public JsonSchema getFirstTypeSchema() {
return this.firstType;
}
public JsonSchema getSecondTypeSchema() {
return this.secondType;
}
public int fieldsCount() {
return this.jsonArray.size();
}
public JsonSchema getFieldSchemaAt(int i) {
if (i >= this.jsonArray.size()) {
return new JsonSchema(this.json);
}
return new JsonSchema(this.jsonArray.get(i).getAsJsonObject());
}
public List<JsonSchema> getDataTypes() {
if (firstType != null && secondType != null) {
return Arrays.asList(firstType, secondType);
}
return Collections.singletonList(this);
}
public boolean isRoot() {
return this.schemaNestedLevel.equals(ROOT);
}
public String getName() {
return getOptionalProperty(this.getDataType(), NAME_KEY);
}
/**
* Set properties for {@link JsonSchema} from a {@link JsonObject}.
* @param jsonObject
*/
private void setJsonSchemaProperties(JsonObject jsonObject) {
setColumnName(jsonObject.get(COLUMN_NAME_KEY).getAsString());
setDataType(jsonObject.get(DATA_TYPE_KEY).getAsJsonObject());
setNullable(jsonObject.has(IS_NULLABLE_KEY) && jsonObject.get(IS_NULLABLE_KEY).getAsBoolean());
setComment(getOptionalProperty(jsonObject, COMMENT_KEY));
setDefaultValue(getOptionalProperty(jsonObject, DEFAULT_VALUE_KEY));
}
} | 2,903 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/json/JsonStringToJsonIntermediateConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.json;
import java.io.IOException;
import java.util.Map.Entry;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.FIXED;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.MAP;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.NULL;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.RECORD;
import static org.apache.gobblin.converter.json.JsonSchema.DEFAULT_RECORD_COLUMN_NAME;
/**
 * Converts a json string to a {@link JsonObject}, optionally validating/unpacking
 * complex values (records, maps, arrays, unions, enums, fixed) against the declared schema.
 */
public class JsonStringToJsonIntermediateConverter extends Converter<String, JsonArray, String, JsonObject> {
  private final static Logger log = LoggerFactory.getLogger(JsonStringToJsonIntermediateConverter.class);
  private static final String UNPACK_COMPLEX_SCHEMAS_KEY =
      "gobblin.converter.jsonStringToJsonIntermediate.unpackComplexSchemas";
  public static final boolean DEFAULT_UNPACK_COMPLEX_SCHEMAS_KEY = Boolean.TRUE;
  // Set once per task in convertSchema(); controls whether convertRecord() validates nested values.
  private boolean unpackComplexSchemas;

  /**
   * Take in an input schema of type string; the schema must be in JSON format.
   * @return a JsonArray representation of the schema
   */
  @Override
  public JsonArray convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    this.unpackComplexSchemas =
        workUnit.getPropAsBoolean(UNPACK_COMPLEX_SCHEMAS_KEY, DEFAULT_UNPACK_COMPLEX_SCHEMAS_KEY);
    JsonParser jsonParser = new JsonParser();
    log.info("Schema: " + inputSchema);
    JsonElement jsonSchema = jsonParser.parse(inputSchema);
    return jsonSchema.getAsJsonArray();
  }

  /**
   * Takes in a record with format String and uses the inputSchema to convert the record to a JsonObject.
   * @return a JsonObject representing the record
   * @throws DataConversionException when the record does not conform to the schema
   */
  @Override
  public Iterable<JsonObject> convertRecord(JsonArray outputSchema, String strInputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    JsonParser jsonParser = new JsonParser();
    JsonObject inputRecord = (JsonObject) jsonParser.parse(strInputRecord);
    if (!this.unpackComplexSchemas) {
      return new SingleRecordIterable<>(inputRecord);
    }
    JsonSchema schema = new JsonSchema(outputSchema);
    JsonObject rec = parse(inputRecord, schema);
    // Was a raw SingleRecordIterable (unchecked warning); use the diamond operator.
    return new SingleRecordIterable<>(rec);
  }

  /**
   * Parses a single JsonElement against a schema by wrapping it into a synthetic
   * single-field record and unwrapping the parsed result.
   * @throws DataConversionException if the value does not match the schema
   */
  private JsonElement parse(JsonElement element, JsonSchema schema)
      throws DataConversionException {
    JsonObject root = new JsonObject();
    root.add(DEFAULT_RECORD_COLUMN_NAME, element);
    JsonObject jsonObject = parse(root, schema);
    return jsonObject.get(DEFAULT_RECORD_COLUMN_NAME);
  }

  /**
   * Parses a provided JsonObject input using the provided schema into a JsonObject.
   * Fields missing from the record are emitted as {@link JsonNull}.
   * @throws DataConversionException if a value does not match its field schema
   */
  private JsonObject parse(JsonObject record, JsonSchema schema)
      throws DataConversionException {
    JsonObject output = new JsonObject();
    for (int i = 0; i < schema.fieldsCount(); i++) {
      JsonSchema schemaElement = schema.getFieldSchemaAt(i);
      String columnKey = schemaElement.getColumnName();
      JsonElement parsed;
      if (!record.has(columnKey)) {
        output.add(columnKey, JsonNull.INSTANCE);
        continue;
      }
      JsonElement columnValue = record.get(columnKey);
      switch (schemaElement.getType()) {
        case UNION:
          parsed = parseUnionType(schemaElement, columnValue);
          break;
        case ENUM:
          parsed = parseEnumType(schemaElement, columnValue);
          break;
        default:
          if (columnValue.isJsonArray()) {
            parsed = parseJsonArrayType(schemaElement, columnValue);
          } else if (columnValue.isJsonObject()) {
            parsed = parseJsonObjectType(schemaElement, columnValue);
          } else {
            parsed = parsePrimitiveType(schemaElement, columnValue);
          }
      }
      output.add(columnKey, parsed);
    }
    return output;
  }

  /**
   * Tries the first union branch; falls back to the second on conversion failure.
   */
  private JsonElement parseUnionType(JsonSchema schemaElement, JsonElement columnValue)
      throws DataConversionException {
    try {
      return parse(columnValue, schemaElement.getFirstTypeSchema());
    } catch (DataConversionException e) {
      return parse(columnValue, schemaElement.getSecondTypeSchema());
    }
  }

  /**
   * Parses enum type values, checking membership in the declared symbol set.
   * @throws DataConversionException if the value is not a declared symbol
   */
  private JsonElement parseEnumType(JsonSchema schema, JsonElement value)
      throws DataConversionException {
    if (schema.getSymbols().contains(value)) {
      return value;
    }
    throw new DataConversionException(
        "Invalid symbol: " + value.getAsString() + " allowed values: " + schema.getSymbols().toString());
  }

  /**
   * Parses JsonArray type values; primitive item types pass through unchanged,
   * complex item types are parsed element by element.
   */
  private JsonElement parseJsonArrayType(JsonSchema schema, JsonElement value)
      throws DataConversionException {
    Type arrayType = schema.getTypeOfArrayItems();
    JsonArray tempArray = new JsonArray();
    if (Type.isPrimitive(arrayType)) {
      return value;
    }
    JsonSchema nestedSchema = schema.getItemsWithinDataType();
    for (JsonElement v : value.getAsJsonArray()) {
      tempArray.add(parse(v, nestedSchema));
    }
    return tempArray;
  }

  /**
   * Parses JsonObject type values: MAP entries are parsed against the value schema,
   * RECORD values against the nested field list; anything else becomes JsonNull.
   */
  private JsonElement parseJsonObjectType(JsonSchema schema, JsonElement value)
      throws DataConversionException {
    JsonSchema valuesWithinDataType = schema.getValuesWithinDataType();
    if (schema.isType(MAP)) {
      if (Type.isPrimitive(valuesWithinDataType.getType())) {
        return value;
      }
      JsonObject map = new JsonObject();
      for (Entry<String, JsonElement> mapEntry : value.getAsJsonObject().entrySet()) {
        JsonElement mapValue = mapEntry.getValue();
        map.add(mapEntry.getKey(), parse(mapValue, valuesWithinDataType));
      }
      return map;
    } else if (schema.isType(RECORD)) {
      JsonSchema schemaArray = valuesWithinDataType.getValuesWithinDataType();
      return parse((JsonObject) value, schemaArray);
    } else {
      return JsonNull.INSTANCE;
    }
  }

  /**
   * Parses primitive types, honoring nullability and the FIXED size constraint.
   * @throws DataConversionException on null/non-null mismatch or wrong fixed size
   */
  private JsonElement parsePrimitiveType(JsonSchema schema, JsonElement value)
      throws DataConversionException {
    if ((schema.isType(NULL) || schema.isNullable()) && value.isJsonNull()) {
      return JsonNull.INSTANCE;
    }
    if ((schema.isType(NULL) && !value.isJsonNull()) || (!schema.isType(NULL) && value.isJsonNull())) {
      throw new DataConversionException(
          "Type mismatch for " + value.toString() + " of type " + schema.getDataTypes().toString());
    }
    if (schema.isType(FIXED)) {
      int expectedSize = schema.getSizeOfFixedData();
      if (value.getAsString().length() == expectedSize) {
        return value;
      } else {
        throw new DataConversionException(
            "Fixed type value is not same as defined value expected fieldsCount: " + expectedSize);
      }
    } else {
      return value;
    }
  }
}
| 2,904 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/AvroToBytesConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Collections;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
/**
 * Convert an Avro GenericRecord back to its byte representation. Note: This converter returns
 * the raw bytes for a record - it does not return a container file. If you want to write
 * Avro records out to a container file do not use this converter; instead use the AvroDataWriter
 * writer.
 */
public class AvroToBytesConverter extends Converter<Schema, String, GenericRecord, byte[]> {
  // Initialized once in convertSchema(); shared by all convertRecord() calls.
  private GenericDatumWriter<GenericRecord> writer;
  // Per-thread encoder cache so EncoderFactory.binaryEncoder() can reuse its buffer.
  // A plain ThreadLocal suffices: its default initialValue() already returns null,
  // which binaryEncoder() accepts as "no encoder to reuse" — the anonymous subclass
  // overriding initialValue() to return null was redundant. Also made final.
  private final ThreadLocal<BinaryEncoder> encoderCache = new ThreadLocal<>();

  /**
   * Captures the input Avro schema for serialization and returns its string form.
   */
  @Override
  public String convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    writer = new GenericDatumWriter<>(inputSchema);
    return inputSchema.toString();
  }

  /**
   * Serializes one record to raw Avro binary bytes (no container/file framing).
   * @throws DataConversionException if Avro serialization fails
   */
  @Override
  public Iterable<byte[]> convertRecord(String outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
      // Reuse this thread's encoder if one exists; cache the (possibly new) encoder for next time.
      BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(bytesOut, encoderCache.get());
      encoderCache.set(encoder);
      writer.write(inputRecord, encoder);
      encoder.flush();
      return Collections.singleton(bytesOut.toByteArray());
    } catch (IOException e) {
      throw new DataConversionException("Error serializing record", e);
    }
  }
}
| 2,905 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/UnsupportedDateTypeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
/**
 * Thrown when a schema field declares a data type that the JSON-to-Avro
 * conversion does not support.
 */
public class UnsupportedDateTypeException extends Exception {
  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the unsupported type (e.g. "{type} is unsupported")
   */
  public UnsupportedDateTypeException(String message) {
    super(message);
  }
}
| 2,906 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/JsonElementConversionWithAvroSchemaFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
/**
* Creates a converter for Json types to Avro types. Overrides {@link ArrayConverter}, {@link MapConverter},
* and {@link EnumConverter} from {@link JsonElementConversionFactory} to use an Avro schema instead of Json schema for
* determining type
*/
public class JsonElementConversionWithAvroSchemaFactory extends JsonElementConversionFactory {
/**
 * Creates a {@link JsonElementConverter} for a single field from an Avro schema node.
 * Complex types (array, map, enum, record, union) need the Avro schema to resolve
 * their nested types; all other types fall back to the Json-schema based factory.
 */
public static JsonElementConverter getConverter(String fieldName, String fieldType, Schema schemaNode,
    WorkUnitState state, boolean nullable, List<String> ignoreFields) throws UnsupportedDateTypeException {
  final Type resolvedType;
  try {
    resolvedType = Type.valueOf(fieldType.toUpperCase());
  } catch (IllegalArgumentException e) {
    throw new UnsupportedDateTypeException(fieldType + " is unsupported");
  }
  switch (resolvedType) {
    case ARRAY:
      return new JsonElementConversionWithAvroSchemaFactory.ArrayConverter(fieldName, nullable,
          resolvedType.toString(), schemaNode, state, ignoreFields);
    case MAP:
      return new JsonElementConversionWithAvroSchemaFactory.MapConverter(fieldName, nullable,
          resolvedType.toString(), schemaNode, state, ignoreFields);
    case ENUM:
      return new JsonElementConversionWithAvroSchemaFactory.EnumConverter(fieldName, nullable,
          resolvedType.toString(), schemaNode);
    case RECORD:
      return new JsonElementConversionWithAvroSchemaFactory.RecordConverter(fieldName, nullable,
          resolvedType.toString(), schemaNode, state, ignoreFields);
    case UNION:
      return new JsonElementConversionWithAvroSchemaFactory.UnionConverter(fieldName, nullable,
          resolvedType.toString(), schemaNode, state, ignoreFields);
    default:
      return JsonElementConversionFactory.getConvertor(fieldName, fieldType, new JsonObject(), state, nullable);
  }
}
/**
 * Converts a JSON array into an Avro {@link GenericData.Array}, using a
 * per-element converter resolved from the Avro array schema's element type.
 */
public static class ArrayConverter extends ComplexConverter {
  public ArrayConverter(String fieldName, boolean nullable, String sourceType, Schema schemaNode, WorkUnitState state,
      List<String> ignoreFields) throws UnsupportedDateTypeException {
    super(fieldName, nullable, sourceType);
    // Resolve the element converter from the Avro array's element schema.
    Schema elementSchema = schemaNode.getElementType();
    super.setElementConverter(
        getConverter(fieldName, elementSchema.getType().getName(), elementSchema, state, isNullable(), ignoreFields));
  }

  @Override
  Object convertField(JsonElement value) {
    List<Object> converted = new ArrayList<>();
    JsonElementConverter elementConverter = getElementConverter();
    for (JsonElement item : (JsonArray) value) {
      converted.add(elementConverter.convert(item));
    }
    return new GenericData.Array<>(schema(), converted);
  }

  @Override
  public Schema.Type getTargetType() {
    return Schema.Type.ARRAY;
  }

  @Override
  public Schema schema() {
    Schema arraySchema = Schema.createArray(getElementConverter().schema());
    arraySchema.addProp("source.type", "array");
    return arraySchema;
  }
}
public static class MapConverter extends ComplexConverter {
public MapConverter(String fieldName, boolean nullable, String sourceType, Schema schemaNode, WorkUnitState state,
List<String> ignoreFields) throws UnsupportedDateTypeException {
super(fieldName, nullable, sourceType);
super.setElementConverter(
getConverter(fieldName, schemaNode.getValueType().getType().getName(), schemaNode.getValueType(), state,
isNullable(), ignoreFields));
}
@Override
Object convertField(JsonElement value) {
Map<String, Object> map = new HashMap<>();
for (Map.Entry<String, JsonElement> entry : ((JsonObject) value).entrySet()) {
map.put(entry.getKey(), getElementConverter().convert(entry.getValue()));
}
return map;
}
@Override
public Schema.Type getTargetType() {
return Schema.Type.MAP;
}
@Override
public Schema schema() {
Schema schema = Schema.createMap(getElementConverter().schema());
schema.addProp("source.type", "map");
return schema;
}
}
public static class EnumConverter extends JsonElementConverter {
String enumName;
List<String> enumSet = new ArrayList<>();
Schema schema;
public EnumConverter(String fieldName, boolean nullable, String sourceType, Schema schemaNode) {
super(fieldName, nullable, sourceType);
this.enumSet.addAll(schemaNode.getEnumSymbols());
this.enumName = schemaNode.getFullName();
this.schema = schemaNode;
}
@Override
Object convertField(JsonElement value) {
String valueString = value.getAsString();
Preconditions.checkArgument(this.enumSet.contains(valueString),
"%s is not one of the valid symbols for the %s enum: %s", valueString, this.enumName, this.enumSet);
return new GenericData.EnumSymbol(this.schema, valueString);
}
@Override
public Schema.Type getTargetType() {
return Schema.Type.ENUM;
}
@Override
public Schema schema() {
this.schema = Schema.createEnum(this.enumName, "", "", this.enumSet);
this.schema.addProp("source.type", "enum");
return this.schema;
}
}
public static class RecordConverter extends ComplexConverter {
List<String> ignoreFields;
Schema schema;
WorkUnitState state;
public RecordConverter(String fieldName, boolean nullable, String sourceType, Schema schemaNode,
WorkUnitState state, List<String> ignoreFields) {
super(fieldName, nullable, sourceType);
this.schema = schemaNode;
this.state = state;
this.ignoreFields = ignoreFields;
}
@Override
Object convertField(JsonElement value) {
try {
return JsonRecordAvroSchemaToAvroConverter.convertNestedRecord(this.schema, value.getAsJsonObject(), this.state,
this.ignoreFields);
} catch (DataConversionException e) {
throw new RuntimeException("Failed to convert nested record", e);
}
}
@Override
public Schema.Type getTargetType() {
return Schema.Type.RECORD;
}
@Override
public Schema schema() {
return this.schema;
}
}
/**
* A converter to convert Union type to avro
* Here it will try all the possible converters for one type, for example, to convert an int value, it will try all Number converters
* until meet the first workable one.
* So a known bug here is there's no guarantee on preserving precision from Json to Avro type as the exact type information is clear from JsonElement
* We're doing this since there is no way to determine what exact type it is for a JsonElement
*/
public static class UnionConverter extends ComplexConverter {
private final List<Schema> schemas;
private final List<JsonElementConverter> converters;
private final Schema schemaNode;
public UnionConverter(String fieldName, boolean nullable, String sourceType, Schema schemaNode,
WorkUnitState state, List<String> ignoreFields) throws UnsupportedDateTypeException {
super(fieldName, nullable, sourceType);
this.schemas = schemaNode.getTypes();
converters = new ArrayList<>();
for(Schema schema: schemas) {
converters.add(getConverter(fieldName, schema.getType().getName(), schemaNode, state, isNullable(), ignoreFields));
}
this.schemaNode = schemaNode;
}
@Override
Object convertField(JsonElement value) {
for(JsonElementConverter converter: converters)
{
try {
switch (converter.getTargetType()) {
case STRING: {
if (value.isJsonPrimitive() && value.getAsJsonPrimitive().isString()) {
return converter.convert(value);
}
break;
}
case FIXED:
case BYTES:
case INT:
case LONG:
case FLOAT:
case DOUBLE: {
if (value.isJsonPrimitive() && value.getAsJsonPrimitive().isNumber()) {
return converter.convert(value);
}
break;
}
case BOOLEAN:{
if (value.isJsonPrimitive() && value.getAsJsonPrimitive().isBoolean()) {
return converter.convert(value);
}
break;
}
case ARRAY:{
if (value.isJsonArray()) {
return converter.convert(value);
}
break;
}
case MAP:
case ENUM:
case RECORD:{
if (value.isJsonObject()) {
return converter.convert(value);
}
break;
}
case NULL:{
if(value.isJsonNull()) {
return converter.convert(value);
}
break;
}
case UNION:
return new UnsupportedDateTypeException("does not support union type in union");
default:
return converter.convert(value);
}
} catch (Exception e){}
}
throw new RuntimeException(String.format("Cannot convert %s to avro using schema %s", value.getAsString(), schemaNode.toString()));
}
@Override
public Schema.Type getTargetType() {
return schema().getType();
}
@Override
public Schema schema() {
if(schemas.size() == 2 && isNullable()) {
if(schemas.get(0).getType() == Schema.Type.NULL) {
return schemas.get(1);
} else {
return schemas.get(0);
}
}
return Schema.createUnion(schemas);
}
@Override
public boolean isNullable() {
boolean isNullable = false;
for(Schema schema: schemas) {
if(schema.getType() == Schema.Type.NULL) {
isNullable = true;
}
}
return isNullable;
}
}
} | 2,907 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/JsonRecordAvroSchemaToAvroConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.converter.ToAvroConverterBase;
import com.google.common.base.Preconditions;
import com.google.gson.JsonObject;
import com.google.gson.JsonNull;
import com.google.common.base.Splitter;
/**
* {@link Converter} that takes an Avro schema from config and corresponding {@link JsonObject} records and
* converts them to {@link GenericRecord} using the schema
*/
public class JsonRecordAvroSchemaToAvroConverter<SI> extends ToAvroConverterBase<SI, JsonObject> {
  private static final Splitter SPLITTER_ON_COMMA = Splitter.on(',').trimResults().omitEmptyStrings();

  // Avro schema parsed from CONVERTER_AVRO_SCHEMA_KEY in convertSchema().
  private Schema schema;
  // Field names to skip during conversion, from CONVERTER_IGNORE_FIELDS.
  private List<String> ignoreFields;

  /**
   * Reads the comma-separated list of fields to ignore from
   * {@link ConfigurationKeys#CONVERTER_IGNORE_FIELDS}.
   */
  @Override
  public ToAvroConverterBase<SI, JsonObject> init(WorkUnitState workUnit) {
    super.init(workUnit);
    this.ignoreFields = SPLITTER_ON_COMMA.splitToList(workUnit.getProp(ConfigurationKeys.CONVERTER_IGNORE_FIELDS, ""));
    return this;
  }

  /**
   * Ignore input schema and parse in Avro schema from config. The property
   * {@link ConfigurationKeys#CONVERTER_AVRO_SCHEMA_KEY} must be present in the work unit.
   */
  @Override
  public Schema convertSchema(SI inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    Preconditions.checkArgument(workUnit.contains(ConfigurationKeys.CONVERTER_AVRO_SCHEMA_KEY));
    this.schema = new Schema.Parser().parse(workUnit.getProp(ConfigurationKeys.CONVERTER_AVRO_SCHEMA_KEY));
    return this.schema;
  }

  /**
   * Take in {@link JsonObject} input records and convert them to {@link GenericRecord}
   * using outputSchema.
   */
  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, JsonObject inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    GenericRecord avroRecord = convertNestedRecord(outputSchema, inputRecord, workUnit, this.ignoreFields);
    return new SingleRecordIterable<>(avroRecord);
  }

  /**
   * Recursively converts a {@link JsonObject} to a {@link GenericRecord} following outputSchema.
   *
   * <p>Union fields must have exactly two branches, one of which is NULL. When a union field is
   * absent from the input it is treated as an explicit Json null — note that this mutates
   * {@code inputRecord} by inserting a {@link JsonNull} for the missing key.
   *
   * @param outputSchema Avro record schema driving the conversion
   * @param inputRecord Json record to convert (may be mutated, see above)
   * @param workUnit work unit state passed through to field converters
   * @param ignoreFields field names to skip
   * @throws DataConversionException if a non-union field is missing, a union is not of the
   *         form [null, T] / [T, null], or a field value cannot be converted
   */
  public static GenericRecord convertNestedRecord(Schema outputSchema, JsonObject inputRecord, WorkUnitState workUnit,
      List<String> ignoreFields) throws DataConversionException {
    GenericRecord avroRecord = new GenericData.Record(outputSchema);
    JsonElementConversionWithAvroSchemaFactory.JsonElementConverter converter;

    for (Schema.Field field : outputSchema.getFields()) {
      if (ignoreFields.contains(field.name())) {
        continue;
      }
      Schema.Type type = field.schema().getType();
      boolean nullable = false;
      Schema schema = field.schema();

      if (type.equals(Schema.Type.UNION)) {
        nullable = true;
        List<Schema> types = field.schema().getTypes();
        if (types.size() != 2) {
          throw new DataConversionException("Unions must be size 2, and contain one null");
        }
        // Unwrap the non-null branch of the [null, T] / [T, null] union.
        if (field.schema().getTypes().get(0).getType().equals(Schema.Type.NULL)) {
          schema = field.schema().getTypes().get(1);
          type = schema.getType();
        } else if (field.schema().getTypes().get(1).getType().equals(Schema.Type.NULL)) {
          schema = field.schema().getTypes().get(0);
          type = schema.getType();
        } else {
          throw new DataConversionException("Unions must be size 2, and contain one null");
        }
        // A nullable field that is absent from the input is treated as explicitly null.
        if (inputRecord.get(field.name()) == null) {
          inputRecord.add(field.name(), JsonNull.INSTANCE);
        }
      }
      if (inputRecord.get(field.name()) == null) {
        throw new DataConversionException("Field missing from record: " + field.name());
      }
      if (type.equals(Schema.Type.RECORD)) {
        if (nullable && inputRecord.get(field.name()).isJsonNull()) {
          avroRecord.put(field.name(), null);
        } else {
          avroRecord.put(field.name(),
              convertNestedRecord(schema, inputRecord.get(field.name()).getAsJsonObject(), workUnit, ignoreFields));
        }
      } else {
        try {
          converter = JsonElementConversionWithAvroSchemaFactory.getConverter(field.name(), type.getName(), schema,
              workUnit, nullable, ignoreFields);
          avroRecord.put(field.name(), converter.convert(inputRecord.get(field.name())));
        } catch (Exception e) {
          throw new DataConversionException("Could not convert field " + field.name(), e);
        }
      }
    }
    return avroRecord;
  }
}
| 2,908 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/AvroFieldRetrieverConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.EmptyIterable;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* A converter class where the input is an Avro record, and the output is a specific field in that record. Since the
* field can be of any type this Converter returns a Java {@link Object}. The parameter converter.avro.extractor.field.path
* specifies the location of the field to retrieve. Nested fields can be specified by following use the following
* syntax: field.nestedField
*/
public class AvroFieldRetrieverConverter extends Converter<Schema, Schema, GenericRecord, Object> {

  /** Dot-separated path (e.g. {@code field.nestedField}) of the field to extract from each record. */
  private String fieldLocation;

  /**
   * Reads the (possibly fork-branched) field-path property; fails fast if it is missing,
   * since this converter is meaningless without it.
   */
  @Override
  public Converter<Schema, Schema, GenericRecord, Object> init(WorkUnitState workUnit) {
    String branchedPropKey =
        ForkOperatorUtils.getPropertyNameForBranch(workUnit,
            ConfigurationKeys.CONVERTER_AVRO_EXTRACTOR_FIELD_PATH);
    Preconditions.checkArgument(workUnit.contains(branchedPropKey),
        "The converter " + this.getClass().getName() + " cannot be used without setting the property "
            + ConfigurationKeys.CONVERTER_AVRO_EXTRACTOR_FIELD_PATH);
    this.fieldLocation = workUnit.getProp(branchedPropKey);
    return this;
  }

  /**
   * Resolves the configured field path against the input schema; returns null when the
   * path does not exist in the schema.
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return AvroUtils.getFieldSchema(inputSchema, this.fieldLocation).orNull();
  }

  /**
   * Emits the value at the configured field path, or nothing when the record has no
   * value at that path.
   */
  @Override
  public Iterable<Object> convertRecord(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    Optional<Object> extracted = AvroUtils.getFieldValue(inputRecord, this.fieldLocation);
    if (extracted.isPresent()) {
      return new SingleRecordIterable<>(extracted.get());
    }
    return new EmptyIterable<>();
  }
}
| 2,909 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/AvroRecursionEliminatingConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.math3.util.Pair;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.AvroToAvroConverterBase;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.AvroUtils;
/**
* A converter that removes recursion from Avro Generic Records
*/
@Slf4j
public class AvroRecursionEliminatingConverter extends AvroToAvroConverterBase {

  /**
   * Drops recursive fields from the input schema, logging which fields were removed.
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    Pair<Schema, List<AvroUtils.SchemaEntry>> results = AvroUtils.dropRecursiveFields(inputSchema);
    List<AvroUtils.SchemaEntry> recursiveFields = results.getSecond();
    if (!recursiveFields.isEmpty()) {
      log.warn("Schema {} is recursive. Will drop fields [{}]", inputSchema.getFullName(),
          recursiveFields.stream().map(AvroUtils.SchemaEntry::getFieldName).collect(Collectors.joining(",")));
      log.debug("Projected Schema = {}", results.getFirst());
    }
    return results.getFirst();
  }

  /**
   * Re-projects each record onto the recursion-free output schema.
   *
   * @throws DataConversionException if the record cannot be projected onto outputSchema
   */
  @Override
  public Iterable<GenericRecord> convertRecordImpl(Schema outputSchema, GenericRecord inputRecord,
      WorkUnitState workUnit)
      throws DataConversionException {
    try {
      // Parameterized (was a raw SingleRecordIterable), avoiding an unchecked conversion.
      return new SingleRecordIterable<>(AvroUtils.convertRecordSchema(inputRecord, outputSchema));
    } catch (IOException e) {
      throw new DataConversionException("Failed to convert", e);
    }
  }
}
| 2,910 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/JsonIntermediateToAvroConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.converter.ToAvroConverterBase;
import org.apache.gobblin.converter.avro.JsonElementConversionFactory.RecordConverter;
import org.apache.gobblin.converter.json.JsonSchema;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.EmptyIterable;
import org.apache.gobblin.util.WriterUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
/**
* Converts Integra's intermediate data format to avro
*
* @author kgoodhop
*
*/
public class JsonIntermediateToAvroConverter extends ToAvroConverterBase<JsonArray, JsonObject> {
  private static final Logger LOG = LoggerFactory.getLogger(JsonIntermediateToAvroConverter.class);

  private static final String CONVERTER_AVRO_NULLIFY_FIELDS_ENABLED = "converter.avro.nullify.fields.enabled";
  private static final boolean DEFAULT_CONVERTER_AVRO_NULLIFY_FIELDS_ENABLED = Boolean.FALSE;
  private static final String CONVERTER_AVRO_NULLIFY_FIELDS_ORIGINAL_SCHEMA_PATH =
      "converter.avro.nullify.fields.original.schema.path";

  // Built once per work unit in convertSchema() and reused for every record.
  private RecordConverter recordConverter;

  /**
   * Builds an Avro record schema from the Json intermediate schema. When field
   * nullification is enabled, fields present in a previous schema but absent from the
   * current one are merged back in as nullable fields.
   */
  @Override
  public Schema convertSchema(JsonArray schema, WorkUnitState workUnit)
      throws SchemaConversionException {
    try {
      JsonSchema jsonSchema = new JsonSchema(schema);
      jsonSchema.setColumnName(workUnit.getExtract().getTable());
      recordConverter = new RecordConverter(jsonSchema, workUnit, workUnit.getExtract().getNamespace());
    } catch (UnsupportedDateTypeException e) {
      throw new SchemaConversionException(e);
    }
    Schema recordSchema = recordConverter.schema();

    if (workUnit
        .getPropAsBoolean(CONVERTER_AVRO_NULLIFY_FIELDS_ENABLED, DEFAULT_CONVERTER_AVRO_NULLIFY_FIELDS_ENABLED)) {
      return this.generateSchemaWithNullifiedField(workUnit, recordSchema);
    }
    return recordSchema;
  }

  /**
   * Converts one Json record to an Avro {@link GenericRecord}; emits nothing when the
   * record converter signals an empty result.
   */
  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, JsonObject inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    Object record = recordConverter.convert(inputRecord);
    if (record instanceof EmptyIterable) {
      return (EmptyIterable<GenericRecord>) record;
    }
    // Reuse the already-converted record; the previous code invoked convert() a second
    // time here, doing the conversion work twice for every record.
    return new SingleRecordIterable<>((GenericRecord) record);
  }

  /**
   * Generate new avro schema by nullifying fields that previously existed but not in the
   * current schema.
   *
   * @param workUnitState work unit state
   * @param currentAvroSchema current schema
   * @return merged schema with previous fields nullified; on failure to read the original
   *         schema, the current schema is returned unchanged (best-effort behavior)
   */
  protected Schema generateSchemaWithNullifiedField(WorkUnitState workUnitState, Schema currentAvroSchema) {
    Configuration conf = new Configuration();
    for (String key : workUnitState.getPropertyNames()) {
      conf.set(key, workUnitState.getProp(key));
    }
    // Get the original schema for merging.
    Path originalSchemaPath = null;
    if (workUnitState.contains(CONVERTER_AVRO_NULLIFY_FIELDS_ORIGINAL_SCHEMA_PATH)) {
      originalSchemaPath = new Path(workUnitState.getProp(CONVERTER_AVRO_NULLIFY_FIELDS_ORIGINAL_SCHEMA_PATH));
    } else {
      // If the path to get the original schema is not specified in the configuration,
      // adopt the best-try policy to search adjacent output folders.
      LOG.info("Property " + CONVERTER_AVRO_NULLIFY_FIELDS_ORIGINAL_SCHEMA_PATH
          + " is not specified. Trying to get the original schema from previous avro files.");
      originalSchemaPath = WriterUtils
          .getDataPublisherFinalDir(workUnitState, workUnitState.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1),
              workUnitState.getPropAsInt(ConfigurationKeys.FORK_BRANCH_ID_KEY, 0)).getParent();
    }
    try {
      Schema prevSchema = AvroUtils.getDirectorySchema(originalSchemaPath, conf, false);
      Schema mergedSchema = AvroUtils.nullifyFieldsForSchemaMerge(prevSchema, currentAvroSchema);
      return mergedSchema;
    } catch (IOException ioe) {
      LOG.error("Unable to nullify fields. Will retain the current avro schema.", ioe);
      return currentAvroSchema;
    }
  }
}
| 2,911 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/AvroToAvroCopyableConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.fork.CopyableGenericRecord;
import org.apache.gobblin.fork.CopyableSchema;
/**
* Implementation of {@link Converter} that takes in an Avro {@link Schema} and {@link GenericRecord} and returns a
* {@link org.apache.gobblin.fork.CopyableSchema} and a {@link org.apache.gobblin.fork.CopyableGenericRecord}.
*/
public class AvroToAvroCopyableConverter
    extends Converter<Schema, CopyableSchema, GenericRecord, CopyableGenericRecord> {

  /**
   * Wraps the incoming Avro {@link Schema} in a {@link org.apache.gobblin.fork.CopyableSchema}.
   *
   * @see org.apache.gobblin.converter.Converter#convertSchema(java.lang.Object, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public CopyableSchema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return new CopyableSchema(inputSchema);
  }

  /**
   * Wraps the incoming {@link GenericRecord} in a
   * {@link org.apache.gobblin.fork.CopyableGenericRecord} and emits it as a single-element iterable.
   *
   * @see org.apache.gobblin.converter.Converter#convertRecord(java.lang.Object, java.lang.Object, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Iterable<CopyableGenericRecord> convertRecord(CopyableSchema outputSchema, GenericRecord inputRecord,
      WorkUnitState workUnit) throws DataConversionException {
    CopyableGenericRecord copyable = new CopyableGenericRecord(inputRecord);
    return new SingleRecordIterable<>(copyable);
  }
}
| 2,912 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/JsonElementConversionFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.util.Utf8;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.EmptyIterable;
import org.apache.gobblin.converter.json.JsonSchema;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import lombok.extern.java.Log;
import sun.util.calendar.ZoneInfo;
import static org.apache.gobblin.converter.avro.JsonElementConversionFactory.Type.*;
import static org.apache.gobblin.converter.json.JsonSchema.*;
/**
* <p>
* Creates a JsonElement to Avro converter for all supported data types.
* </p>
*
* @author kgoodhop
*
*/
public class JsonElementConversionFactory {
public enum Type {
DATE,
TIMESTAMP,
TIME,
FIXED,
STRING,
BYTES,
INT,
LONG,
FLOAT,
DOUBLE,
BOOLEAN,
ARRAY,
MAP,
ENUM,
RECORD,
NULL,
UNION;
private static List<Type> primitiveTypes =
Arrays.asList(NULL, BOOLEAN, INT, LONG, FLOAT, DOUBLE, BYTES, STRING, ENUM, FIXED);
public static boolean isPrimitive(Type type) {
return primitiveTypes.contains(type);
}
}
/**
* Use to create a converter for a single field from a schema.
* @param schemaNode
* @param namespace
* @param state
* @return {@link JsonElementConverter}
* @throws UnsupportedDateTypeException
*/
  public static JsonElementConverter getConvertor(JsonSchema schemaNode, String namespace, WorkUnitState state)
      throws UnsupportedDateTypeException {
    Type type = schemaNode.getType();
    // Time zone shared by all date/time converters; defaults to UTC when not configured.
    DateTimeZone timeZone = getTimeZone(state.getProp(ConfigurationKeys.CONVERTER_AVRO_DATE_TIMEZONE, "UTC"));
    switch (type) {
      // DATE / TIMESTAMP / TIME all use DateConverter; only the configured format string differs.
      case DATE:
        return new DateConverter(schemaNode,
            state.getProp(ConfigurationKeys.CONVERTER_AVRO_DATE_FORMAT, "yyyy-MM-dd HH:mm:ss"), timeZone, state);
      case TIMESTAMP:
        return new DateConverter(schemaNode,
            state.getProp(ConfigurationKeys.CONVERTER_AVRO_TIMESTAMP_FORMAT, "yyyy-MM-dd HH:mm:ss"), timeZone, state);
      case TIME:
        return new DateConverter(schemaNode, state.getProp(ConfigurationKeys.CONVERTER_AVRO_TIME_FORMAT, "HH:mm:ss"),
            timeZone, state);
      case FIXED:
        // FIXED has no converter implementation; declared in Type but rejected here.
        throw new UnsupportedDateTypeException(type.toString() + " is unsupported");
      case STRING:
        return new StringConverter(schemaNode);
      case BYTES:
        return new BinaryConverter(schemaNode, state.getProp(ConfigurationKeys.CONVERTER_AVRO_BINARY_CHARSET, "UTF8"));
      case INT:
        return new IntConverter(schemaNode);
      case LONG:
        return new LongConverter(schemaNode);
      case FLOAT:
        return new FloatConverter(schemaNode);
      case DOUBLE:
        return new DoubleConverter(schemaNode);
      case BOOLEAN:
        return new BooleanConverter(schemaNode);
      case ARRAY:
        return new ArrayConverter(schemaNode, state, namespace);
      case MAP:
        return new MapConverter(schemaNode, state);
      case ENUM:
        return new EnumConverter(schemaNode, namespace);
      case RECORD:
        return new RecordConverter(schemaNode, state, namespace);
      case NULL:
        return new NullConverter(schemaNode);
      case UNION:
        return new UnionConverter(schemaNode, state);
      default:
        // Defensive: unreachable unless a new Type constant is added without a case above.
        throw new UnsupportedDateTypeException(type.toString() + " is unsupported");
    }
  }
/**
* Backward Compatible form of {@link JsonElementConverter#getConvertor(JsonSchema, String, WorkUnitState)}
* @param fieldName
* @param fieldType
* @param schemaNode
* @param state
* @param nullable
* @return
* @throws UnsupportedDateTypeException
*/
public static JsonElementConverter getConvertor(String fieldName, String fieldType, JsonObject schemaNode,
WorkUnitState state, boolean nullable)
throws UnsupportedDateTypeException {
if (!schemaNode.has(COLUMN_NAME_KEY)) {
schemaNode.addProperty(COLUMN_NAME_KEY, fieldName);
}
if (!schemaNode.has(DATA_TYPE_KEY)) {
schemaNode.add(DATA_TYPE_KEY, new JsonObject());
}
JsonObject dataType = schemaNode.get(DATA_TYPE_KEY).getAsJsonObject();
if (!dataType.has(TYPE_KEY)) {
dataType.addProperty(TYPE_KEY, fieldType);
}
if (!schemaNode.has(IS_NULLABLE_KEY)) {
schemaNode.addProperty(IS_NULLABLE_KEY, nullable);
}
JsonSchema schema = new JsonSchema(schemaNode);
return getConvertor(schema, null, state);
}
private static DateTimeZone getTimeZone(String id) {
DateTimeZone zone;
try {
zone = DateTimeZone.forID(id);
} catch (IllegalArgumentException e) {
TimeZone timeZone = ZoneInfo.getTimeZone(id);
//throw error if unrecognized zone
if (timeZone == null) {
throw new IllegalArgumentException("TimeZone " + id + " not recognized");
}
zone = DateTimeZone.forTimeZone(timeZone);
}
return zone;
}
/**
* Converts a JsonElement into a supported AvroType
* @author kgoodhop
*
*/
  public static abstract class JsonElementConverter {
    // Source-side schema describing the field this converter handles (name, type, nullability).
    private final JsonSchema jsonSchema;

    /**
     * Creates a converter backed by an existing {@link JsonSchema}.
     */
    public JsonElementConverter(JsonSchema jsonSchema) {
      this.jsonSchema = jsonSchema;
    }

    /**
     * Creates a converter from bare field attributes by synthesizing a minimal
     * {@link JsonSchema} of the given source type.
     *
     * @param fieldName name of the field
     * @param nullable whether the field may hold null
     * @param sourceType source type name; must match a {@link Type} constant (case-insensitive)
     */
    public JsonElementConverter(String fieldName, boolean nullable, String sourceType) {
      JsonSchema jsonSchema = buildBaseSchema(Type.valueOf(sourceType.toUpperCase()));
      jsonSchema.setColumnName(fieldName);
      jsonSchema.setNullable(nullable);
      this.jsonSchema = jsonSchema;
    }

    /**
     * Field name from schema.
     */
    public String getName() {
      return this.jsonSchema.getColumnName();
    }

    /**
     * Whether the field may hold a null value.
     */
    public boolean isNullable() {
      return this.jsonSchema.isNullable();
    }

    /**
     * Avro schema for the converted type; nullable fields are wrapped as a [null, T] union.
     * NOTE(review): the base schema() below also wraps via buildUnionIfNullable, so for a
     * nullable converter that does NOT override schema() this appears to build a union
     * inside a union — verify against the concrete subclasses' schema() overrides.
     */
    public Schema getSchema() {
      if (isNullable()) {
        List<Schema> list = new ArrayList<>();
        list.add(Schema.create(Schema.Type.NULL));
        list.add(schema());
        return Schema.createUnion(list);
      }
      return schema();
    }

    /**
     * Default Avro schema: a bare schema of the target type, tagged with the original
     * source type and union-wrapped when nullable. Complex converters override this.
     */
    protected Schema schema() {
      Schema schema = Schema.create(getTargetType());
      schema.addProp("source.type", this.jsonSchema.getType().toString().toLowerCase());
      return buildUnionIfNullable(schema);
    }

    /**
     * Convert value
     * @param value is JsonNull will return null if allowed or exception if not allowed
     * @return Avro safe type
     */
    public Object convert(JsonElement value) {
      if (value.isJsonNull()) {
        if (isNullable()) {
          return null;
        }
        throw new RuntimeException("Field: " + getName() + " is not nullable and contains a null value");
      }
      return convertField(value);
    }

    /**
     * Convert a non-null JsonElement to its Avro representation.
     */
    abstract Object convertField(JsonElement value);

    /**
     * Avro data type after conversion.
     */
    public abstract Schema.Type getTargetType();

    /**
     * Joins namespace and name with a dot; returns null when either part is missing,
     * so callers can fall back to an unqualified name.
     */
    protected static String buildNamespace(String namespace, String name) {
      if (namespace == null || namespace.isEmpty()) {
        return null;
      }
      if (name == null || name.isEmpty()) {
        return null;
      }
      return namespace.trim() + "." + name.trim();
    }

    /**
     * Wraps the given schema as a [null, schema] union when this converter is nullable;
     * otherwise returns it unchanged.
     */
    protected Schema buildUnionIfNullable(Schema schema) {
      if (this.isNullable()) {
        return Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), schema));
      }
      return schema;
    }
  }
/** Converts a JSON string element to an Avro {@link Utf8} string. */
public static class StringConverter extends JsonElementConverter {

  public StringConverter(JsonSchema schema) {
    super(schema);
  }

  @Override
  Object convertField(JsonElement value) {
    final String raw = value.getAsString();
    return new Utf8(raw);
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.STRING;
  }
}
/** Converts a JSON numeric element to an Avro int. */
public static class IntConverter extends JsonElementConverter {

  public IntConverter(JsonSchema schema) {
    super(schema);
  }

  @Override
  Object convertField(JsonElement value) {
    final int parsed = value.getAsInt();
    return parsed;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.INT;
  }
}
/** Converts a JSON numeric element to an Avro long. */
public static class LongConverter extends JsonElementConverter {

  public LongConverter(JsonSchema schema) {
    super(schema);
  }

  @Override
  Object convertField(JsonElement value) {
    final long parsed = value.getAsLong();
    return parsed;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.LONG;
  }
}
/** Converts a JSON numeric element to an Avro double. */
public static class DoubleConverter extends JsonElementConverter {

  public DoubleConverter(JsonSchema schema) {
    super(schema);
  }

  @Override
  Object convertField(JsonElement value) {
    final double parsed = value.getAsDouble();
    return parsed;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.DOUBLE;
  }
}
/** Converts a JSON numeric element to an Avro float. */
public static class FloatConverter extends JsonElementConverter {

  public FloatConverter(JsonSchema schema) {
    super(schema);
  }

  @Override
  Object convertField(JsonElement value) {
    final float parsed = value.getAsFloat();
    return parsed;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.FLOAT;
  }
}
/** Converts a JSON boolean element to an Avro boolean. */
public static class BooleanConverter extends JsonElementConverter {

  public BooleanConverter(JsonSchema schema) {
    super(schema);
  }

  @Override
  Object convertField(JsonElement value) {
    final boolean parsed = value.getAsBoolean();
    return parsed;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.BOOLEAN;
  }
}
/**
 * Converts a JSON date/time string to epoch millis (or epoch seconds when
 * {@link ConfigurationKeys#CONVERTER_IS_EPOCH_TIME_IN_SECONDS} is set), trying each
 * comma-separated input pattern in order until one parses.
 */
public static class DateConverter extends JsonElementConverter {
  // Formatters are immutable and thread-safe; compile them once per pattern here
  // instead of on every record (the original rebuilt them per convertField call).
  private final List<DateTimeFormatter> formatters;
  private final WorkUnitState state;

  /**
   * @param schema field schema
   * @param pattern comma-separated list of Joda date patterns to try, in order
   * @param zone time zone the input strings are interpreted in
   * @param state work unit state, consulted per record for the epoch-seconds flag
   */
  public DateConverter(JsonSchema schema, String pattern, DateTimeZone zone, WorkUnitState state) {
    super(schema);
    this.state = state;
    this.formatters = new ArrayList<>();
    for (String p : pattern.split(",")) {
      this.formatters.add(DateTimeFormat.forPattern(p).withZone(zone));
    }
  }

  @Override
  Object convertField(JsonElement value) {
    for (DateTimeFormatter dtf : this.formatters) {
      try {
        // Parse in the configured zone, then normalize to UTC epoch millis.
        long millis = dtf.parseDateTime(value.getAsString()).withZone(DateTimeZone.forID("UTC")).getMillis();
        if (Boolean.valueOf(this.state.getProp(ConfigurationKeys.CONVERTER_IS_EPOCH_TIME_IN_SECONDS))) {
          return millis / 1000;
        }
        return millis;
      } catch (Exception e) {
        // This pattern did not match; try the next one.
      }
    }
    // Every configured pattern failed; include the offending value for debuggability.
    throw new RuntimeException("Failed to parse the date: " + value.getAsString());
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.LONG;
  }
}
/** Converts a JSON string element to Avro bytes using the configured character set. */
public static class BinaryConverter extends JsonElementConverter {
  // Charset name used to encode the string payload into bytes.
  private String charSet;

  public BinaryConverter(JsonSchema schema, String charSet) {
    super(schema);
    this.charSet = charSet;
  }

  @Override
  Object convertField(JsonElement value) {
    final String payload = value.getAsString();
    try {
      return ByteBuffer.wrap(payload.getBytes(this.charSet));
    } catch (UnsupportedEncodingException e) {
      // The configured charset is unknown to the JVM; surface it unchecked.
      throw new RuntimeException(e);
    }
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.BYTES;
  }
}
/**
 * Base class for converters of container types (arrays, maps, records) that delegate
 * element/value conversion to a nested {@link JsonElementConverter}.
 */
public static abstract class ComplexConverter extends JsonElementConverter {
  // Converter for the contained elements (array items or map values).
  private JsonElementConverter elementConverter;

  public ComplexConverter(JsonSchema schema) {
    super(schema);
  }

  public ComplexConverter(String fieldName, boolean nullable, String sourceType) {
    super(fieldName, nullable, sourceType);
  }

  protected void setElementConverter(JsonElementConverter elementConverter) {
    this.elementConverter = elementConverter;
  }

  public JsonElementConverter getElementConverter() {
    return this.elementConverter;
  }

  /**
   * Resolves the nested item/value schema for ARRAY or MAP types and installs the
   * matching element converter.
   */
  protected void processNestedItems(JsonSchema schema, WorkUnitState state, String namespace)
      throws UnsupportedDateTypeException {
    JsonSchema nested;
    if (schema.isType(ARRAY)) {
      nested = schema.getItemsWithinDataType();
    } else if (schema.isType(MAP)) {
      nested = schema.getValuesWithinDataType();
    } else {
      // Only ARRAY and MAP carry nested items; anything else leaves no nested schema.
      nested = null;
    }
    setElementConverter(getConvertor(nested, namespace, state));
  }
}
/** Converts a JSON array into an Avro {@link GenericData.Array}, converting each item. */
public static class ArrayConverter extends ComplexConverter {

  public ArrayConverter(JsonSchema schema, WorkUnitState state, String namespace)
      throws UnsupportedDateTypeException {
    super(schema);
    processNestedItems(schema, state, namespace);
  }

  @Override
  Object convertField(JsonElement value) {
    if (value.isJsonNull() && this.isNullable()) {
      return null;
    }
    JsonElementConverter itemConverter = getElementConverter();
    List<Object> converted = new ArrayList<>();
    for (JsonElement item : (JsonArray) value) {
      converted.add(itemConverter.convert(item));
    }
    return new GenericData.Array<>(arraySchema(), converted);
  }

  // Avro array schema over the item schema, tagged with the original source type.
  private Schema arraySchema() {
    Schema avroArray = Schema.createArray(getElementConverter().schema());
    avroArray.addProp(SOURCE_TYPE, ARRAY.toString().toLowerCase());
    return avroArray;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.ARRAY;
  }

  @Override
  public Schema schema() {
    return buildUnionIfNullable(arraySchema());
  }
}
/** Converts a JSON object into a string-keyed map, converting each value. */
public static class MapConverter extends ComplexConverter {

  public MapConverter(JsonSchema schema, WorkUnitState state)
      throws UnsupportedDateTypeException {
    super(schema);
    // Maps carry no namespace of their own.
    processNestedItems(schema, state, null);
  }

  @Override
  Object convertField(JsonElement value) {
    JsonElementConverter valueConverter = getElementConverter();
    Map<String, Object> converted = new HashMap<>();
    for (Map.Entry<String, JsonElement> entry : ((JsonObject) value).entrySet()) {
      converted.put(entry.getKey(), valueConverter.convert(entry.getValue()));
    }
    return converted;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.MAP;
  }

  @Override
  public Schema schema() {
    Schema avroMap = Schema.createMap(getElementConverter().schema());
    avroMap.addProp(SOURCE_TYPE, MAP.toString().toLowerCase());
    return buildUnionIfNullable(avroMap);
  }
}
// NOTE(review): lombok @Log generates a `log` field that is shadowed by the manual LOG
// below — likely redundant; confirm before removing either.
@Log
public static class RecordConverter extends ComplexConverter {
  private static final Logger LOG = LoggerFactory.getLogger(RecordConverter.class);
  // Per-field converters, keyed by the field's column name.
  private HashMap<String, JsonElementConverter> converters = new HashMap<>();
  // Avro record schema built once at construction from the JSON field schemas.
  private Schema _schema;
  // Running count of records that failed conversion, compared against the configured cap.
  private long numFailedConversion = 0;
  private State workUnit;

  public RecordConverter(JsonSchema schema, WorkUnitState state, String namespace)
      throws UnsupportedDateTypeException {
    super(schema);
    workUnit = state;
    // Root records are named by their column name; nested records by their type name.
    String name = schema.isRoot() ? schema.getColumnName() : schema.getName();
    _schema = buildRecordSchema(schema.getValuesWithinDataType(), state, name, namespace);
  }

  /**
   * Builds the Avro record schema for the given JSON field schemas, creating and caching
   * a converter for each field along the way.
   */
  private Schema buildRecordSchema(JsonSchema schema, WorkUnitState workUnit, String name, String namespace) {
    List<Schema.Field> fields = new ArrayList<>();
    for (int i = 0; i < schema.fieldsCount(); i++) {
      JsonSchema map = schema.getFieldSchemaAt(i);
      // Nested records are namespaced under this record's fully-qualified name.
      String childNamespace = buildNamespace(namespace, name);
      JsonElementConverter converter;
      String sourceType;
      Schema fldSchema;
      try {
        sourceType = map.isType(UNION) ? UNION.toString().toLowerCase() : map.getType().toString().toLowerCase();
        converter = getConvertor(map, childNamespace, workUnit);
        this.converters.put(map.getColumnName(), converter);
        fldSchema = converter.schema();
      } catch (UnsupportedDateTypeException e) {
        // Schema building has no checked-exception path; rethrow unchecked.
        throw new UnsupportedOperationException(e);
      }
      // [Avro 1.9.2 upgrade] No need to pass JsonNodeFactory.instance.nullNode() if map is nullable.
      // AvroCompatibilityHelper will take care of this.
      Schema.Field fld = AvroCompatibilityHelper.createSchemaField(map.getColumnName(), fldSchema, map.getComment(),
          null);
      fld.addProp(SOURCE_TYPE, sourceType);
      fields.add(fld);
    }
    Schema avroSchema = Schema.createRecord(name.isEmpty() ? null : name, "", namespace, false);
    avroSchema.setFields(fields);
    return avroSchema;
  }

  @Override
  Object convertField(JsonElement value) {
    GenericRecord avroRecord = new GenericData.Record(_schema);
    long maxFailedConversions = this.workUnit.getPropAsLong(ConfigurationKeys.CONVERTER_AVRO_MAX_CONVERSION_FAILURES,
        ConfigurationKeys.DEFAULT_CONVERTER_AVRO_MAX_CONVERSION_FAILURES);
    for (Map.Entry<String, JsonElement> entry : ((JsonObject) value).entrySet()) {
      try {
        // NOTE(review): a JSON key with no registered converter yields an NPE here,
        // which is counted as a conversion failure like any other — confirm intended.
        avroRecord.put(entry.getKey(), this.converters.get(entry.getKey()).convert(entry.getValue()));
      } catch (Exception e) {
        this.numFailedConversion++;
        if (this.numFailedConversion < maxFailedConversions) {
          // Below the cap: drop the record. NOTE(review): returning EmptyIterable from a
          // field-level convert looks like a sentinel consumed upstream — TODO confirm
          // callers expect this rather than a GenericRecord/null.
          LOG.error("Dropping record " + value + " because it cannot be converted to Avro", e);
          return new EmptyIterable<>();
        }
        // Cap reached: fail the task with full context.
        throw new RuntimeException(
            "Unable to convert field:" + entry.getKey() + " for value:" + entry.getValue() + " for record: " + value,
            e);
      }
    }
    return avroRecord;
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.RECORD;
  }

  @Override
  public Schema schema() {
    // NOTE(review): this mutates the cached _schema; a second call would attempt to
    // re-add SOURCE_TYPE, which Avro may reject — confirm schema() is called once.
    Schema schema = _schema;
    schema.addProp(SOURCE_TYPE, RECORD.toString().toLowerCase());
    return buildUnionIfNullable(schema);
  }
}
/**
 * Converts a JSON string element to an Avro enum symbol. The Avro enum schema is built
 * eagerly in the constructor: the original built it lazily inside {@code schema()}, so
 * {@code convertField()} dereferenced a null schema if conversion ran first, and repeated
 * {@code schema()} calls re-added the SOURCE_TYPE prop (which Avro rejects as an overwrite).
 */
public static class EnumConverter extends JsonElementConverter {
  String enumName;
  String namespace;
  List<String> enumSet = new ArrayList<>();
  Schema schema;

  public EnumConverter(JsonSchema schema, String namespace) {
    super(schema);
    JsonObject dataType = schema.getDataType();
    for (JsonElement elem : dataType.get(ENUM_SYMBOLS_KEY).getAsJsonArray()) {
      this.enumSet.add(elem.getAsString());
    }
    String enumName = schema.getName();
    // Avro treats an empty name as invalid; use null for an unnamed enum instead.
    this.enumName = enumName.isEmpty() ? null : enumName;
    this.namespace = namespace;
    // Build once: safe for convertField() before schema(), and avoids re-adding props.
    this.schema = Schema.createEnum(this.enumName, "", namespace, this.enumSet);
    this.schema.addProp(SOURCE_TYPE, ENUM.toString().toLowerCase());
  }

  @Override
  Object convertField(JsonElement value) {
    return new GenericData.EnumSymbol(this.schema, value.getAsString());
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.ENUM;
  }

  @Override
  public Schema schema() {
    return buildUnionIfNullable(this.schema);
  }
}
/** Pass-through converter for fields declared with the NULL type. */
public static class NullConverter extends JsonElementConverter {

  public NullConverter(JsonSchema schema) {
    super(schema);
  }

  @Override
  Object convertField(JsonElement value) {
    // A NULL-typed field converts to the JsonNull element itself.
    return value.getAsJsonNull();
  }

  @Override
  public org.apache.avro.Schema.Type getTargetType() {
    return Schema.Type.NULL;
  }
}
/**
 * Converts a two-branch union: each value is tried against the first branch's converter
 * and falls back to the second on failure.
 */
public static class UnionConverter extends JsonElementConverter {
  private final Schema firstSchema;
  private final Schema secondSchema;
  private final JsonElementConverter firstConverter;
  private final JsonElementConverter secondConverter;

  public UnionConverter(JsonSchema schemaNode, WorkUnitState state) {
    super(schemaNode);
    List<JsonSchema> branches = schemaNode.getDataTypes();
    this.firstConverter = getConverter(branches.get(0), state);
    this.secondConverter = getConverter(branches.get(1), state);
    this.firstSchema = this.firstConverter.schema();
    this.secondSchema = this.secondConverter.schema();
  }

  // Wraps the checked factory exception: constructors here have no checked path.
  private JsonElementConverter getConverter(JsonSchema schemaElement, WorkUnitState state) {
    try {
      return JsonElementConversionFactory.getConvertor(schemaElement, null, state);
    } catch (UnsupportedDateTypeException e) {
      throw new UnsupportedOperationException(e);
    }
  }

  @Override
  Object convertField(JsonElement value) {
    try {
      return this.firstConverter.convert(value);
    } catch (Exception e) {
      // First branch rejected the value; let the second branch try (or fail).
      return this.secondConverter.convert(value);
    }
  }

  @Override
  public Schema.Type getTargetType() {
    return Schema.Type.UNION;
  }

  @Override
  protected Schema schema() {
    return Schema.createUnion(Arrays.asList(firstSchema, secondSchema));
  }
}
}
| 2,913 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/BytesToAvroConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.io.IOException;
import java.util.Collections;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DecoderFactory;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
/**
 * Converter that can take a single binary encoded Avro record and convert it to a
 * GenericRecord object.
 *
 * <p>{@link #convertSchema(String, WorkUnitState)} must run before
 * {@link #convertRecord(Schema, byte[], WorkUnitState)} so the datum reader exists.
 * A {@link BinaryDecoder} is cached per thread and handed back to
 * {@link DecoderFactory#binaryDecoder} for reuse, avoiding per-record allocation.</p>
 */
public class BytesToAvroConverter extends Converter<String, Schema, byte[], GenericRecord> {
  private Schema latestSchema = null;
  private GenericDatumReader<GenericRecord> recordReader = null;
  // A plain ThreadLocal already defaults to null; the original's anonymous
  // initialValue() override returning null was redundant.
  private final ThreadLocal<BinaryDecoder> decoderCache = new ThreadLocal<>();

  @Override
  public Schema convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    latestSchema = new Schema.Parser().parse(inputSchema);
    recordReader = new GenericDatumReader<>(latestSchema);
    return latestSchema;
  }

  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, byte[] inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    Preconditions.checkNotNull(recordReader, "Must have called convertSchema!");
    // Reuse this thread's decoder when present; DecoderFactory re-initializes it in place.
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(inputRecord, decoderCache.get());
    try {
      GenericRecord parsedRecord = recordReader.read(null, decoder);
      decoderCache.set(decoder);
      return Collections.singleton(parsedRecord);
    } catch (IOException e) {
      throw new DataConversionException("Error parsing record", e);
    }
  }
}
| 2,914 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/AvroRecordToAvroWritableConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.rmi.server.UID;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.hive.serde2.avro.AvroGenericRecordWritable;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
/**
 * A {@link Converter} that takes an Avro {@link GenericRecord} and wraps it in an
 * {@link AvroGenericRecordWritable}. Useful ahead of
 * {@link org.apache.gobblin.converter.serde.HiveSerDeConverter}, which expects input
 * records to implement {@link org.apache.hadoop.io.Writable}.
 */
public class AvroRecordToAvroWritableConverter
    extends Converter<Schema, Schema, GenericRecord, AvroGenericRecordWritable> {
  // Stable reader id shared by every writable this converter instance emits.
  private final UID uid = new UID();

  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    // Schema passes through untouched; only the record container changes.
    return inputSchema;
  }

  @Override
  public Iterable<AvroGenericRecordWritable> convertRecord(Schema outputSchema, GenericRecord inputRecord,
      WorkUnitState workUnit) throws DataConversionException {
    AvroGenericRecordWritable writable = new AvroGenericRecordWritable();
    writable.setRecord(inputRecord);
    writable.setFileSchema(outputSchema);
    writable.setRecordReaderID(this.uid);
    return Lists.newArrayList(writable);
  }
}
| 2,915 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/avro/FlattenNestedKeyConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.avro;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import com.google.common.base.CaseFormat;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.util.AvroUtils.FIELD_LOCATION_DELIMITER;
/**
* Flatten a nested key and create a camel-cased name of a field which has the same value
*
* <p>
* Given configuration:
* <code>FlattenNestedKeyConverter.fieldsToFlatten = "address,address.city"</code>.
* A {@link FlattenNestedKeyConverter} will only process <code>"address.city"</code>. It makes
* a copy of the {@link Field} with a new name <code>"addressCity"</code> and adds it to the
* top level fields of the output schema. The value of field <code>"addressCity"</code> is equal
* to the one referred by <code>"address.city"</code>
* </p>
*/
public class FlattenNestedKeyConverter extends Converter<Schema, Schema, GenericRecord, GenericRecord> {
  // Config key (scoped under this class's simple name) listing nested keys to flatten.
  public static final String FIELDS_TO_FLATTEN = "fieldsToFlatten";
  // A map from new field name to the nested key; populated by convertSchema and
  // consumed by convertRecord, so convertSchema must run first.
  private Map<String, String> fieldNameMap = Maps.newHashMap();

  /**
   * Copies every top-level field of the input schema and appends one new top-level
   * field per configured nested key, named by camel-casing the dotted path
   * (e.g. "address.city" -> "addressCity"). Keys without a dot and duplicate
   * camel-cased names are silently skipped.
   *
   * @throws SchemaConversionException if a configured nested key does not exist
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    // Clear previous state so a re-init does not leak stale mappings
    fieldNameMap.clear();
    Config config = ConfigUtils.propertiesToConfig(workUnit.getProperties()).getConfig(getClass().getSimpleName());
    List<String> nestedKeys = ConfigUtils.getStringList(config, FIELDS_TO_FLATTEN);
    // No keys need flatten
    if (nestedKeys == null || nestedKeys.size() == 0) {
      return inputSchema;
    }
    List<Field> fields = new ArrayList<>();
    // Clone the existing fields (Avro fields cannot be reused across schemas)
    for (Field field : inputSchema.getFields()) {
      fields.add(AvroCompatibilityHelper.createSchemaField(field.name(), field.schema(), field.doc(),
          AvroUtils.getCompatibleDefaultValue(field), field.order()));
    }
    // Convert each of nested keys into a top level field
    for (String key : nestedKeys) {
      if (!key.contains(FIELD_LOCATION_DELIMITER)) {
        // Not a nested key; nothing to flatten
        continue;
      }
      String nestedKey = key.trim();
      // Create camel-cased name, e.g. "address.city" -> "address-city" -> "addressCity"
      String hyphenizedKey = nestedKey.replace(FIELD_LOCATION_DELIMITER, "-");
      String name = CaseFormat.LOWER_HYPHEN.to(CaseFormat.LOWER_CAMEL, hyphenizedKey);
      if (fieldNameMap.containsKey(name)) {
        // Duplicate camel-cased name; first occurrence wins
        continue;
      }
      fieldNameMap.put(name, nestedKey);
      // Find the field the nested key refers to
      Optional<Field> optional = AvroUtils.getField(inputSchema, nestedKey);
      if (!optional.isPresent()) {
        throw new SchemaConversionException("Unable to get field with location: " + nestedKey);
      }
      Field field = optional.get();
      // Make a copy under a new name
      Field copy = AvroCompatibilityHelper.createSchemaField(name, field.schema(), field.doc(),
          AvroUtils.getCompatibleDefaultValue(field), field.order());
      fields.add(copy);
    }
    Schema outputSchema = Schema
        .createRecord(inputSchema.getName(), inputSchema.getDoc(), inputSchema.getNamespace(), inputSchema.isError());
    outputSchema.setFields(fields);
    return outputSchema;
  }

  /**
   * Copies the original field values and fills each flattened field with the value at
   * its nested location.
   *
   * @throws DataConversionException if a nested location has no value in this record
   */
  @Override
  public Iterable<GenericRecord> convertRecord(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    // No fields need flatten
    if (fieldNameMap.size() == 0) {
      return new SingleRecordIterable<>(inputRecord);
    }
    GenericRecord outputRecord = new GenericData.Record(outputSchema);
    for (Field field : outputSchema.getFields()) {
      String fieldName = field.name();
      if (fieldNameMap.containsKey(fieldName)) {
        // Skip new field for now; filled from the nested location below
        continue;
      }
      outputRecord.put(fieldName, inputRecord.get(fieldName));
    }
    // Deal with new fields
    for (Map.Entry<String, String> entry : fieldNameMap.entrySet()) {
      Optional<Object> optional = AvroUtils.getFieldValue(inputRecord, entry.getValue());
      if (!optional.isPresent()) {
        throw new DataConversionException("Unable to get field value with location: " + entry.getValue());
      }
      outputRecord.put(entry.getKey(), optional.get());
    }
    return new SingleRecordIterable<>(outputRecord);
  }
}
| 2,916 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/http/RestEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.http;
import com.google.common.base.Optional;
import lombok.Data;
/**
 * Rest Entry that hold resource path and resource value.
 *
 * @param <T> type of the entry payload
 */
@Data
public class RestEntry<T> {
  // Optional resource path suffix identifying the target REST resource; absent when none given.
  private final Optional<String> resourcePath;
  // Payload written to the resource.
  private final T restEntryVal;

  /**
   * @param resourcePath resource path; may be null, in which case it is stored as absent
   * @param restEntryVal entry payload
   */
  public RestEntry(String resourcePath, T restEntryVal) {
    this.resourcePath = Optional.fromNullable(resourcePath);
    this.restEntryVal = restEntryVal;
  }
}
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/http/AvroToRestJsonEntryConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.http;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.StringUtils;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigParseOptions;
import com.typesafe.config.ConfigRenderOptions;
import com.typesafe.config.ConfigSyntax;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
/**
* Converts Avro to RestJsonEntry.
* This converter won't provide converted Schema mainly because:
* 1. The purpose of this converter is to convert DataRecord to fit JSON REST API writer. This converter is
* intended to be end of the converter chain and expect to be followed by JSON REST API writer.
* 2. JSON schema is still under development and there is no widely accepted JSON Schema.
*/
public class AvroToRestJsonEntryConverter extends Converter<Schema, Void, GenericRecord, RestEntry<JsonObject>> {
//Resource template ( e.g: /sobject/account/${account_id} )
static final String CONVERTER_AVRO_REST_ENTRY_RESOURCE_KEY = "converter.avro.rest.resource_key";
//JSON conversion template @see convertRecord
static final String CONVERTER_AVRO_REST_JSON_ENTRY_TEMPLATE = "converter.avro.rest.json_hocon_template";
private final JsonParser parser = new JsonParser();
@Override
public Void convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
return null;
}
/**
* Use resource key(Optional) and rest json entry as a template and fill in template using Avro as a reference.
* e.g:
* Rest JSON entry HOCON template:
* AccountId=${sf_account_id},Member_Id__c=${member_id}
* Avro:
* {"sf_account_id":{"string":"0016000000UiCYHAA3"},"member_id":{"long":296458833}}
*
* Converted Json:
* {"AccountId":"0016000000UiCYHAA3","Member_Id__c":296458833}
*
* As it's template based approach, it can produce nested JSON structure even Avro is flat (or vice versa).
*
* e.g:
* Rest resource template:
* /sobject/account/memberId/${member_id}
* Avro:
* {"sf_account_id":{"string":"0016000000UiCYHAA3"},"member_id":{"long":296458833}}
* Converted resource:
* /sobject/account/memberId/296458833
*
* Converted resource will be used to form end point.
* http://www.server.com:9090/sobject/account/memberId/296458833
*
* {@inheritDoc}
* @see org.apache.gobblin.converter.Converter#convertRecord(java.lang.Object, java.lang.Object, org.apache.gobblin.configuration.WorkUnitState)
*/
@Override
public Iterable<RestEntry<JsonObject>> convertRecord(Void outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
throws DataConversionException {
Config srcConfig = ConfigFactory.parseString(inputRecord.toString(),
ConfigParseOptions.defaults().setSyntax(ConfigSyntax.JSON));
String resourceKey = workUnit.getProp(CONVERTER_AVRO_REST_ENTRY_RESOURCE_KEY, "");
if(!StringUtils.isEmpty(resourceKey)) {
final String dummyKey = "DUMMY";
Config tmpConfig = ConfigFactory.parseString(dummyKey + "=" + resourceKey).resolveWith(srcConfig);
resourceKey = tmpConfig.getString(dummyKey);
}
String hoconInput = workUnit.getProp(CONVERTER_AVRO_REST_JSON_ENTRY_TEMPLATE);
if(StringUtils.isEmpty(hoconInput)) {
return new SingleRecordIterable<>(new RestEntry<>(resourceKey, parser.parse(inputRecord.toString()).getAsJsonObject()));
}
Config destConfig = ConfigFactory.parseString(hoconInput).resolveWith(srcConfig);
JsonObject json = parser.parse(destConfig.root().render(ConfigRenderOptions.concise())).getAsJsonObject();
return new SingleRecordIterable<>(new RestEntry<>(resourceKey, json));
}
} | 2,918 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/csv/CsvToJsonConverterV2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.csv;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
/**
* CsvToJsonConverterV2 accepts already deserialized (parsed) CSV row, String[], where you can use
* @see CsvFileDownloader that conforms with RFC 4180 by leveraging Open CSV.
*
* Converts CSV to JSON. CSV schema is represented by the form of JsonArray same interface being used by CsvToJonConverter.
* Each CSV record is represented by a array of String.
*
* Example of CSV schema:
* [
{
"columnName": "Day",
"comment": "",
"isNullable": "true",
"dataType": {
"type": "string"
}
},
{
"columnName": "Pageviews",
"comment": "",
"isNullable": "true",
"dataType": {
"type": "long"
}
}
]
*/
public class CsvToJsonConverterV2 extends Converter<String, JsonArray, String[], JsonObject> {
  private static final Logger LOG = LoggerFactory.getLogger(CsvToJsonConverterV2.class);
  private static final JsonParser JSON_PARSER = new JsonParser();
  private static final String COLUMN_NAME_KEY = "columnName";
  private static final String DATA_TYPE_KEY = "dataType";
  private static final String TYPE = "type";
  private static final String JSON_NULL_VAL = "null";
  public static final String CUSTOM_ORDERING = "converter.csv_to_json.custom_order";

  // Indices (as strings) mapping each output-schema column to a CSV column index;
  // empty list means a positional 1:1 mapping is used.
  private List<String> customOrder;

  @Override
  public Converter<String, JsonArray, String[], JsonObject> init(WorkUnitState workUnit) {
    super.init(workUnit);
    customOrder = workUnit.getPropAsList(CUSTOM_ORDERING, "");
    if (!customOrder.isEmpty()) {
      LOG.info("Will use custom order to generate JSON from CSV: " + customOrder);
    }
    return this;
  }

  /**
   * Parses the JSON-formatted input schema string into a {@link JsonArray}.
   *
   * @param inputSchema JSON array of column descriptors; must not be null
   */
  @Override
  public JsonArray convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    Preconditions.checkNotNull(inputSchema, "inputSchema is required.");
    return JSON_PARSER.parse(inputSchema).getAsJsonArray();
  }

  /**
   * Converts CSV (array of String) to JSON.
   * By default, fields between CSV and JSON are mapped in order bases and it validates if both input and output has same number of fields.
   *
   * Customization can be achieved by adding custom order where user can define list of indices of CSV fields correspond to output schema user defined.
   * Use case of customization (custom order):
   * In custom order, there are three input parameters generates output.
   *   1. Output schema: This is exact copy of input schema which is passed by user through job property.
   *   2. Custom order indices: This is indices passed by user through job property.
   *   3. Input record: This is CSV row, represented by array of String.
   * User usually does not have control on input record, and custom order is needed when output schema is not 1:1 match with input record.
   * Use cases:
   *   1. The order of input record(CSV in this case) does not match with output schema.
   *   2. Number of columns in output schema is greater or lesser than number of columns in input records.
   *
   * e.g:
   *  1. Different order
   *   - Input record (CSV)
   *     "2029", "94043", "Mountain view"
   *   - Output schema (derived from input schema):
   *     [{"columnName":"street_number","dataType":{"type":"string"}},{"columnName":"city","dataType":{"type":"string"}},{"columnName":"zip_code","dataType":{"type":"string"}}]
   *   - Custom order indices
   *     0,2,1
   *   - Output JSON (Key value is derived from output schema)
   *     {"street_number" : "2029", "city" : "Mountain view" , "zip_code" : "94043" }
   *
   *  2. # of columns in input record(CSV) > # of columns in output schema
   *   - Input record (CSV)
   *     "2029", "Mountain view" , "USA", "94043"
   *   - Custom order indices
   *     0,1,3
   *   - Output schema (derived from input schema):
   *     [{"columnName":"street_number","dataType":{"type":"string"}},{"columnName":"city","dataType":{"type":"string"}},{"columnName":"zip_code","dataType":{"type":"string"}}]
   *   - Output JSON (Key value is derived from output schema)
   *     {"street_number" : "2029", "city" : "Mountain view" , "zip_code" : "94043" }
   *
   *  3. # of columns in input record(CSV) < # of columns in output schema
   *   - Input record (CSV)
   *     "2029", "Mountain view", "94043"
   *   - Custom order (adding null when negative index is defined)
   *     0,1,-1,2
   *   - Output schema (derived from input schema):
   *     [{"columnName":"street_number","dataType":{"type":"string"}},{"columnName":"city","dataType":{"type":"string"}},
   *      {"columnName":"Country","isNullable":"true","dataType":{"type":"string"}},{"columnName":"zip_code","dataType":{"type":"string"}}]
   *   - Output JSON
   *     {"street_number" : "2029", "city" : "Mountain view" , "Country" : null, "zip_code" : "94043" }
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.Converter#convertRecord(java.lang.Object, java.lang.Object, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Iterable<JsonObject> convertRecord(JsonArray outputSchema, String[] inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    JsonObject outputRecord = customOrder.isEmpty()
        ? createOutput(outputSchema, inputRecord)
        : createOutput(outputSchema, inputRecord, customOrder);
    return new SingleRecordIterable<>(outputRecord);
  }

  /**
   * Maps CSV columns onto schema columns positionally (1:1, same order).
   * Empty strings and the literal "null" (case-insensitive) become JSON null.
   */
  @VisibleForTesting
  JsonObject createOutput(JsonArray outputSchema, String[] inputRecord) {
    Preconditions.checkArgument(outputSchema.size() == inputRecord.length, "# of columns mismatch. Input "
        + inputRecord.length + " , output: " + outputSchema.size());
    JsonObject outputRecord = new JsonObject();
    for (int i = 0; i < outputSchema.size(); i++) {
      JsonObject field = outputSchema.get(i).getAsJsonObject();
      String key = field.get(COLUMN_NAME_KEY).getAsString();
      if (StringUtils.isEmpty(inputRecord[i]) || JSON_NULL_VAL.equalsIgnoreCase(inputRecord[i])) {
        outputRecord.add(key, JsonNull.INSTANCE);
      } else {
        outputRecord.add(key, convertValue(inputRecord[i], field.getAsJsonObject(DATA_TYPE_KEY)));
      }
    }
    return outputRecord;
  }

  /**
   * Maps CSV columns onto schema columns using the user-supplied index list.
   * A negative index produces JSON null (used when the schema has more columns than the CSV).
   */
  @VisibleForTesting
  JsonObject createOutput(JsonArray outputSchema, String[] inputRecord, List<String> customOrder) {
    // Note: labels fixed — the first count is the schema size, the second the custom-order size.
    Preconditions.checkArgument(outputSchema.size() == customOrder.size(), "# of columns mismatch. Schema: "
        + outputSchema.size() + " , custom order: " + customOrder.size());
    JsonObject outputRecord = new JsonObject();
    Iterator<JsonElement> outputSchemaIterator = outputSchema.iterator();
    Iterator<String> customOrderIterator = customOrder.iterator();
    while (outputSchemaIterator.hasNext() && customOrderIterator.hasNext()) {
      JsonObject field = outputSchemaIterator.next().getAsJsonObject();
      String key = field.get(COLUMN_NAME_KEY).getAsString();
      int i = Integer.parseInt(customOrderIterator.next());
      Preconditions.checkArgument(i < inputRecord.length,
          "Index out of bound detected in custom order. Index: " + i + " , # of CSV columns: " + inputRecord.length);
      if (i < 0 || null == inputRecord[i] || JSON_NULL_VAL.equalsIgnoreCase(inputRecord[i])) {
        outputRecord.add(key, JsonNull.INSTANCE);
        continue;
      }
      outputRecord.add(key, convertValue(inputRecord[i], field.getAsJsonObject(DATA_TYPE_KEY)));
    }
    return outputRecord;
  }

  /**
   * Converts a raw string value to a JSON primitive of the type declared in the schema's
   * dataType object; falls back to a plain string primitive when no type is declared.
   */
  private JsonElement convertValue(String value, JsonObject dataType) {
    if (dataType == null || !dataType.has(TYPE)) {
      return new JsonPrimitive(value);
    }
    String type = dataType.get(TYPE).getAsString().toUpperCase();
    ValueType valueType = ValueType.valueOf(type);
    return valueType.convert(value);
  }

  /**
   * Supported target types for string-to-JSON value conversion.
   */
  private enum ValueType {
    INT {
      @Override
      JsonElement convert(String value) {
        try {
          // Exact integer parse first; routing via Double loses precision above 2^53.
          return new JsonPrimitive(Integer.parseInt(value));
        } catch (NumberFormatException e) {
          // Fall back for decimal-formatted input such as "123.0" (original behavior).
          return new JsonPrimitive(Double.valueOf(value).intValue());
        }
      }
    },
    LONG {
      @Override
      JsonElement convert(String value) {
        try {
          // Exact long parse first; the Double route silently corrupts longs above 2^53.
          return new JsonPrimitive(Long.parseLong(value));
        } catch (NumberFormatException e) {
          return new JsonPrimitive(Double.valueOf(value).longValue());
        }
      }
    },
    FLOAT {
      @Override
      JsonElement convert(String value) {
        return new JsonPrimitive(Double.valueOf(value).floatValue());
      }
    },
    DOUBLE {
      @Override
      JsonElement convert(String value) {
        return new JsonPrimitive(Double.valueOf(value));
      }
    },
    BOOLEAN {
      @Override
      JsonElement convert(String value) {
        return new JsonPrimitive(Boolean.valueOf(value));
      }
    },
    STRING {
      @Override
      JsonElement convert(String value) {
        return new JsonPrimitive(value);
      }
    },
    TIMESTAMP {
      @Override
      JsonElement convert(String value) {
        // Timestamps are passed through as strings; no parsing is attempted.
        return new JsonPrimitive(value);
      }
    };

    abstract JsonElement convert(String value);
  }
}
| 2,919 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/csv/CsvToJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.csv;
import java.util.List;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.source.extractor.utils.InputStreamCSVReader;
public class CsvToJsonConverter extends Converter<String, JsonArray, String, JsonObject> {
  private static final String NULL = "null";

  /**
   * Take in an input schema of type string, the schema must be in JSON format
   * @return a JsonArray representation of the schema
   */
  @Override
  public JsonArray convertSchema(String inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return new JsonParser().parse(inputSchema).getAsJsonArray();
  }

  /**
   * Splits the record on CONVERTER_CSV_TO_JSON_DELIMITER and maps each token onto the
   * corresponding schema column; missing, empty and literal "null" tokens become JSON null.
   * @return a JsonObject representing the record
   * @throws DataConversionException if splitting or mapping fails for any reason
   */
  @Override
  public Iterable<JsonObject> convertRecord(JsonArray outputSchema, String inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      String delimiter = workUnit.getProp(ConfigurationKeys.CONVERTER_CSV_TO_JSON_DELIMITER);
      if (Strings.isNullOrEmpty(delimiter)) {
        throw new IllegalArgumentException("Delimiter cannot be empty");
      }
      char enclosedChar = workUnit.getProp(ConfigurationKeys.CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR,
          ConfigurationKeys.DEFAULT_CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR).charAt(0);
      InputStreamCSVReader reader = new InputStreamCSVReader(inputRecord, delimiter.charAt(0), enclosedChar);
      List<String> tokens = Lists.newArrayList(reader.splitRecord());

      JsonObject outputRecord = new JsonObject();
      for (int idx = 0; idx < outputSchema.size(); idx++) {
        String columnName = outputSchema.get(idx).getAsJsonObject().get("columnName").getAsString();
        // Tokens past the end of the split record are treated the same as null tokens.
        String token = idx < tokens.size() ? tokens.get(idx) : null;
        if (token == null || token.isEmpty() || token.toLowerCase().equals(NULL)) {
          outputRecord.add(columnName, JsonNull.INSTANCE);
        } else {
          outputRecord.addProperty(columnName, token);
        }
      }
      return new SingleRecordIterable<>(outputRecord);
    } catch (Exception e) {
      throw new DataConversionException(e);
    }
  }
}
| 2,920 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/string/ObjectToStringConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.string;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
/**
* Implementation of {@link Converter} that converts a given {@link Object} to its {@link String} representation
*/
public class ObjectToStringConverter extends Converter<Object, Class<String>, Object, String> {

  /** The output schema is simply the {@link String} class; the input schema is ignored. */
  @Override
  public Class<String> convertSchema(Object inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return String.class;
  }

  /** Emits the record's {@link Object#toString()} representation as a single output record. */
  @Override
  public Iterable<String> convertRecord(Class<String> outputSchema, Object inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    String stringForm = inputRecord.toString();
    return new SingleRecordIterable<>(stringForm);
  }
}
| 2,921 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/string/StringSplitterConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.string;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* Implementation of {@link Converter} that splits a string based on a delimiter specified by
* {@link ConfigurationKeys#CONVERTER_STRING_SPLITTER_DELIMITER}
*/
public class StringSplitterConverter extends Converter<Class<String>, Class<String>, String, String> {

  // Splitter built from the branch-aware delimiter property; empty tokens are dropped.
  private Splitter splitter;

  /**
   * Reads the (fork-branch-aware) delimiter property and builds the splitter.
   *
   * @throws IllegalArgumentException if the delimiter property is not set for this branch
   */
  @Override
  public Converter<Class<String>, Class<String>, String, String> init(WorkUnitState workUnit) {
    String stringSplitterDelimiterKey = ForkOperatorUtils.getPropertyNameForBranch(
        workUnit, ConfigurationKeys.CONVERTER_STRING_SPLITTER_DELIMITER);
    // Message typo fixed: "with out" -> "without".
    Preconditions.checkArgument(workUnit.contains(stringSplitterDelimiterKey), "Cannot use "
        + this.getClass().getName() + " without specifying " + ConfigurationKeys.CONVERTER_STRING_SPLITTER_DELIMITER);
    this.splitter =
        Splitter.on(workUnit.getProp(stringSplitterDelimiterKey)).omitEmptyStrings();
    return this;
  }

  /** Schema passes through unchanged. */
  @Override
  public Class<String> convertSchema(Class<String> inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return inputSchema;
  }

  /** Emits one output record per non-empty delimiter-separated token of the input record. */
  @Override
  public Iterable<String> convertRecord(Class<String> outputSchema, String inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    return this.splitter.split(inputRecord);
  }
}
| 2,922 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/string/StringToBytesConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.string;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
/**
* Convert string to bytes using UTF8 encoding.
*/
public class StringToBytesConverter extends Converter<String, String, String, byte[]> {

  /** Schema passes through unchanged. */
  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return inputSchema;
  }

  /** Encodes the record as UTF-8 bytes and emits it as a single output record. */
  @Override
  public Iterable<byte[]> convertRecord(String outputSchema, String inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    byte[] encoded = inputRecord.getBytes(Charsets.UTF_8);
    return Lists.newArrayList(encoded);
  }
}
| 2,923 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/string/StringSplitterToListConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.string;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* Splits a {@link String} record into a record that is {@link List} of {@link String}, based on
* {@link ConfigurationKeys#CONVERTER_STRING_SPLITTER_DELIMITER}.
*/
public class StringSplitterToListConverter extends Converter<String, String, String, List<String>> {

  // Base splitter on the configured delimiter; trimming variants are derived per record.
  private Splitter splitter;
  // When true, empty tokens are dropped and each token is whitespace-trimmed.
  private boolean shouldTrimResults;

  /**
   * Reads the (fork-branch-aware) delimiter and trim-results properties.
   *
   * @throws IllegalArgumentException if the delimiter property is not set for this branch
   */
  @Override
  public Converter<String, String, String, List<String>> init(WorkUnitState workUnit) {
    String stringSplitterDelimiterKey =
        ForkOperatorUtils.getPropertyNameForBranch(workUnit, ConfigurationKeys.CONVERTER_STRING_SPLITTER_DELIMITER);
    // Message typo fixed: "with out" -> "without".
    Preconditions.checkArgument(workUnit.contains(stringSplitterDelimiterKey),
        "Cannot use " + this.getClass().getName() + " without specifying "
            + ConfigurationKeys.CONVERTER_STRING_SPLITTER_DELIMITER);
    this.splitter = Splitter.on(workUnit.getProp(stringSplitterDelimiterKey));
    this.shouldTrimResults = workUnit.getPropAsBoolean(ConfigurationKeys.CONVERTER_STRING_SPLITTER_SHOULD_TRIM_RESULTS,
        ConfigurationKeys.DEFAULT_CONVERTER_STRING_SPLITTER_SHOULD_TRIM_RESULTS);
    return this;
  }

  /** Schema passes through unchanged. */
  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return inputSchema;
  }

  /** Splits the record on the configured delimiter into a {@link List}, optionally trimming tokens. */
  @Override
  public Iterable<List<String>> convertRecord(String outputSchema, String inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    List<String> tokens;
    if (this.shouldTrimResults) {
      tokens = this.splitter.omitEmptyStrings().trimResults().splitToList(inputRecord);
    } else {
      tokens = this.splitter.splitToList(inputRecord);
    }
    return new SingleRecordIterable<>(tokens);
  }
}
| 2,924 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/string/StringFilterConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.string;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.EmptyIterable;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
* Implementation of {@link Converter} which filters strings based on whether or not they match a regex specified by
* {@link ConfigurationKeys#CONVERTER_STRING_FILTER_PATTERN}
*/
public class StringFilterConverter extends Converter<Class<String>, Class<String>, String, String> {

  // Compiled from CONVERTER_STRING_FILTER_PATTERN; an absent property yields the empty pattern.
  private Pattern pattern;
  // Lazily created on the first record and reset thereafter to avoid re-allocating a Matcher.
  private Optional<Matcher> matcher;

  @Override
  public Converter<Class<String>, Class<String>, String, String> init(WorkUnitState workUnit) {
    this.pattern = Pattern.compile(Strings.nullToEmpty(workUnit.getProp(
        ForkOperatorUtils.getPropertyNameForBranch(workUnit, ConfigurationKeys.CONVERTER_STRING_FILTER_PATTERN))));
    this.matcher = Optional.absent();
    return this;
  }

  /** Schema passes through unchanged. */
  @Override
  public Class<String> convertSchema(Class<String> inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return inputSchema;
  }

  /** Emits the record unchanged when it fully matches the configured pattern; drops it otherwise. */
  @Override
  public Iterable<String> convertRecord(Class<String> outputSchema, String inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    if (this.matcher.isPresent()) {
      this.matcher.get().reset(inputRecord);
    } else {
      this.matcher = Optional.of(this.pattern.matcher(inputRecord));
    }
    if (this.matcher.get().matches()) {
      return new SingleRecordIterable<>(inputRecord);
    }
    return new EmptyIterable<String>();
  }
}
| 2,925 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/string/TextToStringConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.string;
import org.apache.hadoop.io.Text;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
/**
* An implementation of {@link Converter} that converts input records of type {@link Text} to strings.
*
* @author Yinan Li
*/
@SuppressWarnings("unused")
public class TextToStringConverter extends Converter<Object, Object, Text, String> {

  @Override
  public Converter<Object, Object, Text, String> init(WorkUnitState workUnit) {
    super.init(workUnit);
    return this;
  }

  /** Schema passes through unchanged. */
  @Override
  public Object convertSchema(Object inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return inputSchema;
  }

  /** Emits the {@link Text} record's string content as a single output record. */
  @Override
  public Iterable<String> convertRecord(Object outputSchema, Text inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    String asString = inputRecord.toString();
    return new SingleRecordIterable<>(asString);
  }
}
| 2,926 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/string/KafkaRecordToStringConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.string;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
/**
* Implementation of {@link Converter} that converts a given {@link Object} to its {@link String} representation
*/
public class KafkaRecordToStringConverter extends Converter<Object, String, DecodeableKafkaRecord, String> {

  /** The output schema is the string form of whatever schema object was supplied. */
  @Override
  public String convertSchema(Object inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return inputSchema.toString();
  }

  /** Emits the Kafka record value's string representation as a single output record. */
  @Override
  public Iterable<String> convertRecord(String outputSchema, DecodeableKafkaRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    Object value = inputRecord.getValue();
    return new SingleRecordIterable<>(value.toString());
  }
}
| 2,927 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/filter/AvroFieldsPickConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.filter;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.apache.avro.generic.GenericRecord;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.AvroToAvroConverterBase;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.AvroUtils;
/**
* Converts schema and data by choosing only selected fields provided by user.
*/
public class AvroFieldsPickConverter extends AvroToAvroConverterBase {

  private static final Logger LOG = LoggerFactory.getLogger(AvroFieldsPickConverter.class);

  // Splits the user-supplied field list property, e.g. "name,address.city".
  private static final Splitter SPLITTER_ON_COMMA = Splitter.on(',').trimResults().omitEmptyStrings();
  // Splits a single fully qualified field name into its '.'-separated path segments.
  private static final Splitter SPLITTER_ON_DOT = Splitter.on('.').trimResults().omitEmptyStrings();

  /**
   * Converts the schema so that it contains only the fields named by the
   * {@link ConfigurationKeys#CONVERTER_AVRO_FIELD_PICK_FIELDS} property.
   * (Despite what older documentation said, this implementation does not use
   * AvroSchemaFieldRemover; it builds a Trie of the requested field names and rebuilds
   * the schema from it — see {@link #createSchema(Schema, String)}.)
   *
   * Each nested Avro record type increments the depth, and depth is expressed in the input
   * property with '.'. An Avro schema is always expected to start with a record type; that first
   * record is depth 0 and is not represented by a '.'. After the first record type, a field of a
   * nested record is addressed with the prefix "[record name].".
   *
   * Example:
   * <pre>
   * {
   *   "namespace": "example.avro",
   *   "type": "record",
   *   "name": "user",
   *   "fields": [
   *     { "name": "name", "type": "string" },
   *     { "name": "favorite_number", "type": [ "int", "null" ] },
   *     { "type": "record", "name": "address",
   *       "fields": [ { "name": "city", "type": "string" } ] }
   *   ]
   * }
   * </pre>
   * To choose only name and city, the property value should be "name,address.city". Note that it
   * is not "user.name", as the first record is depth zero.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.AvroToAvroConverterBase#convertSchema(org.apache.avro.Schema, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    LOG.info("Converting schema " + inputSchema);
    String fieldsStr = workUnit.getProp(ConfigurationKeys.CONVERTER_AVRO_FIELD_PICK_FIELDS);
    Preconditions.checkNotNull(fieldsStr, ConfigurationKeys.CONVERTER_AVRO_FIELD_PICK_FIELDS
        + " is required for converter " + this.getClass().getSimpleName());
    LOG.info("Converting schema to selected fields: " + fieldsStr);

    try {
      return createSchema(inputSchema, fieldsStr);
    } catch (Exception e) {
      // Surface any failure (missing field, unsupported nested type, ...) as a conversion error.
      throw new SchemaConversionException(e);
    }
  }

  /**
   * Creates a Schema containing only the specified fields.
   *
   * Traversing via either fully qualified names or the input Schema alone is inefficient, as the
   * two are hard to align. Also, since a Schema's field list is immutable, all fields must be
   * collected before a record Schema can be rebuilt.
   *
   * A Trie solves both problems: holding the fully qualified names in a Trie keeps them aligned
   * with the input schema, and the Trie can provide all children of a given prefix.
   *
   * 1. From the fully qualified field names, build a Trie representing the dependencies.
   * 2. Traverse the Trie. At a leaf, add the field; at an internal node, recurse into the child schema.
   *
   * @param schema the full input schema
   * @param fieldsStr comma-separated fully qualified names of the fields to keep
   * @return a new schema containing only the requested fields
   */
  private static Schema createSchema(Schema schema, String fieldsStr) {
    List<String> fields = SPLITTER_ON_COMMA.splitToList(fieldsStr);
    TrieNode root = buildTrie(fields);
    return createSchemaHelper(schema, root);
  }

  /**
   * Recursively rebuilds {@code inputSchema}, keeping only the fields named by the children of
   * {@code node}. Preserves the union wrapping of nested optional records.
   */
  private static Schema createSchemaHelper(final Schema inputSchema, TrieNode node) {
    List<Field> newFields = Lists.newArrayList();

    for (TrieNode child : node.children.values()) {
      // Unwrap a possible [NULL, record] union to reach the record that holds the field.
      Schema recordSchema = getActualRecord(inputSchema);
      Field innerSrcField = recordSchema.getField(child.val);
      Preconditions.checkNotNull(innerSrcField, child.val + " does not exist under " + recordSchema);

      if (child.children.isEmpty()) { //Leaf: copy the field (schema, doc, default) as-is.
        newFields.add(AvroCompatibilityHelper.createSchemaField(innerSrcField.name(), innerSrcField.schema(),
            innerSrcField.doc(), AvroUtils.getCompatibleDefaultValue(innerSrcField)));
      } else {
        // Internal node: prune the nested record's fields first, then wrap the pruned
        // schema in a new field carrying over the original doc and default value.
        Schema innerSrcSchema = innerSrcField.schema();
        Schema innerDestSchema = createSchemaHelper(innerSrcSchema, child); //Recurse of schema
        Field innerDestField = AvroCompatibilityHelper.createSchemaField(innerSrcField.name(), innerDestSchema,
            innerSrcField.doc(), AvroUtils.getCompatibleDefaultValue(innerSrcField));
        newFields.add(innerDestField);
      }
    }

    if (Type.UNION.equals(inputSchema.getType())) {
      Preconditions.checkArgument(inputSchema.getTypes().size() <= 2,
          "For union type in nested record, it should only have NULL and Record type");
      Schema recordSchema = getActualRecord(inputSchema);
      Schema newRecord = Schema.createRecord(recordSchema.getName(), recordSchema.getDoc(), recordSchema.getNamespace(),
          recordSchema.isError());
      newRecord.setFields(newFields);
      // Recreate the union with the same arity as the input: a single-branch union stays
      // single-branch, otherwise the result is rebuilt as [NULL, record].
      if (inputSchema.getTypes().size() == 1) {
        return Schema.createUnion(newRecord);
      }
      return Schema.createUnion(Lists.newArrayList(Schema.create(Type.NULL), newRecord));
    }

    // Plain record: rebuild it with only the selected fields.
    Schema newRecord = Schema.createRecord(inputSchema.getName(), inputSchema.getDoc(), inputSchema.getNamespace(),
        inputSchema.isError());
    newRecord.setFields(newFields);
    return newRecord;
  }

  /**
   * For a schema that is a UNION of NULL with a record type, returns the record branch; a plain
   * RECORD schema is returned unchanged.
   *
   * @param inputSchema a RECORD schema, or a UNION containing at most NULL plus one RECORD
   * @return the underlying record schema
   */
  private static Schema getActualRecord(Schema inputSchema) {
    if (Type.RECORD.equals(inputSchema.getType())) {
      return inputSchema;
    }

    Preconditions.checkArgument(Type.UNION.equals(inputSchema.getType()), "Nested schema is only support with either record or union type of null with record");
    Preconditions.checkArgument(inputSchema.getTypes().size() <= 2,
        "For union type in nested record, it should only have NULL and Record type");

    for (Schema inner : inputSchema.getTypes()) {
      if (Type.NULL.equals(inner.getType())) {
        continue;
      }
      Preconditions.checkArgument(Type.RECORD.equals(inner.getType()), "For union type in nested record, it should only have NULL and Record type");
      return inner;
    }
    throw new IllegalArgumentException(inputSchema + " is not supported.");
  }

  /** Builds a Trie from the list of fully qualified field names. */
  private static TrieNode buildTrie(List<String> fqns) {
    TrieNode root = new TrieNode(null);
    for (String fqn : fqns) {
      root.add(fqn);
    }
    return root;
  }

  /**
   * Simple Trie over the '.'-separated segments of fully qualified field names.
   * The root holds a null value; each child node holds one path segment.
   */
  private static class TrieNode {
    // The path segment this node represents (null for the root).
    private String val;
    // Child nodes keyed by segment, kept in insertion order.
    private Map<String, TrieNode> children;

    TrieNode(String val) {
      this.val = val;
      this.children = Maps.newLinkedHashMap();
    }

    /** Adds one fully qualified field name, creating intermediate nodes as needed. */
    void add(String fqn) {
      addHelper(this, SPLITTER_ON_DOT.splitToList(fqn).iterator(), fqn);
    }

    /**
     * Walks/creates one node per remaining segment of {@code fqn}.
     * NOTE(review): the duplicate check only fires when the FINAL segment already exists as a
     * child; a prefix overlap such as "a" followed by "a.b" is not rejected and silently turns
     * the leaf "a" into an internal node — confirm this is intended.
     */
    void addHelper(TrieNode node, Iterator<String> fqnIterator, String fqn) {
      if (!fqnIterator.hasNext()) {
        return;
      }

      String val = fqnIterator.next();
      TrieNode child = node.children.get(val);
      if (child == null) {
        child = new TrieNode(val);
        node.children.put(val, child);
      } else if (!fqnIterator.hasNext()) {
        //Leaf but there's existing record
        throw new IllegalArgumentException("Duplicate record detected: " + fqn);
      }
      addHelper(child, fqnIterator, fqn);
    }

    @Override
    public String toString() {
      return "[val: " + this.val + " , children: " + this.children.values() + " ]";
    }
  }

  /**
   * Converts the input record to the pruned output schema via
   * {@link AvroUtils#convertRecordSchema}.
   * {@inheritDoc}
   */
  @Override
  public Iterable<GenericRecord> convertRecordImpl(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      return new SingleRecordIterable<>(AvroUtils.convertRecordSchema(inputRecord, outputSchema));
    } catch (IOException e) {
      throw new DataConversionException(e);
    }
  }
}
| 2,928 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/filter/AvroFilterConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.filter;
import org.apache.gobblin.converter.AvroToAvroConverterBase;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.EmptyIterable;
import org.apache.gobblin.util.AvroUtils;
/**
* Basic implementation of a filter converter for Avro data. It filters out Avro records based on a specified Avro
* field name, and its expected value. The converter only supports equality operations and only performs the comparison
* based on the string representation of the value.
*/
public class AvroFilterConverter extends AvroToAvroConverterBase {

  // Fully qualified name of the Avro field to inspect.
  private String fieldName;
  // Expected value of that field; compared via the value's string representation.
  private String fieldValue;

  /**
   * The config must specify {@link ConfigurationKeys#CONVERTER_FILTER_FIELD_NAME} to indicate which field to retrieve
   * from the Avro record and {@link ConfigurationKeys#CONVERTER_FILTER_FIELD_VALUE} to indicate the expected value of
   * the field.
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.Converter#init(org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Converter<Schema, Schema, GenericRecord, GenericRecord> init(WorkUnitState workUnit) {
    // Build the error messages from the ConfigurationKeys constants so they can never drift
    // out of sync with the property names actually being checked.
    Preconditions.checkArgument(workUnit.contains(ConfigurationKeys.CONVERTER_FILTER_FIELD_NAME),
        "Missing required property " + ConfigurationKeys.CONVERTER_FILTER_FIELD_NAME + " for the "
            + this.getClass().getSimpleName() + " class.");
    Preconditions.checkArgument(workUnit.contains(ConfigurationKeys.CONVERTER_FILTER_FIELD_VALUE),
        "Missing required property " + ConfigurationKeys.CONVERTER_FILTER_FIELD_VALUE + " for the "
            + this.getClass().getSimpleName() + " class.");

    this.fieldName = workUnit.getProp(ConfigurationKeys.CONVERTER_FILTER_FIELD_NAME);
    this.fieldValue = workUnit.getProp(ConfigurationKeys.CONVERTER_FILTER_FIELD_VALUE);
    return super.init(workUnit);
  }

  /**
   * Returns the inputSchema unmodified; filtering does not change the schema.
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.AvroToAvroConverterBase#convertSchema(org.apache.avro.Schema, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return inputSchema;
  }

  /**
   * Retrieves the configured field from the inputRecord and checks whether its string form equals
   * the expected {@link #fieldValue}. If so, returns a {@link SingleRecordIterable} containing the
   * input record; otherwise the record is dropped by returning an {@link EmptyIterable}.
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.AvroToAvroConverterBase#convertRecord(org.apache.avro.Schema, org.apache.avro.generic.GenericRecord, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Iterable<GenericRecord> convertRecordImpl(Schema outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    Optional<Object> fieldValue = AvroUtils.getFieldValue(inputRecord, this.fieldName);
    if (fieldValue.isPresent() && fieldValue.get().toString().equals(this.fieldValue)) {
      return new SingleRecordIterable<>(inputRecord);
    }
    return new EmptyIterable<>();
  }
}
| 2,929 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/serde/OrcSerDeWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.converter.serde;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.io.Writable;
/**
* The Hive's {@link OrcSerde} caches converted records - the {@link OrcSerde} has a single
* {@link org.apache.hadoop.hive.ql.io.orc.OrcSerde.OrcSerdeRow} and every time the
* {@link org.apache.hadoop.hive.serde2.Serializer#serialize(Object, ObjectInspector)} method is called, the object is
* re-used.
*
* The problem is that {@link org.apache.hadoop.hive.ql.io.orc.OrcSerde.OrcSerdeRow} is package protected and has no
* public constructor, so no copy can be made. This would be fine if {@link org.apache.hadoop.hive.ql.io.orc.OrcSerde.OrcSerdeRow}
* is immediately written out. But all Gobblin jobs have a buffer that the writer reads from. This buffering can cause
* race conditions where records get dropped and duplicated.
*
* @author Prateek Gupta
*/
public class OrcSerDeWrapper extends OrcSerde {
@Override
public Writable serialize(Object realRow, ObjectInspector inspector) {
Object realRowClone = ObjectInspectorUtils.copyToStandardObject(realRow, inspector);
return super.serialize(realRowClone, inspector);
}
}
| 2,930 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/converter/serde/HiveSerDeConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.serde;
import java.io.IOException;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.Serializer;
import org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.io.Writable;
import com.google.common.base.Throwables;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.hive.HiveSerDeWrapper;
import org.apache.gobblin.instrumented.converter.InstrumentedConverter;
import org.apache.gobblin.util.HadoopUtils;
/**
* An {@link InstrumentedConverter} that takes a {@link Writable} record, uses a Hive {@link SerDe} to
* deserialize it, and uses another Hive {@link SerDe} to serialize it into a {@link Writable} record.
*
* The serializer and deserializer are specified using {@link HiveSerDeWrapper#SERDE_SERIALIZER_TYPE}
* and {@link HiveSerDeWrapper#SERDE_DESERIALIZER_TYPE}.
*
* <p>
* Note this class has known issues when the {@link #serializer} is set to
* {@link org.apache.hadoop.hive.serde2.avro.AvroSerializer}. Mainly due to the fact that the Avro Serializer caches
* returned objects, which are not immediately consumed by the
* {@link org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat}.
* </p>
*
* <p>
* This class has been tested when the {@link #serializer} has been set to
* {@link org.apache.hadoop.hive.ql.io.orc.OrcSerde} and should work as expected assuming the proper configurations
* are set (refer to the Gobblin documentation for a full example).
* </p>
*
* @author Ziyang Liu
*/
@SuppressWarnings("deprecation")
@Slf4j
public class HiveSerDeConverter extends InstrumentedConverter<Object, Object, Writable, Writable> {

  // Hive SerDe used to serialize the deserialized row into the output Writable.
  private Serializer serializer;
  // Hive SerDe used to deserialize the input Writable record.
  private Deserializer deserializer;

  /**
   * Instantiates and initializes the serializer and deserializer configured via
   * {@link HiveSerDeWrapper#SERDE_SERIALIZER_TYPE} and {@link HiveSerDeWrapper#SERDE_DESERIALIZER_TYPE}.
   *
   * @param state the work unit state carrying the SerDe configuration
   * @return this converter
   * @throws RuntimeException if the SerDes cannot be instantiated or initialized
   */
  @Override
  public HiveSerDeConverter init(WorkUnitState state) {
    super.init(state);
    Configuration conf = HadoopUtils.getConfFromState(state);

    try {
      this.serializer = (Serializer) HiveSerDeWrapper.getSerializer(state).getSerDe();
      this.deserializer = (Deserializer) HiveSerDeWrapper.getDeserializer(state).getSerDe();
      this.deserializer.initialize(conf, state.getProperties());
      // Set the Hive column names/types in the state BEFORE initializing the serializer,
      // so that serializer.initialize() can pick them up from state.getProperties().
      setColumnsIfPossible(state);
      this.serializer.initialize(conf, state.getProperties());
    } catch (IOException e) {
      log.error("Failed to instantiate serializer and deserializer", e);
      // Guava's Throwables.propagate is deprecated; wrap the checked exception directly.
      throw new RuntimeException(e);
    } catch (SerDeException e) {
      log.error("Failed to initialize serializer and deserializer", e);
      throw new RuntimeException(e);
    }

    return this;
  }

  /**
   * Derives the Hive column names and types from the Avro schema found in the state's
   * properties and stores them under {@link IOConstants#COLUMNS} / {@link IOConstants#COLUMNS_TYPES}.
   */
  private void setColumnsIfPossible(WorkUnitState state)
      throws SerDeException {
    AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(
        AvroSerdeUtils.determineSchemaOrReturnErrorSchema(state.getProperties()));
    List<String> columnNames = aoig.getColumnNames();
    List<TypeInfo> columnTypes = aoig.getColumnTypes();

    state.setProp(IOConstants.COLUMNS, StringUtils.join(columnNames, ","));
    state.setProp(IOConstants.COLUMNS_TYPES, StringUtils.join(columnTypes, ","));
  }

  /**
   * Deserializes the input record with {@link #deserializer} and re-serializes it with
   * {@link #serializer}, returning the converted record as a single-element iterable.
   *
   * @throws DataConversionException if either SerDe operation fails
   */
  @Override
  public Iterable<Writable> convertRecordImpl(Object outputSchema, Writable inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      Object deserialized = this.deserializer.deserialize(inputRecord);
      Writable convertedRecord = this.serializer.serialize(deserialized, this.deserializer.getObjectInspector());
      return new SingleRecordIterable<>(convertedRecord);
    } catch (SerDeException e) {
      throw new DataConversionException(e);
    }
  }

  /** Schema is passed through unchanged; the SerDes own the physical representation. */
  @Override
  public Object convertSchema(Object inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    return inputSchema;
  }
}
| 2,931 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/util/TestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.gobblin_scopes.JobScopeInstance;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* Utilities for unit tests.
*/
public class TestUtils {

  /** Static utility class; not meant to be instantiated. */
  private TestUtils() {
  }

  /**
   * Create a {@link WorkUnitState} with a {@link org.apache.gobblin.broker.iface.SharedResourcesBroker} for running
   * unit tests of constructs.
   *
   * @return a fresh {@link WorkUnitState} whose broker is scoped to a synthetic test job
   */
  public static WorkUnitState createTestWorkUnitState() {
    return new WorkUnitState(new WorkUnit(), new State(), SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
        ConfigFactory.empty(), GobblinScopeTypes.GLOBAL.defaultScopeInstance()).
        newSubscopedBuilder(new JobScopeInstance("jobName", "testJob")));
  }
}
| 2,932 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/config/ConfigBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.config;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
/** A helper class to create {@link Config} objects */
public class ConfigBuilder {
  /** Primitive-valued settings, staged until {@link #build()}. */
  private final Map<String, Object> primitiveProps = new HashMap<>();
  /** Optional description of where the config originates; used in Config error reporting. */
  private final Optional<String> originDescription;
  /** Accumulates list-valued settings as they are added. */
  private Config currentConfig;

  ConfigBuilder(Optional<String> originDescription) {
    this.originDescription = originDescription;
    this.currentConfig =
        originDescription.isPresent() ? ConfigFactory.empty(this.originDescription.get()) : ConfigFactory.empty();
  }

  /**
   * Loads properties which have a given name prefix into the config. The following restrictions
   * apply:
   * <ul>
   * <li>No property can have a name that is equal to the prefix
   * <li>After removal of the prefix, the remaining property name should start with a letter.
   * </ul>
   *
   * @param props the collection from where to load the properties
   * @param scopePrefix only properties with this prefix will be considered. The prefix will be
   *        removed from the names of the keys added to the {@link Config} object.
   *        The prefix can be an empty string but cannot be null.
   * @return this builder
   * @throws RuntimeException if a matching property name equals the prefix or the scoped name
   *         does not start with a letter
   */
  public ConfigBuilder loadProps(Properties props, String scopePrefix) {
    Preconditions.checkNotNull(props);
    Preconditions.checkNotNull(scopePrefix);
    int scopePrefixLen = scopePrefix.length();
    for (Map.Entry<Object, Object> propEntry : props.entrySet()) {
      String propName = propEntry.getKey().toString();
      if (propName.startsWith(scopePrefix)) {
        String scopedName = propName.substring(scopePrefixLen);
        if (scopedName.isEmpty()) {
          throw new RuntimeException("Illegal scoped property:" + propName);
        }
        if (!Character.isAlphabetic(scopedName.charAt(0))) {
          throw new RuntimeException(
              "Scoped name for property " + propName + " should start with a character: " + scopedName);
        }
        this.primitiveProps.put(scopedName, propEntry.getValue());
      }
    }
    return this;
  }

  /** Stages a primitive-valued setting; applied to the config at {@link #build()} time. */
  public ConfigBuilder addPrimitive(String name, Object value) {
    this.primitiveProps.put(name, value);
    return this;
  }

  /** Adds a list-valued setting, tagged with the origin description when one was supplied. */
  public ConfigBuilder addList(String name, Iterable<?> values) {
    this.currentConfig = this.originDescription.isPresent()
        ? this.currentConfig.withValue(name, ConfigValueFactory.fromIterable(values, this.originDescription.get()))
        : this.currentConfig.withValue(name, ConfigValueFactory.fromIterable(values));
    return this;
  }

  /** Creates a builder with no origin description. */
  public static ConfigBuilder create() {
    return new ConfigBuilder(Optional.<String> absent());
  }

  /** Creates a builder whose Config values carry the given origin description. */
  public static ConfigBuilder create(String originDescription) {
    return new ConfigBuilder(Optional.of(originDescription));
  }

  /**
   * Builds the {@link Config}.
   *
   * Note: primitives are parsed first and the list values act as a fallback, so a primitive
   * added under the same name as a list wins regardless of the order the two were added in.
   */
  public Config build() {
    return ConfigFactory.parseMap(this.primitiveProps).withFallback(this.currentConfig);
  }
}
| 2,933 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/security | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/security/ssl/SSLContextFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.security.ssl;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.security.KeyStore;
import org.apache.commons.io.FileUtils;
import com.typesafe.config.Config;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.util.ConfigUtils;
/**
* Provide different approaches to create a {@link SSLContext}
*/
public class SSLContextFactory {
  public static final String KEY_STORE_FILE_PATH = "keyStoreFilePath";
  public static final String KEY_STORE_PASSWORD = "keyStorePassword";
  public static final String KEY_STORE_TYPE = "keyStoreType";
  public static final String TRUST_STORE_FILE_PATH = "trustStoreFilePath";
  public static final String TRUST_STORE_PASSWORD = "trustStorePassword";

  public static final String DEFAULT_ALGORITHM = "SunX509";
  public static final String DEFAULT_PROTOCOL = "TLS";
  public static final String JKS_STORE_TYPE_NAME = "JKS";
  public static final String P12_STORE_TYPE_NAME = "PKCS12";

  /** Static factory class; not meant to be instantiated. */
  private SSLContextFactory() {
  }

  /**
   * Create a {@link SSLContext} instance
   *
   * @param keyStoreFile a p12 or jks file depending on key store type
   * @param keyStorePassword password to access the key store
   * @param keyStoreType type of key store; must be PKCS12 or JKS
   * @param trustStoreFile a jks file
   * @param trustStorePassword password to access the trust store
   * @throws IllegalArgumentException if {@code keyStoreType} is neither PKCS12 nor JKS
   * @throws RuntimeException wrapping any keystore/TLS setup failure
   */
  public static SSLContext createInstance(File keyStoreFile, String keyStorePassword, String keyStoreType, File trustStoreFile,
      String trustStorePassword) {
    if (!keyStoreType.equalsIgnoreCase(P12_STORE_TYPE_NAME) && !keyStoreType.equalsIgnoreCase(JKS_STORE_TYPE_NAME)) {
      throw new IllegalArgumentException("Unsupported keyStoreType: " + keyStoreType);
    }

    try {
      // Load KeyStore
      KeyStore keyStore = KeyStore.getInstance(keyStoreType);
      keyStore.load(toInputStream(keyStoreFile), keyStorePassword.toCharArray());

      // Load TrustStore (always JKS)
      KeyStore trustStore = KeyStore.getInstance(JKS_STORE_TYPE_NAME);
      trustStore.load(toInputStream(trustStoreFile), trustStorePassword.toCharArray());

      // Set KeyManager from keyStore
      KeyManagerFactory kmf = KeyManagerFactory.getInstance(DEFAULT_ALGORITHM);
      kmf.init(keyStore, keyStorePassword.toCharArray());

      // Set TrustManager from trustStore
      TrustManagerFactory trustFact = TrustManagerFactory.getInstance(DEFAULT_ALGORITHM);
      trustFact.init(trustStore);

      // Set Context to TLS and initialize it
      SSLContext sslContext = SSLContext.getInstance(DEFAULT_PROTOCOL);
      sslContext.init(kmf.getKeyManagers(), trustFact.getTrustManagers(), null);

      return sslContext;
    } catch (Exception e) {
      // The keystore/TLS APIs throw many distinct checked exceptions; callers treat
      // any setup failure as fatal, so collapse them into a RuntimeException.
      throw new RuntimeException(e);
    }
  }

  /**
   * Create a {@link SSLContext} from a {@link Config}
   *
   * <p>
   * A sample configuration is:
   * <br> keyStoreFilePath=/path/to/key/store
   * <br> keyStorePassword=password
   * <br> keyStoreType=PKCS12
   * <br> trustStoreFilePath=/path/to/trust/store
   * <br> trustStorePassword=password
   * </p>
   *
   * @param srcConfig configuration
   * @return an instance of {@link SSLContext}
   */
  public static SSLContext createInstance(Config srcConfig) {
    // srcConfig.getString() will throw ConfigException if any key is missing
    String keyStoreFilePath = srcConfig.getString(KEY_STORE_FILE_PATH);
    String trustStoreFilePath = srcConfig.getString(TRUST_STORE_FILE_PATH);

    // Passwords may be encrypted in the config; resolve them through the PasswordManager.
    PasswordManager passwdMgr = PasswordManager.getInstance(ConfigUtils.configToState(srcConfig));
    String keyStorePassword = passwdMgr.readPassword(srcConfig.getString(KEY_STORE_PASSWORD));
    String trustStorePassword = passwdMgr.readPassword(srcConfig.getString(TRUST_STORE_PASSWORD));

    return createInstance(new File(keyStoreFilePath), keyStorePassword, srcConfig.getString(KEY_STORE_TYPE),
        new File(trustStoreFilePath), trustStorePassword);
  }

  /** Reads the whole store file into memory and exposes it as a stream. */
  private static InputStream toInputStream(File storeFile)
      throws IOException {
    byte[] data = FileUtils.readFileToByteArray(storeFile);
    return new ByteArrayInputStream(data);
  }
}
| 2,934 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/DataPublisherFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.Collections;
import org.apache.gobblin.broker.ImmediatelyInvalidResourceEntry;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.capability.Capability;
import org.apache.gobblin.configuration.State;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link SharedResourceFactory} for creating {@link DataPublisher}s.
*
* The factory creates a {@link DataPublisher} with the publisher class name and state.
*/
@Slf4j
public class DataPublisherFactory<S extends ScopeType<S>>
implements SharedResourceFactory<DataPublisher, DataPublisherKey, S> {
public static final String FACTORY_NAME = "dataPublisher";
public static <S extends ScopeType<S>> DataPublisher get(String publisherClassName, State state,
SharedResourcesBroker<S> broker) throws IOException {
try {
return broker.getSharedResource(new DataPublisherFactory<S>(), new DataPublisherKey(publisherClassName, state));
} catch (NotConfiguredException nce) {
throw new IOException(nce);
}
}
/**
* Is the publisher cacheable in the SharedResourcesBroker?
* @param publisher
* @return true if cacheable, else false
*/
public static boolean isPublisherCacheable(DataPublisher publisher) {
// only threadsafe publishers are cacheable. non-threadsafe publishers are marked immediately for invalidation
return publisher.supportsCapability(Capability.THREADSAFE, Collections.EMPTY_MAP);
}
@Override
public String getName() {
return FACTORY_NAME;
}
@Override
public SharedResourceFactoryResponse<DataPublisher> createResource(SharedResourcesBroker<S> broker,
ScopedConfigView<S, DataPublisherKey> config) throws NotConfiguredException {
try {
DataPublisherKey key = config.getKey();
String publisherClassName = key.getPublisherClassName();
State state = key.getState();
Class<? extends DataPublisher> dataPublisherClass = (Class<? extends DataPublisher>) Class
.forName(publisherClassName);
log.info("Creating data publisher with class {} in scope {}. ", publisherClassName, config.getScope().toString());
DataPublisher publisher = DataPublisher.getInstance(dataPublisherClass, state);
// If the publisher is threadsafe then it is shareable, so return it as a resource instance that may be cached
// by the broker.
// Otherwise, it is not shareable, so return it as an immediately invalidated resource that will only be returned
// once from the broker.
if (isPublisherCacheable(publisher)) {
return new ResourceInstance<>(publisher);
} else {
return new ImmediatelyInvalidResourceEntry<>(publisher);
}
} catch (ReflectiveOperationException e) {
throw new RuntimeException(e);
}
}
  @Override
  public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, DataPublisherKey> config) {
    // Auto-scope at the requesting broker's own scope type.
    return broker.selfScope().getType();
  }
}
| 2,935 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/TimestampDataPublisherWithHiveRegistration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * Wrapper for {@link TimestampDataPublisher} to publish with {@link HiveRegistrationPublisher} afterwards.
 */
public class TimestampDataPublisherWithHiveRegistration extends TimestampDataPublisher {
  private final HiveRegistrationPublisher hiveRegistrationPublisher;

  public TimestampDataPublisherWithHiveRegistration(State state) throws IOException {
    super(state);
    // Registering with the closer ties the Hive publisher's lifecycle to this publisher's.
    this.hiveRegistrationPublisher = this.closer.register(new HiveRegistrationPublisher(state));
  }

  @Override
  public void publish(Collection<? extends WorkUnitState> states) throws IOException {
    // Publish the data first, then hand the resulting directories to the Hive publisher.
    super.publish(states);
    // PUBLISHER_DIRS key must be updated for HiveRegistrationPublisher
    for (Path publishedDir : this.publisherOutputDirs) {
      this.state.appendToSetProp(ConfigurationKeys.PUBLISHER_DIRS, publishedDir.toString());
    }
    this.hiveRegistrationPublisher.publish(states);
  }
}
| 2,936 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/TaskPublisherBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicyCheckResults;
/**
 * Builder that pairs a task's {@link WorkUnitState} with its task-level policy check results
 * to produce a {@link TaskPublisher}.
 */
public class TaskPublisherBuilder {
  private final WorkUnitState workUnitState;
  private final TaskLevelPolicyCheckResults results;

  public TaskPublisherBuilder(WorkUnitState workUnitState, TaskLevelPolicyCheckResults results) {
    this.workUnitState = workUnitState;
    this.results = results;
  }

  /** Static factory mirroring the constructor. */
  public static TaskPublisherBuilder newBuilder(WorkUnitState taskState, TaskLevelPolicyCheckResults results) {
    return new TaskPublisherBuilder(taskState, results);
  }

  /** Creates the {@link TaskPublisher} from the collected state and results. */
  public TaskPublisher build() throws Exception {
    return new TaskPublisher(this.workUnitState, this.results);
  }
}
| 2,937 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/NoopPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.Collection;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * An no-op optimistic publisher that just inspects the state of all workunits
 * that are successful and marks them committed.
 */
@Slf4j
public class NoopPublisher extends DataPublisher {

  public NoopPublisher(State state) {
    super(state);
  }

  /**
   * @deprecated {@link DataPublisher} initialization should be done in the constructor.
   */
  @Override
  public void initialize() throws IOException {
    // Nothing to initialize; construction is sufficient.
  }

  /**
   * Marks every successful workunit as committed without moving any data.
   *
   * @param states task states to inspect
   */
  @Override
  public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
    for (WorkUnitState workUnitState : states) {
      if (workUnitState.getWorkingState() == WorkUnitState.WorkingState.SUCCESSFUL) {
        workUnitState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
        log.info("Marking state committed");
      }
    }
  }

  /**
   * No metadata is published by this publisher.
   *
   * @param states task states (unused)
   */
  @Override
  public void publishMetadata(Collection<? extends WorkUnitState> states) throws IOException {
    // Intentionally a no-op.
  }

  /**
   * Releases resources; this publisher holds none, so this is a no-op.
   *
   * @throws IOException never thrown by this implementation
   */
  @Override
  public void close() throws IOException {
    // Nothing to release.
  }
}
| 2,938 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/BaseDataPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigRenderOptions;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.dataset.PartitionDescriptor;
import org.apache.gobblin.metadata.MetadataMerger;
import org.apache.gobblin.metadata.types.StaticStringMetadataMerger;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.writer.FsDataWriter;
import org.apache.gobblin.writer.FsWriterMetrics;
import org.apache.gobblin.writer.PartitionIdentifier;
import org.apache.gobblin.writer.PartitionedDataWriter;
import static org.apache.gobblin.util.retry.RetryerFactory.*;
/**
* A basic implementation of {@link SingleTaskDataPublisher} that publishes the data from the writer output directory
* to the final output directory.
*
* <p>
* The final output directory is specified by {@link ConfigurationKeys#DATA_PUBLISHER_FINAL_DIR}. The output of each
* writer is written to this directory. Each individual writer can also specify a path in the config key
* {@link ConfigurationKeys#WRITER_FILE_PATH}. Then the final output data for a writer will be
* {@link ConfigurationKeys#DATA_PUBLISHER_FINAL_DIR}/{@link ConfigurationKeys#WRITER_FILE_PATH}. If the
* {@link ConfigurationKeys#WRITER_FILE_PATH} is not specified, a default one is assigned. The default path is
* constructed in the {@link org.apache.gobblin.source.workunit.Extract#getOutputFilePath()} method.
* </p>
*
* <p>
* This publisher records all dirs it publishes to in property {@link ConfigurationKeys#PUBLISHER_DIRS}. Each time it
* publishes a {@link Path}, if the path is a directory, it records this path. If the path is a file, it records the
* parent directory of the path. To change this behavior one may override
* {@link #recordPublisherOutputDirs(Path, Path, int)}.
* </p>
*/
public class BaseDataPublisher extends SingleTaskDataPublisher {
  private static final Logger LOG = LoggerFactory.getLogger(BaseDataPublisher.class);
  // Number of fork branches in the job; all per-branch lists below are indexed by branch id.
  protected final int numBranches;
  protected final List<FileSystem> writerFileSystemByBranches;
  protected final List<FileSystem> publisherFileSystemByBranches;
  protected final List<FileSystem> metaDataWriterFileSystemByBranches;
  // Owner group applied when moving paths (deprecated DATA_PUBLISHER_FINAL_DIR_GROUP).
  protected final List<Optional<String>> publisherFinalDirOwnerGroupsByBranches;
  // Owner group applied to directories the publisher itself creates (DATA_PUBLISHER_OUTPUT_DIR_GROUP).
  protected final List<Optional<String>> publisherOutputDirOwnerGroupByBranches;
  // Permissions for directories created by the publisher (not writer-created, moved directories).
  protected final List<FsPermission> permissions;
  protected final Closer closer;
  protected final Closer parallelRunnerCloser;
  protected final int parallelRunnerThreads;
  // One ParallelRunner per filesystem URI, used to move files concurrently; see getParallelRunner().
  protected final Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
  // Every directory published to; recorded into PUBLISHER_DIRS when close() runs.
  protected final Set<Path> publisherOutputDirs = Sets.newHashSet();
  protected final Optional<LineageInfo> lineageInfo;
  /* Each partition in each branch may have separate metadata. The metadata mergers are responsible
   * for aggregating this information from all workunits so it can be published.
   */
  protected final Map<PartitionIdentifier, MetadataMerger<String>> metadataMergers;
  protected final boolean shouldRetry;
  static final String DATA_PUBLISHER_RETRY_PREFIX = ConfigurationKeys.DATA_PUBLISHER_PREFIX + ".retry.";
  static final String PUBLISH_RETRY_ENABLED = DATA_PUBLISHER_RETRY_PREFIX + "enabled";
  static final Config PUBLISH_RETRY_DEFAULTS;
  protected final Config retryerConfig;
  // Default retry policy: exponential backoff from 5s, doubling each attempt, 2-minute overall timeout.
  static {
    Map<String, Object> configMap =
        ImmutableMap.<String, Object>builder()
            .put(RETRY_TIME_OUT_MS, TimeUnit.MINUTES.toMillis(2L)) //Overall retry for 2 minutes
            .put(RETRY_INTERVAL_MS, TimeUnit.SECONDS.toMillis(5L)) //Try to retry 5 seconds
            .put(RETRY_MULTIPLIER, 2L) // Multiply by 2 every attempt
            .put(RETRY_TYPE, RetryType.EXPONENTIAL.name())
            .build();
    PUBLISH_RETRY_DEFAULTS = ConfigFactory.parseMap(configMap);
  };
  /**
   * Constructs the publisher: builds per-branch writer/publisher filesystems, owner groups,
   * permissions, and retry configuration from the job state.
   *
   * @param state the job or task state carrying publisher configuration
   * @throws IOException if a configured filesystem cannot be obtained
   */
  public BaseDataPublisher(State state)
      throws IOException {
    super(state);
    this.closer = Closer.create();
    Configuration conf = new Configuration();
    // Add all job configuration properties so they are picked up by Hadoop
    for (String key : this.getState().getPropertyNames()) {
      conf.set(key, this.getState().getProp(key));
    }
    // Extract LineageInfo from state
    if (state instanceof SourceState) {
      lineageInfo = LineageInfo.getLineageInfo(((SourceState) state).getBroker());
    } else if (state instanceof WorkUnitState) {
      lineageInfo = LineageInfo.getLineageInfo(((WorkUnitState) state).getTaskBrokerNullable());
    } else {
      lineageInfo = Optional.absent();
    }
    this.numBranches = this.getState().getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1);
    this.shouldRetry = this.getState().getPropAsBoolean(PUBLISH_RETRY_ENABLED, false);
    this.writerFileSystemByBranches = Lists.newArrayListWithCapacity(this.numBranches);
    this.publisherFileSystemByBranches = Lists.newArrayListWithCapacity(this.numBranches);
    this.metaDataWriterFileSystemByBranches = Lists.newArrayListWithCapacity(this.numBranches);
    this.publisherFinalDirOwnerGroupsByBranches = Lists.newArrayListWithCapacity(this.numBranches);
    this.publisherOutputDirOwnerGroupByBranches = Lists.newArrayListWithCapacity(this.numBranches);
    this.permissions = Lists.newArrayListWithCapacity(this.numBranches);
    this.metadataMergers = new HashMap<>();
    // Get a FileSystem instance for each branch
    for (int i = 0; i < this.numBranches; i++) {
      // Writer filesystem falls back to the local FS when no URI is configured for the branch.
      URI writerUri = URI.create(this.getState().getProp(
          ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, this.numBranches, i),
          ConfigurationKeys.LOCAL_FS_URI));
      this.writerFileSystemByBranches.add(FileSystem.get(writerUri, conf));
      // Publisher filesystem falls back to the writer filesystem's URI.
      URI publisherUri = URI.create(this.getState().getProp(ForkOperatorUtils
          .getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_FILE_SYSTEM_URI, this.numBranches, i),
          writerUri.toString()));
      this.publisherFileSystemByBranches.add(FileSystem.get(publisherUri, conf));
      this.metaDataWriterFileSystemByBranches.add(FileSystem.get(publisherUri, conf));
      // The group(s) will be applied to the final publisher output directory(ies)
      // (Deprecated) See ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR_GROUP
      this.publisherFinalDirOwnerGroupsByBranches.add(Optional.fromNullable(this.getState().getProp(ForkOperatorUtils
          .getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR_GROUP, this.numBranches, i))));
      this.publisherOutputDirOwnerGroupByBranches.add(Optional.fromNullable(this.getState().getProp(ForkOperatorUtils
          .getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_OUTPUT_DIR_GROUP, this.numBranches, i))));
      // The permission(s) will be applied to all directories created by the publisher,
      // which do NOT include directories created by the writer and moved by the publisher.
      // The permissions of those directories are controlled by writer.file.permissions and writer.dir.permissions.
      this.permissions.add(new FsPermission(state.getPropAsShortWithRadix(
          ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_PERMISSIONS, this.numBranches, i),
          FsPermission.getDefault().toShort(), ConfigurationKeys.PERMISSION_PARSING_RADIX)));
    }
    if (this.shouldRetry) {
      // Job-level overrides under DATA_PUBLISHER_RETRY_PREFIX take precedence over the defaults.
      this.retryerConfig = ConfigBuilder.create()
          .loadProps(this.getState().getProperties(), DATA_PUBLISHER_RETRY_PREFIX)
          .build()
          .withFallback(PUBLISH_RETRY_DEFAULTS);
      LOG.info("Retry enabled for publish with config : " + retryerConfig.root().render(ConfigRenderOptions.concise()));
    } else {
      LOG.info("Retry disabled for publish.");
      this.retryerConfig = WriterUtils.NO_RETRY_CONFIG;
    }
    this.parallelRunnerThreads =
        state.getPropAsInt(ParallelRunner.PARALLEL_RUNNER_THREADS_KEY, ParallelRunner.DEFAULT_PARALLEL_RUNNER_THREADS);
    this.parallelRunnerCloser = Closer.create();
  }
  /**
   * Builds the {@link MetadataMerger} for a branch, seeding it first with any metadata already
   * present at {@code existingMetadataPath} and then with the metadata passed in job config.
   *
   * @param metadataFromConfig metadata string from job config; may be null
   * @param branchId the fork branch the merger serves
   * @param existingMetadataPath location of previously published metadata to merge in, if present
   * @return the configured merger
   * @throws IllegalArgumentException if the configured merger class is missing, cannot be built,
   *         or does not implement {@link MetadataMerger}
   */
  private MetadataMerger<String> buildMetadataMergerForBranch(String metadataFromConfig, int branchId,
      Path existingMetadataPath) {
    // Legacy behavior -- if we shouldn't publish writer state, instantiate a static metadata merger
    // that just returns the metadata from config (if any)
    if (!shouldPublishWriterMetadataForBranch(branchId)) {
      return new StaticStringMetadataMerger(metadataFromConfig);
    }
    String keyName = ForkOperatorUtils
        .getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISH_WRITER_METADATA_MERGER_NAME_KEY, this.numBranches,
            branchId);
    String className =
        this.getState().getProp(keyName, ConfigurationKeys.DATA_PUBLISH_WRITER_METADATA_MERGER_NAME_DEFAULT);
    try {
      Class<?> mdClass = Class.forName(className);
      // If the merger understands properties, use that constructor; otherwise use the default
      // parameter-less ctor
      @SuppressWarnings("unchecked")
      Object merger = GobblinConstructorUtils
          .invokeFirstConstructor(mdClass, Collections.<Object>singletonList(this.getState().getProperties()),
              Collections.<Object>emptyList());
      try {
        @SuppressWarnings("unchecked")
        MetadataMerger<String> casted = (MetadataMerger<String>) merger;
        // Merge existing metadata from the partition if it exists..
        String existingMetadata = loadExistingMetadata(existingMetadataPath, branchId);
        if (existingMetadata != null) {
          casted.update(existingMetadata);
        }
        // Then metadata from the config...
        if (metadataFromConfig != null) {
          casted.update(metadataFromConfig);
        }
        return casted;
      } catch (ClassCastException e) {
        throw new IllegalArgumentException(className + " does not implement the MetadataMerger interface", e);
      }
    } catch (ClassNotFoundException e) {
      throw new IllegalArgumentException("Specified metadata merger class " + className + " not found!", e);
    } catch (ReflectiveOperationException e) {
      throw new IllegalArgumentException("Error building merger class " + className, e);
    }
  }
/**
* Read in existing metadata as a UTF8 string.
*/
private String loadExistingMetadata(Path metadataFilename, int branchId) {
try {
FileSystem fsForBranch = writerFileSystemByBranches.get(branchId);
if (!fsForBranch.exists(metadataFilename)) {
return null;
}
FSDataInputStream existingMetadata = writerFileSystemByBranches.get(branchId).open(metadataFilename);
return IOUtils.toString(existingMetadata, StandardCharsets.UTF_8);
} catch (IOException e) {
LOG.warn("IOException {} while trying to read existing metadata {} - treating as null", e.getMessage(),
metadataFilename.toString());
return null;
}
}
  /**
   * No-op: all initialization happens in the constructor.
   */
  @Override
  public void initialize()
      throws IOException {
    // Nothing needs to be done since the constructor already initializes the publisher.
  }
@Override
public void close()
throws IOException {
try {
for (Path path : this.publisherOutputDirs) {
this.state.appendToSetProp(ConfigurationKeys.PUBLISHER_DIRS, path.toString());
}
this.state.setProp(ConfigurationKeys.PUBLISHER_LATEST_FILE_ARRIVAL_TIMESTAMP, System.currentTimeMillis());
} finally {
this.closer.close();
}
}
private void addLineageInfo(WorkUnitState state, int branchId) {
if (!this.lineageInfo.isPresent()) {
LOG.info("Will not add lineage info");
return;
}
// Final dataset descriptor
DatasetDescriptor datasetDescriptor = createDestinationDescriptor(state, branchId);
List<PartitionDescriptor> partitions = PartitionedDataWriter.getPartitionInfoAndClean(state, branchId);
List<Descriptor> descriptors = new ArrayList<>();
if (partitions.size() == 0) {
// Report as dataset level lineage
descriptors.add(datasetDescriptor);
} else {
// Report as partition level lineage
for (PartitionDescriptor partition : partitions) {
descriptors.add(partition.copyWithNewDataset(datasetDescriptor));
}
}
this.lineageInfo.get().putDestination(descriptors, branchId, state);
}
/**
* Create destination dataset descriptor
*/
protected DatasetDescriptor createDestinationDescriptor(WorkUnitState state, int branchId) {
Path publisherOutputDir = getPublisherOutputDir(state, branchId);
FileSystem fs = this.publisherFileSystemByBranches.get(branchId);
DatasetDescriptor destination = new DatasetDescriptor(fs.getScheme(), fs.getUri(), publisherOutputDir.toString());
destination.addMetadata(DatasetConstants.FS_URI, fs.getUri().toString());
destination.addMetadata(DatasetConstants.BRANCH, String.valueOf(branchId));
return destination;
}
@Override
public void publishData(WorkUnitState state)
throws IOException {
for (int branchId = 0; branchId < this.numBranches; branchId++) {
publishSingleTaskData(state, branchId);
}
this.parallelRunnerCloser.close();
}
  /**
   * This method publishes output data for a single task based on the given {@link WorkUnitState}.
   * Output data from other tasks won't be published even if they are in the same folder.
   *
   * @param state the task state whose output should be published
   * @param branchId the fork branch to publish
   */
  private void publishSingleTaskData(WorkUnitState state, int branchId)
      throws IOException {
    // publishSingleTaskData=true; a fresh set is passed since no cross-task de-duplication is needed.
    publishData(state, branchId, true, new HashSet<Path>());
    addLineageInfo(state, branchId);
  }
@Override
public void publishData(Collection<? extends WorkUnitState> states)
throws IOException {
// We need a Set to collect unique writer output paths as multiple tasks may belong to the same extract. Tasks that
// belong to the same Extract will by default have the same output directory
Set<Path> writerOutputPathsMoved = Sets.newHashSet();
for (WorkUnitState workUnitState : states) {
for (int branchId = 0; branchId < this.numBranches; branchId++) {
publishMultiTaskData(workUnitState, branchId, writerOutputPathsMoved);
}
}
this.parallelRunnerCloser.close();
}
  /**
   * This method publishes task output data for the given {@link WorkUnitState}, but if there are output data of
   * other tasks in the same folder, it may also publish those data.
   *
   * @param state the task state whose output should be published
   * @param branchId the fork branch to publish
   * @param writerOutputPathsMoved writer output paths already moved by other tasks of the same extract
   */
  protected void publishMultiTaskData(WorkUnitState state, int branchId, Set<Path> writerOutputPathsMoved)
      throws IOException {
    publishData(state, branchId, false, writerOutputPathsMoved);
    addLineageInfo(state, branchId);
  }
protected void publishData(WorkUnitState state, int branchId, boolean publishSingleTaskData,
Set<Path> writerOutputPathsMoved)
throws IOException {
// Get a ParallelRunner instance for moving files in parallel
ParallelRunner parallelRunner = this.getParallelRunner(this.writerFileSystemByBranches.get(branchId));
// The directory where the workUnitState wrote its output data.
Path writerOutputDir = WriterUtils.getWriterOutputDir(state, this.numBranches, branchId);
if (!this.writerFileSystemByBranches.get(branchId).exists(writerOutputDir)) {
LOG.warn(String.format("Branch %d of WorkUnit %s produced no data", branchId, state.getId()));
return;
}
// The directory where the final output directory for this job will be placed.
// It is a combination of DATA_PUBLISHER_FINAL_DIR and WRITER_FILE_PATH.
Path publisherOutputDir = getPublisherOutputDir(state, branchId);
if (publishSingleTaskData) {
// Create final output directory
WriterUtils.mkdirsWithRecursivePermissionWithRetry(this.publisherFileSystemByBranches.get(branchId), publisherOutputDir,
this.permissions.get(branchId), retryerConfig);
if(this.publisherOutputDirOwnerGroupByBranches.get(branchId).isPresent()) {
LOG.info(String.format("Setting path %s group to %s", publisherOutputDir.toString(), this.publisherOutputDirOwnerGroupByBranches.get(branchId).get()));
HadoopUtils.setGroup(this.publisherFileSystemByBranches.get(branchId), publisherOutputDir, this.publisherOutputDirOwnerGroupByBranches.get(branchId).get());
}
addSingleTaskWriterOutputToExistingDir(writerOutputDir, publisherOutputDir, state, branchId, parallelRunner);
} else {
if (writerOutputPathsMoved.contains(writerOutputDir)) {
// This writer output path has already been moved for another task of the same extract
// If publishSingleTaskData=true, writerOutputPathMoved is ignored.
return;
}
if (this.publisherFileSystemByBranches.get(branchId).exists(publisherOutputDir)) {
// The final output directory already exists, check if the job is configured to replace it.
// If publishSingleTaskData=true, final output directory is never replaced.
boolean replaceFinalOutputDir = this.getState().getPropAsBoolean(ForkOperatorUtils
.getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_REPLACE_FINAL_DIR, this.numBranches, branchId));
// If the final output directory is not configured to be replaced, put new data to the existing directory.
if (!replaceFinalOutputDir) {
addWriterOutputToExistingDir(writerOutputDir, publisherOutputDir, state, branchId, parallelRunner);
writerOutputPathsMoved.add(writerOutputDir);
return;
}
// Delete the final output directory if it is configured to be replaced
LOG.info("Deleting publisher output dir " + publisherOutputDir);
this.publisherFileSystemByBranches.get(branchId).delete(publisherOutputDir, true);
} else {
// Create the parent directory of the final output directory if it does not exist
WriterUtils.mkdirsWithRecursivePermissionWithRetry(this.publisherFileSystemByBranches.get(branchId),
publisherOutputDir.getParent(), this.permissions.get(branchId), retryerConfig);
if(this.publisherOutputDirOwnerGroupByBranches.get(branchId).isPresent()) {
LOG.info(String.format("Setting path %s group to %s", publisherOutputDir.toString(), this.publisherOutputDirOwnerGroupByBranches.get(branchId).get()));
HadoopUtils.setGroup(this.publisherFileSystemByBranches.get(branchId), publisherOutputDir, this.publisherOutputDirOwnerGroupByBranches.get(branchId).get());
}
}
movePath(parallelRunner, state, writerOutputDir, publisherOutputDir, branchId);
writerOutputPathsMoved.add(writerOutputDir);
}
}
  /**
   * Get the output directory path this {@link BaseDataPublisher} will write to.
   *
   * <p>
   * This is the default implementation. Subclasses of {@link BaseDataPublisher} may override this
   * to write to a custom directory or write using a custom directory structure or naming pattern.
   * </p>
   *
   * @param workUnitState a {@link WorkUnitState} object
   * @param branchId the fork branch ID
   * @return the output directory path this {@link BaseDataPublisher} will write to
   */
  protected Path getPublisherOutputDir(WorkUnitState workUnitState, int branchId) {
    // Default: DATA_PUBLISHER_FINAL_DIR combined with the writer file path for this branch.
    return WriterUtils.getDataPublisherFinalDir(workUnitState, this.numBranches, branchId);
  }
  /**
   * Moves only the files this task recorded in WRITER_FINAL_OUTPUT_FILE_PATHS from the writer output
   * directory into the publisher output directory, preserving each file's path relative to the
   * writer output directory.
   */
  protected void addSingleTaskWriterOutputToExistingDir(Path writerOutputDir, Path publisherOutputDir,
      WorkUnitState workUnitState, int branchId, ParallelRunner parallelRunner)
      throws IOException {
    String outputFilePropName = ForkOperatorUtils
        .getPropertyNameForBranch(ConfigurationKeys.WRITER_FINAL_OUTPUT_FILE_PATHS, this.numBranches, branchId);
    if (!workUnitState.contains(outputFilePropName)) {
      LOG.warn("Missing property " + outputFilePropName + ". This task may have pulled no data.");
      return;
    }
    Iterable<String> taskOutputFiles = workUnitState.getPropAsSet(outputFilePropName);
    for (String taskOutputFile : taskOutputFiles) {
      Path taskOutputPath = new Path(taskOutputFile);
      if (!this.writerFileSystemByBranches.get(branchId).exists(taskOutputPath)) {
        LOG.warn("Task output file " + taskOutputFile + " doesn't exist.");
        continue;
      }
      // NOTE(review): assumes writerOutputDir.toString() occurs within taskOutputFile; if it does
      // not, indexOf returns -1 and the computed suffix is wrong — confirm writers always report
      // paths under the writer output dir.
      String pathSuffix = taskOutputFile
          .substring(taskOutputFile.indexOf(writerOutputDir.toString()) + writerOutputDir.toString().length() + 1);
      Path publisherOutputPath = new Path(publisherOutputDir, pathSuffix);
      // Ensure the destination's parent exists with the configured publisher permissions.
      WriterUtils.mkdirsWithRecursivePermissionWithRetry(this.publisherFileSystemByBranches.get(branchId),
          publisherOutputPath.getParent(), this.permissions.get(branchId), retryerConfig);
      movePath(parallelRunner, workUnitState, taskOutputPath, publisherOutputPath, branchId);
    }
  }
protected void addWriterOutputToExistingDir(Path writerOutputDir, Path publisherOutputDir,
WorkUnitState workUnitState, int branchId, ParallelRunner parallelRunner)
throws IOException {
boolean preserveFileName = workUnitState.getPropAsBoolean(ForkOperatorUtils
.getPropertyNameForBranch(ConfigurationKeys.SOURCE_FILEBASED_PRESERVE_FILE_NAME, this.numBranches, branchId),
false);
// Go through each file in writerOutputDir and move it into publisherOutputDir
for (FileStatus status : this.writerFileSystemByBranches.get(branchId).listStatus(writerOutputDir)) {
// Preserve the file name if configured, use specified name otherwise
Path finalOutputPath = preserveFileName ? new Path(publisherOutputDir, workUnitState.getProp(ForkOperatorUtils
.getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_FINAL_NAME, this.numBranches, branchId)))
: new Path(publisherOutputDir, status.getPath().getName());
movePath(parallelRunner, workUnitState, status.getPath(), finalOutputPath, branchId);
}
}
  /**
   * Records the publish destination and delegates the move of {@code src} to {@code dst} to the
   * branch's {@link ParallelRunner} on the publisher filesystem.
   */
  protected void movePath(ParallelRunner parallelRunner, State state, Path src, Path dst, int branchId)
      throws IOException {
    LOG.info(String.format("Moving %s to %s", src, dst));
    boolean overwrite = state.getPropAsBoolean(ConfigurationKeys.DATA_PUBLISHER_OVERWRITE_ENABLED, false);
    // Track the destination so close() can record it into PUBLISHER_DIRS.
    this.publisherOutputDirs.addAll(recordPublisherOutputDirs(src, dst, branchId));
    parallelRunner.movePath(src, this.publisherFileSystemByBranches.get(branchId), dst, overwrite,
        this.publisherFinalDirOwnerGroupsByBranches.get(branchId));
  }
protected Collection<Path> recordPublisherOutputDirs(Path src, Path dst, int branchId)
throws IOException {
// Getting file status from src rather than dst, because at this time dst doesn't yet exist.
// If src is a dir, add dst to the set of paths. Otherwise, add dst's parent.
if (this.writerFileSystemByBranches.get(branchId).getFileStatus(src).isDirectory()) {
return ImmutableList.<Path>of(dst);
}
return ImmutableList.<Path>of(dst.getParent());
}
private ParallelRunner getParallelRunner(FileSystem fs) {
String uri = fs.getUri().toString();
if (!this.parallelRunners.containsKey(uri)) {
this.parallelRunners
.put(uri, this.parallelRunnerCloser.register(new ParallelRunner(this.parallelRunnerThreads, fs)));
}
return this.parallelRunners.get(uri);
}
  /**
   * Merge all of the metadata output from each work-unit and publish the merged record.
   * @param states States from all tasks
   * @throws IOException If there is an error publishing the file
   */
  @Override
  public void publishMetadata(Collection<? extends WorkUnitState> states)
      throws IOException {
    Set<String> partitions = new HashSet<>();
    // There should be one merged metadata file per branch; first merge all of the pieces together
    mergeMetadataAndCollectPartitionNames(states, partitions);
    partitions.removeIf(Objects::isNull);
    // Now, pick an arbitrary WorkUnitState to get config information around metadata such as
    // the desired output filename. We assume that publisher config settings
    // are the same across all workunits so it doesn't really matter which workUnit we retrieve this information
    // from.
    WorkUnitState anyState = states.iterator().next();
    for (int branchId = 0; branchId < numBranches; branchId++) {
      String mdOutputPath = getMetadataOutputPathFromState(anyState, branchId);
      String userSpecifiedPath = getUserSpecifiedOutputPathFromState(anyState, branchId);
      // With no partitions (or an explicit user-specified path), publish a single metadata file for
      // the branch; otherwise publish one metadata file per partition directory.
      if (partitions.isEmpty() || userSpecifiedPath != null) {
        publishMetadata(getMergedMetadataForPartitionAndBranch(null, branchId),
            branchId,
            getMetadataOutputFileForBranch(anyState, branchId));
      } else {
        String metadataFilename = getMetadataFileNameForBranch(anyState, branchId);
        if (mdOutputPath == null || metadataFilename == null) {
          LOG.info("Metadata filename not set for branch " + String.valueOf(branchId) + ": not publishing metadata.");
          continue;
        }
        for (String partition : partitions) {
          publishMetadata(getMergedMetadataForPartitionAndBranch(partition, branchId),
              branchId,
              new Path(new Path(mdOutputPath, partition), metadataFilename));
        }
      }
    }
  }
  /*
   * Metadata that we publish can come from several places:
   *  - It can be passed in job config (DATA_PUBLISHER_METADATA_STR)
   *  - It can be picked up from previous runs of a job (if the output partition already exists)
   * -- The above two are handled when we construct a new MetadataMerger
   *
   *  - The source/converters/writers associated with each branch of a job may add their own metadata
   *    (eg: this dataset is encrypted using AES256). This is returned by getIntermediateMetadataFromState()
   *    and fed into the MetadataMerger.
   *  - FsWriterMetrics can be emitted and rolled up into metadata. These metrics are specific to a {partition, branch}
   *    combo as they mention per-output file metrics. This is also fed into metadata mergers.
   *
   *  Each writer should only be a part of one branch, but it may be responsible for multiple partitions.
   */
  // Side effects: fills `partitionPaths` (shared across all work units; may contain a null
  // placeholder for partition-less work units) and updates the `metadataMergers` map, creating
  // a merger per {partition, branch} on first sight.
  private void mergeMetadataAndCollectPartitionNames(Collection<? extends WorkUnitState> states,
      Set<String> partitionPaths) {
    for (WorkUnitState workUnitState : states) {
      // First extract the partition paths and metrics from the work unit. This is essentially
      // equivalent to grouping FsWriterMetrics by {partitionKey, branchId} and extracting
      // all partitionPaths into a set.
      Map<PartitionIdentifier, Set<FsWriterMetrics>> metricsByPartition = new HashMap<>();
      boolean partitionFound = false;
      for (Map.Entry<Object, Object> property : workUnitState.getProperties().entrySet()) {
        if (((String) property.getKey()).startsWith(ConfigurationKeys.WRITER_PARTITION_PATH_KEY)) {
          partitionPaths.add((String) property.getValue());
          partitionFound = true;
        } else if (((String) property.getKey()).startsWith(FsDataWriter.FS_WRITER_METRICS_KEY)) {
          try {
            FsWriterMetrics parsedMetrics = FsWriterMetrics.fromJson((String) property.getValue());
            partitionPaths.add(parsedMetrics.getPartitionInfo().getPartitionKey());
            Set<FsWriterMetrics> metricsForPartition =
                metricsByPartition.computeIfAbsent(parsedMetrics.getPartitionInfo(), k -> new HashSet<>());
            metricsForPartition.add(parsedMetrics);
          } catch (IOException e) {
            // Best-effort: a malformed metrics property should not fail metadata publishing.
            LOG.warn("Error parsing metrics from property {} - ignoring", (String) property.getValue());
          }
        }
      }
      // no specific partitions - add null as a placeholder
      if (!partitionFound) {
        partitionPaths.add(null);
      }
      final String configBasedMetadata = getMetadataFromWorkUnitState(workUnitState);
      // Now update all metadata mergers with branch metadata + partition metrics.
      // NOTE: `partitionPaths` accumulates across work units, so mergers for partitions seen in
      // earlier work units also receive this work unit's intermediate metadata.
      for (int branchId = 0; branchId < numBranches; branchId++) {
        for (String partition : partitionPaths) {
          PartitionIdentifier partitionIdentifier = new PartitionIdentifier(partition, branchId);
          // Effectively-final copy of the loop variable for use inside the lambda below.
          final int branch = branchId;
          MetadataMerger<String> mdMerger = metadataMergers.computeIfAbsent(partitionIdentifier,
              k -> buildMetadataMergerForBranch(configBasedMetadata, branch,
                  getMetadataOutputFileForBranch(workUnitState, branch)));
          if (shouldPublishWriterMetadataForBranch(branchId)) {
            String md = getIntermediateMetadataFromState(workUnitState, branchId);
            mdMerger.update(md);
            Set<FsWriterMetrics> metricsForPartition =
                metricsByPartition.getOrDefault(partitionIdentifier, Collections.emptySet());
            for (FsWriterMetrics metrics : metricsForPartition) {
              mdMerger.update(metrics);
            }
          }
        }
      }
    }
  }
  /**
   * Publish metadata for each branch. We expect the metadata to be of String format and
   * populated in either the WRITER_MERGED_METADATA_KEY state or the WRITER_METADATA_KEY configuration key.
   *
   * <p>Convenience overload that delegates to {@link #publishMetadata(Collection)} with a
   * single-element collection.
   */
  @Override
  public void publishMetadata(WorkUnitState state)
      throws IOException {
    publishMetadata(Collections.singleton(state));
  }
/**
* Publish metadata to a set of paths
*/
private void publishMetadata(String metadataValue, int branchId, Path metadataOutputPath)
throws IOException {
try {
if (metadataOutputPath == null) {
LOG.info("Metadata output path not set for branch " + String.valueOf(branchId) + ", not publishing.");
return;
}
if (metadataValue == null) {
LOG.info("No metadata collected for branch " + String.valueOf(branchId) + ", not publishing.");
return;
}
FileSystem fs = this.metaDataWriterFileSystemByBranches.get(branchId);
if (!fs.exists(metadataOutputPath.getParent())) {
WriterUtils.mkdirsWithRecursivePermissionWithRetry(fs, metadataOutputPath, this.permissions.get(branchId), retryerConfig);
}
//Delete the file if metadata already exists
if (fs.exists(metadataOutputPath)) {
HadoopUtils.deletePath(fs, metadataOutputPath, false);
}
LOG.info("Writing metadata for branch " + String.valueOf(branchId) + " to " + metadataOutputPath.toString());
try (FSDataOutputStream outputStream = fs.create(metadataOutputPath)) {
outputStream.write(metadataValue.getBytes(StandardCharsets.UTF_8));
}
} catch (IOException e) {
LOG.error("Metadata file is not generated: " + e, e);
}
}
private String getMetadataFileNameForBranch(WorkUnitState state, int branchId) {
// Note: This doesn't follow the pattern elsewhere in Gobblin where we have branch specific config
// parameters! Leaving this way for backwards compatibility.
String filePrefix = state.getProp(ConfigurationKeys.DATA_PUBLISHER_METADATA_OUTPUT_FILE);
return ForkOperatorUtils.getPropertyNameForBranch(filePrefix, this.numBranches, branchId);
}
private Path getMetadataOutputFileForBranch(WorkUnitState state, int branchId) {
String metaDataOutputDirStr = getMetadataOutputPathFromState(state, branchId);
String fileName = getMetadataFileNameForBranch(state, branchId);
if (metaDataOutputDirStr == null || fileName == null) {
return null;
}
return new Path(metaDataOutputDirStr, fileName);
}
private String getUserSpecifiedOutputPathFromState(WorkUnitState state, int branchId) {
String outputDir = state.getProp(ForkOperatorUtils
.getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISHER_METADATA_OUTPUT_DIR, this.numBranches, branchId));
// An older version of this code did not get a branch specific PUBLISHER_METADATA_OUTPUT_DIR so fallback
// for compatibility's sake
if (outputDir == null && this.numBranches > 1) {
outputDir = state.getProp(ConfigurationKeys.DATA_PUBLISHER_METADATA_OUTPUT_DIR);
if (outputDir != null) {
LOG.warn("Branches are configured for this job but a per branch metadata output directory was not set;"
+ " is this intended?");
}
}
return outputDir;
}
private String getMetadataOutputPathFromState(WorkUnitState state, int branchId) {
String outputDir = getUserSpecifiedOutputPathFromState(state, branchId);
// Just write out to the regular output path if a metadata specific path hasn't been provided
if (outputDir == null) {
String publisherOutputDir = getPublisherOutputDir(state, branchId).toString();
LOG.info("Missing metadata output directory path : " + ConfigurationKeys.DATA_PUBLISHER_METADATA_OUTPUT_DIR
+ " in the config; assuming outputPath " + publisherOutputDir);
return publisherOutputDir;
}
return outputDir;
}
/*
* Retrieve intermediate metadata (eg the metadata stored by each writer) for a given state and branch id.
*/
private String getIntermediateMetadataFromState(WorkUnitState state, int branchId) {
return state.getProp(
ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_METADATA_KEY, this.numBranches, branchId));
}
/*
* Get the merged metadata given a workunit state and branch id. This method assumes
* all intermediate metadata has already been passed to the MetadataMerger.
*
* If metadata mergers are not configured, instead return the metadata from job config that was
* passed in by the user.
*/
private String getMergedMetadataForPartitionAndBranch(String partitionId, int branchId) {
String mergedMd = null;
MetadataMerger<String> mergerForBranch = metadataMergers.get(new PartitionIdentifier(partitionId, branchId));
if (mergerForBranch != null) {
mergedMd = mergerForBranch.getMergedMetadata();
if (mergedMd == null) {
LOG.warn("Metadata merger for branch {} returned null - bug in merger?", branchId);
}
}
return mergedMd;
}
private boolean shouldPublishWriterMetadataForBranch(int branchId) {
String keyName = ForkOperatorUtils
.getPropertyNameForBranch(ConfigurationKeys.DATA_PUBLISH_WRITER_METADATA_KEY, this.numBranches, branchId);
return this.getState().getPropAsBoolean(keyName, false);
}
  /**
   * Retrieve metadata from job state config (the user-supplied DATA_PUBLISHER_METADATA_STR),
   * or null when it was not configured.
   */
  private String getMetadataFromWorkUnitState(WorkUnitState workUnitState) {
    return workUnitState.getProp(ConfigurationKeys.DATA_PUBLISHER_METADATA_STR);
  }
  /**
   * The BaseDataPublisher relies on publishData() to create and clean-up the output directories, so data
   * has to be published before the metadata can be.
   *
   * @return always false: data first, then metadata
   */
  @Override
  protected boolean shouldPublishMetadataFirst() {
    return false;
  }
}
| 2,939 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/DataPublisherKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.util.Objects;

import org.apache.gobblin.broker.iface.SharedResourceKey;
import org.apache.gobblin.configuration.State;

import lombok.Getter;
/**
 * {@link SharedResourceKey} for requesting {@link DataPublisher}s from a
 * {@link org.apache.gobblin.broker.iface.SharedResourceFactory}.
 */
@Getter
public class DataPublisherKey implements SharedResourceKey {
  private final String publisherClassName;
  // Carried along for factory construction; intentionally NOT part of equals/hashCode so that
  // keys with the same publisher class resolve to the same shared resource.
  private final State state;

  public DataPublisherKey(String publisherClassName, State state) {
    this.publisherClassName = publisherClassName;
    this.state = state;
  }

  @Override
  public String toConfigurationKey() {
    return this.publisherClassName;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    DataPublisherKey that = (DataPublisherKey) o;
    // Objects.equals replaces the manual null-safe comparison; equality is based on the
    // publisher class name only, consistent with hashCode below.
    return Objects.equals(this.publisherClassName, that.publisherClassName);
  }

  @Override
  public int hashCode() {
    // Null-safe: Objects.hashCode(null) == 0, matching the previous manual handling.
    return Objects.hashCode(this.publisherClassName);
  }
}
| 2,940 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/TimePartitionedStreamingDataPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.WriterUtils;
@Slf4j
public class TimePartitionedStreamingDataPublisher extends TimePartitionedDataPublisher {
  // Used to emit lineage events after data has been published.
  private final MetricContext metricContext;

  public TimePartitionedStreamingDataPublisher(State state) throws IOException {
    super(state);
    this.metricContext = Instrumented.getMetricContext(state, TimePartitionedStreamingDataPublisher.class);
  }

  /**
   * This method publishes task output data for the given {@link WorkUnitState}, but if there are output data of
   * other tasks in the same folder, it may also publish those data.
   */
  // NOTE(review): calls super.publishMultiTaskData with the same signature, so this presumably
  // overrides the base method -- consider adding @Override so the compiler verifies it.
  protected void publishMultiTaskData(WorkUnitState state, int branchId, Set<Path> writerOutputPathsMoved)
      throws IOException {
    // Record the dataset dir on the state before publishing so downstream consumers
    // (e.g. lineage) can read it from the work unit state.
    state.setProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR, this.getPublisherOutputDir(state, branchId).toString());
    super.publishMultiTaskData(state, branchId, writerOutputPathsMoved);
  }

  @Override
  protected void publishData(WorkUnitState state, int branchId, boolean publishSingleTaskData,
      Set<Path> writerOutputPathsMoved) throws IOException {
    // The directory where the workUnitState wrote its output data.
    Path writerOutputDir = WriterUtils.getWriterOutputDir(state, this.numBranches, branchId);
    if (!this.writerFileSystemByBranches.get(branchId).exists(writerOutputDir)) {
      log.warn(String.format("Branch %d of WorkUnit %s produced no data", branchId, state.getId()));
      return;
    }
    // The directory where the final output directory for this job will be placed.
    // It is a combination of DATA_PUBLISHER_FINAL_DIR and WRITER_FILE_PATH.
    Path publisherOutputDir = getPublisherOutputDir(state, branchId);
    if (!this.publisherFileSystemByBranches.get(branchId).exists(publisherOutputDir)) {
      // Create the directory of the final output directory if it does not exist before we do the actual publish
      // This is used to force the publisher save recordPublisherOutputDirs as the granularity to be parent of new file paths
      // which will be used to do hive registration
      WriterUtils.mkdirsWithRecursivePermissionWithRetry(this.publisherFileSystemByBranches.get(branchId),
          publisherOutputDir, this.permissions.get(branchId), retryerConfig);
    }
    super.publishData(state, branchId, publishSingleTaskData, writerOutputPathsMoved);
  }

  @Override
  public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
    publishDataImpl(states);
    //Clean up state to remove filenames which have been committed from the state object
    wusCleanUp(states);
  }

  /**
   * Publish data for all branches of all work units, wait for the async moves to complete,
   * mark the work units COMMITTED, and emit lineage events for those that carry lineage info.
   */
  public void publishDataImpl(Collection<? extends WorkUnitState> states) throws IOException {
    // We need a Set to collect unique writer output paths as multiple tasks may belong to the same extract. Tasks that
    // belong to the same Extract will by default have the same output directory
    Set<Path> writerOutputPathsMoved = Sets.newHashSet();
    for (WorkUnitState workUnitState : states) {
      for (int branchId = 0; branchId < this.numBranches; branchId++) {
        publishMultiTaskData(workUnitState, branchId, writerOutputPathsMoved);
      }
    }
    //Wait for any submitted ParallelRunner threads to finish
    for (ParallelRunner runner : this.parallelRunners.values()) {
      runner.waitForTasks();
    }
    for (WorkUnitState workUnitState : states) {
      // Upon successfully committing the data to the final output directory, set states
      // of successful tasks to COMMITTED. leaving states of unsuccessful ones unchanged.
      // This makes sense to the COMMIT_ON_PARTIAL_SUCCESS policy.
      workUnitState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
    }
    // Only emit lineage for work units that actually carry lineage information.
    ArrayList<WorkUnitState> statesWithLineage = Lists.newArrayList();
    for (WorkUnitState state: states) {
      if (LineageInfo.hasLineageInfo(state)) {
        statesWithLineage.add(state);
      }
    }
    long startTime = System.currentTimeMillis();
    submitLineageEvents(statesWithLineage);
    log.info("Emitting lineage events took {} millis", System.currentTimeMillis() - startTime);
  }

  // Group the states by lineage event and submit one event per group.
  private void submitLineageEvents(Collection<? extends WorkUnitState> states) {
    for (Map.Entry<String, Collection<WorkUnitState>> entry : LineageInfo.aggregateByLineageEvent(states).entrySet()) {
      LineageInfo.submitLineageEvent(entry.getKey(), entry.getValue(), metricContext);
    }
  }

  /**
   * A helper method to clean up {@link WorkUnitState}: removes the per-branch committed-file
   * and lineage-destination properties that were only needed during publishing.
   *
   * @param states work unit states to clean; must be non-empty (the first state is used to
   *               read the common branch count)
   */
  protected void wusCleanUp(Collection<? extends WorkUnitState> states) {
    // use the first work unit state to get common properties
    // NOTE(review): reads the literal "fork.branches" rather than a ConfigurationKeys constant --
    // presumably equivalent; verify against ConfigurationKeys.
    WorkUnitState wuState = states.stream().findFirst().get();
    int numBranches = wuState.getPropAsInt("fork.branches", 1);
    // clean up state kept for data publishing
    for (WorkUnitState state : states) {
      for (int branchId = 0; branchId < numBranches; branchId++) {
        String outputFilePropName =
            ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_FINAL_OUTPUT_FILE_PATHS, numBranches,
                branchId);
        if (state.contains(outputFilePropName)) {
          state.removeProp(outputFilePropName);
        }
        LineageInfo.removeDestinationProp(state, branchId);
      }
    }
  }

  @VisibleForTesting
  Set<Path> getPublishOutputDirs() {
    return this.publisherOutputDirs;
  }
}
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/TimestampDataPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* Path expected from writer:
* {writerfinaldir}/{topicname}/{dbname_tablename_xxxxx}
*
* Publisher output path:
* {publisherfinaldir}/{dbname.tablename}/{currenttimestamp}
*/
public class TimestampDataPublisher extends BaseDataPublisher {

  // Captured once at construction so all tables published by this instance share one timestamp dir.
  private final String timestamp;

  public TimestampDataPublisher(State state) throws IOException {
    super(state);
    this.timestamp = String.valueOf(System.currentTimeMillis());
  }

  /**
   * Ensure the publisher output directory exists before delegating to
   * {@link BaseDataPublisher#publishData(WorkUnitState, int, boolean, Set)}, so that tables
   * are moved one at a time rather than all at once.
   */
  @Override
  protected void publishData(WorkUnitState state, int branchId, boolean publishSingleTaskData,
      Set<Path> writerOutputPathsMoved) throws IOException {
    Path outputDir = getPublisherOutputDir(state, branchId);
    if (!this.publisherFileSystemByBranches.get(branchId).exists(outputDir)) {
      WriterUtils.mkdirsWithRecursivePermissionWithRetry(this.publisherFileSystemByBranches.get(branchId),
          outputDir, this.permissions.get(branchId), this.retryerConfig);
    }
    super.publishData(state, branchId, publishSingleTaskData, writerOutputPathsMoved);
  }

  /**
   * Rewrite the destination before moving: the schema name in the last path component is
   * converted to "dbname.tablename" via {@link #getDbTableName(String)} and the construction
   * timestamp is appended.
   *
   * Input dst format:  {finaldir}/{schemaName}
   * Output dst format: {finaldir}/{dbname.tablename}/{currenttimestamp}
   */
  @Override
  protected void movePath(ParallelRunner parallelRunner, State state, Path src, Path dst, int branchId)
      throws IOException {
    Path timestampedDst = new Path(new Path(dst.getParent().toString(), getDbTableName(dst.getName())), this.timestamp);
    if (!this.publisherFileSystemByBranches.get(branchId).exists(timestampedDst)) {
      WriterUtils.mkdirsWithRecursivePermissionWithRetry(this.publisherFileSystemByBranches.get(branchId),
          timestampedDst.getParent(), this.permissions.get(branchId), this.retryerConfig);
    }
    super.movePath(parallelRunner, state, src, timestampedDst, branchId);
  }

  /**
   * Translate schema name to "dbname.tablename" to use in path.
   *
   * @param schemaName In format "dbname_tablename_xxxxx"
   * @return db and table name in format "dbname.tablename"
   */
  private String getDbTableName(String schemaName) {
    Preconditions.checkArgument(schemaName.matches(".+_.+_.+"));
    // Replace the first '_' with '.' and drop everything from the last '_' onwards.
    int firstUnderscore = schemaName.indexOf('_');
    int lastUnderscore = schemaName.lastIndexOf('_');
    return schemaName.substring(0, firstUnderscore) + "."
        + schemaName.substring(firstUnderscore + 1, lastUnderscore);
  }
}
| 2,942 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/HiveRegistrationPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.Path;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.collect.Sets;
import com.google.common.io.Closer;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.hive.HiveRegProps;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicy;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.ExecutorsUtils;
/**
* A {@link DataPublisher} that registers the already published data with Hive.
*
* <p>
* This publisher is not responsible for publishing data, and it relies on another publisher
* to document the published paths in property {@link ConfigurationKeys#PUBLISHER_DIRS}. Thus this publisher
* should generally be used as a job level data publisher, where the task level publisher should be a publisher
* that documents the published paths, such as {@link BaseDataPublisher}.
* </p>
*
* @author Ziyang Liu
*/
@Slf4j
@Alias("hivereg")
public class HiveRegistrationPublisher extends DataPublisher {

  private static final String DATA_PUBLISH_TIME = HiveRegistrationPublisher.class.getName() + ".lastDataPublishTime";
  private static final Splitter LIST_SPLITTER_COMMA = Splitter.on(",").trimResults().omitEmptyStrings();
  public static final String HIVE_SPEC_COMPUTATION_TIMER = "hiveSpecComputationTimer";
  private static final String PATH_DEDUPE_ENABLED = "hive.registration.path.dedupe.enabled";
  private static final boolean DEFAULT_PATH_DEDUPE_ENABLED = true;

  private final Closer closer = Closer.create();
  protected final HiveRegister hiveRegister;
  // Bounded pool used to compute HiveSpecs in parallel; shut down in close().
  protected final ExecutorService hivePolicyExecutor;
  protected final MetricContext metricContext;

  /**
   * The configuration to determine if path deduplication should be enabled during Hive Registration process.
   * Recall that HiveRegistration iterate thru. each topics' data folder and obtain schema from newest partition,
   * it might be the case that a table corresponding to a registered path has a schema changed.
   * In this case, path-deduplication won't work.
   *
   * e.g. In streaming mode, there could be cases that files(e.g. avro) under single topic folder carry different schema.
   */
  protected boolean isPathDedupeEnabled;

  /**
   * Make the deduplication of path to be registered in the Publisher level,
   * So that each invocation of {@link #publishData(Collection)} contribute paths registered to this set.
   * NOTE: static, so this is shared across all publisher instances in the JVM (and not thread-safe).
   */
  private static Set<String> pathsToRegisterFromSingleState = Sets.newHashSet();

  /**
   * This collection represents all specs that were sent for Hive Registration
   * This is collected right before calling {@link HiveRegister#register(HiveSpec)}.
   * NOTE: static and unsynchronized, like the path set above.
   */
  protected static final Collection<HiveSpec> allRegisteredPartitions = new ArrayList<>();

  /**
   * @param state This is a Job State
   */
  public HiveRegistrationPublisher(State state) {
    super(state);
    this.hiveRegister = this.closer.register(HiveRegister.get(state));
    this.hivePolicyExecutor = ExecutorsUtils.loggingDecorator(Executors
        .newFixedThreadPool(new HiveRegProps(state).getNumThreads(),
            ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("HivePolicyExecutor-%d"))));
    this.metricContext = Instrumented.getMetricContext(state, HiveRegistrationPublisher.class);
    // Fixed: DEFAULT_PATH_DEDUPE_ENABLED is a static constant; previously accessed via `this.`.
    isPathDedupeEnabled = state.getPropAsBoolean(PATH_DEDUPE_ENABLED, DEFAULT_PATH_DEDUPE_ENABLED);
  }

  @Override
  public void close()
      throws IOException {
    try {
      ExecutorsUtils.shutdownExecutorService(this.hivePolicyExecutor, Optional.of(log));
    } finally {
      this.closer.close();
    }
  }

  /**
   * Submit one HiveSpec-computation task per (deduplicated) published path found in the states.
   *
   * @param states task-level states; only those carrying {@link ConfigurationKeys#PUBLISHER_DIRS}
   *               contribute paths
   * @param completionService receives one task per path to register
   * @return the number of tasks submitted (the caller must take() exactly this many results)
   */
  protected int computeSpecs(Collection<? extends WorkUnitState> states,
      CompletionService<Collection<HiveSpec>> completionService) {
    // Each state in states is task-level State, while superState is the Job-level State.
    // Using both State objects to distinguish each HiveRegistrationPolicy so that
    // they can carry task-level information to pass into Hive Partition and its corresponding Hive Table.
    // Here all runtime task-level props are injected into superstate which installed in each Policy Object.
    // runtime.props are comma-separated props collected in runtime.
    int toRegisterPathCount = 0;
    for (State state : states) {
      State taskSpecificState = state;
      if (state.contains(ConfigurationKeys.PUBLISHER_DIRS)) {
        // Upstream data attribute is specified, need to inject these info into superState as runtimeTableProps.
        if (this.hiveRegister.getProps().getUpstreamDataAttrName().isPresent()) {
          for (String attrName : LIST_SPLITTER_COMMA
              .splitToList(this.hiveRegister.getProps().getUpstreamDataAttrName().get())) {
            if (state.contains(attrName)) {
              taskSpecificState
                  .appendToListProp(HiveMetaStoreUtils.RUNTIME_PROPS, attrName + ":" + state.getProp(attrName));
            }
          }
        }
        final HiveRegistrationPolicy policy = HiveRegistrationPolicyBase.getPolicy(taskSpecificState);
        for (final String path : state.getPropAsList(ConfigurationKeys.PUBLISHER_DIRS)) {
          // Set.add returns false when the path was already present, replacing the
          // previous contains()/add() pair with a single operation.
          if (isPathDedupeEnabled && !pathsToRegisterFromSingleState.add(path)) {
            continue;
          }
          toRegisterPathCount += 1;
          completionService.submit(() -> {
            try (Timer.Context context = metricContext.timer(HIVE_SPEC_COMPUTATION_TIMER).time()) {
              return policy.getHiveSpecs(new Path(path));
            }
          });
        }
      }
    }
    return toRegisterPathCount;
  }

  @Deprecated
  @Override
  public void initialize()
      throws IOException {
  }

  /**
   * @param states This is a collection of TaskState.
   */
  @Override
  public void publishData(Collection<? extends WorkUnitState> states)
      throws IOException {
    CompletionService<Collection<HiveSpec>> completionService =
        new ExecutorCompletionService<>(this.hivePolicyExecutor);
    int toRegisterPathCount = computeSpecs(states, completionService);
    for (int i = 0; i < toRegisterPathCount; i++) {
      try {
        for (HiveSpec spec : completionService.take().get()) {
          allRegisteredPartitions.add(spec);
          this.hiveRegister.register(spec);
        }
      } catch (InterruptedException | ExecutionException e) {
        // Failure is rethrown below; the log line just records which spec computation failed.
        log.info("Failed to generate HiveSpec", e);
        throw new IOException(e);
      }
    }
    log.info("Finished registering all HiveSpecs");
  }

  @Override
  public void publishMetadata(Collection<? extends WorkUnitState> states)
      throws IOException {
    // Nothing to do
  }

  // NOTE(review): appears unused within this class; kept for compatibility. Appends the data
  // publish time (in seconds) to the Hive table/partition props.
  private static void addRuntimeHiveRegistrationProperties(State state) {
    // Use seconds instead of milliseconds to be consistent with other times stored in hive
    state.appendToListProp(HiveRegProps.HIVE_TABLE_PARTITION_PROPS, String.format("%s:%d", DATA_PUBLISH_TIME,
        TimeUnit.SECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS)));
  }
}
| 2,943 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/BaseDataPublisherWithHiveRegistration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.Collection;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* An extension of {@link BaseDataPublisher} which performs Hive registration after publishing data.
*
* <p>
* This publisher should generally be used as the job level data publisher, since doing Hive registration
* in tasks may need to create many Hive metastore connections if the number of tasks is large. To publish
* data in tasks and do Hive registration in the driver, one should use
* {@link BaseDataPublisher} as the task level publisher and
* {@link HiveRegistrationPublisher} as the job level publisher.
* </p>
*
* @author Ziyang Liu
*/
public class BaseDataPublisherWithHiveRegistration extends BaseDataPublisher {
// Performs the Hive registration step after data publishing; registered with the
// parent's closer so it is closed together with this publisher.
protected final HiveRegistrationPublisher hivePublisher;
public BaseDataPublisherWithHiveRegistration(State state) throws IOException {
super(state);
this.hivePublisher = this.closer.register(new HiveRegistrationPublisher(state));
}
/**
* Publishes the data via {@link BaseDataPublisher#publish(Collection)} first, then
* registers the published paths in Hive. Registration intentionally happens only
* after the data publish succeeds.
*/
@Override
public void publish(Collection<? extends WorkUnitState> states) throws IOException {
super.publish(states);
this.hivePublisher.publish(states);
}
}
| 2,944 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/TaskPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicyCheckResults;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicy;
/**
* Decides whether a task's output can be published, based on task-level quality-check
* results, component completion, and cleanup success.
*/
public class TaskPublisher {
private final TaskLevelPolicyCheckResults results;
private static final Logger LOG = LoggerFactory.getLogger(TaskPublisher.class);
/**
* Outcome of a publishability check, from full success down to early failure.
*/
public enum PublisherState {
SUCCESS, // Data and metadata are successfully published
CLEANUP_FAIL, // Data and metadata were published, but cleanup failed
POLICY_TESTS_FAIL, // All tests didn't pass, no data committed
COMPONENTS_NOT_FINISHED // All components did not complete, no data committed
}
/**
* @param workUnitState the state of the work unit being published (currently unused,
* kept for interface compatibility)
* @param results task-level policy check results consulted by {@link #passedAllTests()}
*/
public TaskPublisher(WorkUnitState workUnitState, TaskLevelPolicyCheckResults results) throws Exception {
this.results = results;
}
/**
* Checks, in order: component completion, policy test results, and cleanup, returning
* the first failure encountered or {@link PublisherState#SUCCESS} if all pass.
*/
public PublisherState canPublish() throws Exception {
// Guard clauses: bail out at the first failed stage.
if (!allComponentsFinished()) {
return PublisherState.COMPONENTS_NOT_FINISHED;
}
LOG.info("All components finished successfully, checking quality tests");
if (!passedAllTests()) {
return PublisherState.POLICY_TESTS_FAIL;
}
LOG.info("All required tests for this task passed.");
if (!cleanup()) {
return PublisherState.CLEANUP_FAIL;
}
LOG.info("Cleanup for task publisher executed successfully.");
return PublisherState.SUCCESS;
}
/**
* Returns true if all tests from the PolicyChecker pass, false otherwise.
* A test only fails the task when its result is FAILED and its type is FAIL;
* other policy types (e.g. advisory ones) do not block publishing.
*/
public boolean passedAllTests() {
for (Map.Entry<TaskLevelPolicy.Result, TaskLevelPolicy.Type> entry : this.results.getPolicyResults().entrySet()) {
// Enum constants are singletons, so identity comparison is safe and null-proof.
if (entry.getKey() == TaskLevelPolicy.Result.FAILED && entry.getValue() == TaskLevelPolicy.Type.FAIL) {
return false;
}
}
return true;
}
/**
* Returns true if all the components finished, false otherwise.
* NOTE(review): currently a placeholder that always returns true; the real check
* would need to parse information from TaskState.
*/
public boolean allComponentsFinished() {
// Have to parse some information from TaskState
return true;
}
/**
* Cleans up any tmp folders used by the Task.
* Return true if successful, false otherwise.
* NOTE(review): currently a placeholder that always returns true.
*/
public boolean cleanup() throws Exception {
return true;
}
}
| 2,945 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/CommitSequencePublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.google.common.collect.Iterables;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.commit.CommitSequence;
import org.apache.gobblin.commit.FsRenameCommitStep;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.util.ParallelRunner;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* An implementation of {@link DataPublisher} for exactly-once delivery.
*
* <p>
* This publisher does not actually publish data, instead it constructs a {@link CommitSequence.Builder}.
* The builder is used by Gobblin runtime to build a {@link CommitSequence}, which is then persisted
* and executed.
* </p>
*
* @author Ziyang Liu
*/
@Alpha
@Slf4j
public class CommitSequencePublisher extends BaseDataPublisher {
// Builder for the commit sequence that Gobblin runtime will persist and execute.
// Reset to absent when there is nothing to publish.
@Getter
protected Optional<CommitSequence.Builder> commitSequenceBuilder = Optional.of(new CommitSequence.Builder());
public CommitSequencePublisher(State state) throws IOException {
super(state);
}
/**
* Runs the parent publish logic (which only builds commit steps via {@link #movePath}),
* then stamps the commit sequence builder with the job name and dataset URN taken from
* the first work unit state. If there are no states, the builder is cleared.
*/
@Override
public void publish(Collection<? extends WorkUnitState> states) throws IOException {
super.publish(states);
if (states.isEmpty()) {
log.warn("No workunitstate to publish");
this.commitSequenceBuilder = Optional.<CommitSequence.Builder>absent();
return;
}
WorkUnitState firstState = Iterables.get(states, 0);
String jobName = firstState.getProp(ConfigurationKeys.JOB_NAME_KEY);
String datasetUrn = firstState.getProp(ConfigurationKeys.DATASET_URN_KEY, ConfigurationKeys.DEFAULT_DATASET_URN);
this.commitSequenceBuilder.get().withJobName(jobName).withDatasetUrn(datasetUrn);
}
/**
* This method does not actually move data, but it creates an {@link FsRenameCommitStep}
* describing the src -> dst rename for the given branch.
*/
@Override
protected void movePath(ParallelRunner parallelRunner, State state, Path src, Path dst, int branchId)
throws IOException {
log.info(String.format("Creating CommitStep for moving %s to %s", src, dst));
boolean overwrite = state.getPropAsBoolean(ConfigurationKeys.DATA_PUBLISHER_OVERWRITE_ENABLED, false);
FsRenameCommitStep.Builder<?> stepBuilder = this.commitSequenceBuilder.get()
.beginStep(FsRenameCommitStep.Builder.class)
.withProps(this.state)
.from(src)
.withSrcFs(this.writerFileSystemByBranches.get(branchId))
.to(dst)
.withDstFs(this.publisherFileSystemByBranches.get(branchId));
if (overwrite) {
stepBuilder.overwrite();
}
stepBuilder.endStep();
}
}
| 2,946 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/TaskPublisherBuilderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.qualitychecker.task.TaskLevelPolicyCheckResults;
/**
* Factory that delegates to {@link TaskPublisherBuilder#newBuilder} to create
* {@link TaskPublisherBuilder} instances.
* NOTE(review): this is a stateless utility class; it could declare a private
* constructor, but the implicit public one is kept for backward compatibility.
*/
public class TaskPublisherBuilderFactory {
public static TaskPublisherBuilder newTaskPublisherBuilder(WorkUnitState workUnitState,
TaskLevelPolicyCheckResults results) {
return TaskPublisherBuilder.newBuilder(workUnitState, results);
}
}
| 2,947 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/publisher/TimePartitionedDataPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.publisher;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.util.FileListUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.WriterUtils;
/**
* For time partition jobs, writer output directory is
* $GOBBLIN_WORK_DIR/task-output/{extractId}/{tableName}/{partitionPath},
* where partition path is the time bucket, e.g., 2015/04/08/15.
*
* Publisher output directory is $GOBBLIN_WORK_DIR/job-output/{tableName}/{partitionPath}
*/
public class TimePartitionedDataPublisher extends BaseDataPublisher {
public TimePartitionedDataPublisher(State state) throws IOException {
super(state);
}
/**
* This method needs to be overridden for TimePartitionedDataPublisher, since the output
* folder structure contains timestamp, we have to move the files recursively.
*
* For example, move {writerOutput}/2015/04/08/15/output.avro to
* {publisherOutput}/2015/04/08/15/output.avro
*/
@Override
protected void addWriterOutputToExistingDir(Path writerOutput, Path publisherOutput, WorkUnitState workUnitState,
int branchId, ParallelRunner parallelRunner) throws IOException {
String writerOutputBase = writerOutput.toString();
for (FileStatus fileStatus : FileListUtils.listFilesRecursively(this.writerFileSystemByBranches.get(branchId),
writerOutput)) {
String fullPath = fileStatus.getPath().toString();
// Path of the file relative to the writer output dir, e.g. 2015/04/08/15/output.avro
String relativePath = fullPath.substring(fullPath.indexOf(writerOutputBase) + writerOutputBase.length() + 1);
Path destination = new Path(publisherOutput, relativePath);
// Ensure the timestamped parent directories exist under the publisher output dir.
WriterUtils.mkdirsWithRecursivePermissionWithRetry(this.publisherFileSystemByBranches.get(branchId),
destination.getParent(), this.permissions.get(branchId), this.retryerConfig);
movePath(parallelRunner, workUnitState, fileStatus.getPath(), destination, branchId);
}
}
}
| 2,948 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/RegexBasedPartitionedRetriever.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.source.extractor.hadoop.HadoopFsHelper;
import org.apache.gobblin.util.PathUtils;
/**
* A {@link PartitionAwareFileRetriever} that extracts a numeric watermark from
* top-level partition directory names using a configurable regex (capture group 1).
*/
public class RegexBasedPartitionedRetriever implements PartitionAwareFileRetriever {
private static final Logger LOGGER = LoggerFactory.getLogger(RegexBasedPartitionedRetriever.class);
// Compiled from DATE_PARTITIONED_SOURCE_PARTITION_PATTERN; group(1) must capture
// the watermark portion of a partition directory name.
private Pattern pattern;
private HadoopFsHelper helper;
// Base directory whose immediate subdirectories are the partitions.
private Path sourceDir;
// File extension to process, with or without leading dot (normalized in getFileFilter()).
private final String expectedExtension;
// Partitions newer than (now - leadTime) are skipped; see getFilesToProcess().
private Duration leadTime;
private boolean schemaInSourceDir;
private String schemaFile;
public RegexBasedPartitionedRetriever(String expectedExtension) {
this.expectedExtension = expectedExtension;
}
/**
* Reads the partition regex, lead time, source directory, and schema-file settings
* from the job state. Fails fast if no partition pattern is configured.
*/
@Override
public void init(SourceState state) {
String regexPattern = state.getProp(PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_PATTERN);
Preconditions.checkNotNull(regexPattern, "Must specify a regex pattern in " +
PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_PATTERN
);
this.leadTime = PartitionAwareFileRetrieverUtils.getLeadTimeDurationFromConfig(state);
this.pattern = Pattern.compile(regexPattern);
this.helper = new HadoopFsHelper(state);
this.sourceDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));
this.schemaInSourceDir = state.getPropAsBoolean(ConfigurationKeys.SCHEMA_IN_SOURCE_DIR,
ConfigurationKeys.DEFAULT_SCHEMA_IN_SOURCE_DIR);
this.schemaFile = this.schemaInSourceDir ? state.getProp(ConfigurationKeys.SCHEMA_FILENAME,
ConfigurationKeys.DEFAULT_SCHEMA_FILENAME) : "";
}
/**
* Interprets the configured watermark string as a plain long (ms since epoch).
*/
@Override
public long getWatermarkFromString(String watermark) {
// Subclasses can override this with DateTimeFormatter and/or specify via config
return Long.parseLong(watermark);
}
/**
* Extracts the watermark substring (capture group 1 of the configured pattern)
* from a partition directory name.
*
* @throws IllegalArgumentException if the name does not match or has no capture group
*/
protected String extractWatermarkFromDirectory(String directoryName) {
Matcher matcher = pattern.matcher(directoryName);
if (!matcher.matches() || matcher.groupCount() < 1) {
throw new IllegalArgumentException(directoryName + " does not match regex " + pattern.toString());
}
return matcher.group(1);
}
// Advance by 1 ms per batch: watermark values here are opaque longs, so the
// smallest possible increment avoids skipping partitions.
@Override
public long getWatermarkIncrementMs() {
return 1;
}
/**
* Lists matching files from every partition whose watermark is strictly between
* minWatermark and (now - leadTime), in ascending watermark order.
* maxFilesToReturn is a soft cap: a partition is always consumed whole before
* the size check is applied.
*/
@Override
public List<FileInfo> getFilesToProcess(long minWatermark, int maxFilesToReturn)
throws IOException {
// This implementation assumes snapshots are always in the root directory and the number of them
// remains relatively small
long maxAllowedWatermark = new DateTime().minus(leadTime).getMillis();
try {
// NOTE(review): helper.connect() has no matching close here — presumably the
// helper/filesystem lifetime is managed elsewhere; confirm before changing.
this.helper.connect();
FileSystem fs = helper.getFileSystem();
List<FileInfo> filesToProcess = new ArrayList<>();
List<FileInfo> outerDirectories = getOuterDirectories(fs, minWatermark, maxAllowedWatermark);
for (FileInfo outerDirectory: outerDirectories) {
FileStatus[] files = fs.listStatus(
new Path(outerDirectory.getFilePath()),
getFileFilter()
);
for (FileStatus file: files) {
// Each file inherits the watermark and partition name of its directory.
filesToProcess.add(new FileInfo(
file.getPath().toString(),
file.getLen(),
outerDirectory.getWatermarkMsSinceEpoch(),
outerDirectory.getPartitionName()
));
}
// Soft cap: checked only after a whole partition has been added.
if (filesToProcess.size() > maxFilesToReturn) {
break;
}
}
return filesToProcess;
} catch (FileBasedHelperException e) {
throw new IOException("Error initializing Hadoop connection", e);
}
}
/**
* Returns the partition directories under sourceDir whose watermark is strictly
* within (minWatermark, maxAllowedWatermark), sorted ascending by watermark.
* Non-directories and names not matching the pattern are skipped.
*/
private List<FileInfo> getOuterDirectories(FileSystem fs, long minWatermark, long maxAllowedWatermark) throws IOException {
LOGGER.debug("Listing contents of {}", sourceDir);
FileStatus[] fileStatus = fs.listStatus(sourceDir);
List<FileInfo> outerDirectories = new ArrayList<>();
for (FileStatus file: fileStatus) {
if (!file.isDirectory()) {
LOGGER.debug("Skipping non-directory {}", file.getPath().toUri());
continue;
}
try {
long watermark = getWatermarkFromString(
extractWatermarkFromDirectory(file.getPath().getName())
);
if (watermark > minWatermark && watermark < maxAllowedWatermark) {
LOGGER.info("Processing directory {} with watermark {}",
file.getPath(),
watermark);
// fileSize 0: size is irrelevant for directory entries; the partition name is
// the directory path relative to sourceDir.
outerDirectories.add(new FileInfo(
file.getPath().toString(),
0,
watermark, PathUtils.relativizePath(file.getPath(), sourceDir).toString()
));
} else {
LOGGER.info("Ignoring directory {} - watermark {} is not between minWatermark {} and (now-leadTime) {}",
file.getPath(), watermark, minWatermark, maxAllowedWatermark);
}
} catch (IllegalArgumentException e) {
// Thrown by extractWatermarkFromDirectory (and possibly parseLong): not a
// partition directory, so skip it rather than fail the listing.
LOGGER.info("Directory {} ({}) does not match pattern {}; skipping", file.getPath().getName(),
file.getPath(),
this.pattern.toString());
}
}
Collections.sort(outerDirectories);
return outerDirectories;
}
/**
* This method is to filter out files that don't need to be processed by extension
* (also excluding the schema file when it lives in the source dir)
* @return the pathFilter
*/
private PathFilter getFileFilter() {
// Normalize the extension so both ".avro" and "avro" configurations work.
final String extension = (this.expectedExtension.startsWith(".")) ?
this.expectedExtension :
"." + this.expectedExtension;
return new PathFilter() {
@Override
public boolean accept(Path path) {
return path.getName().endsWith(extension) &&
!(schemaInSourceDir && path.getName().equals(schemaFile)) ;
}
};
}
}
| 2,949 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/PartitionAwareFileRetriever.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import org.apache.gobblin.configuration.SourceState;
/**
* Interface for an object that, given a sourcePath and a low watermark, can return a set of files
* to process that are newer than the low watermark.
*/
public interface PartitionAwareFileRetriever {
/**
* Initialize the retriever with configuration parameters
*/
void init(SourceState state);
/**
* Retrieve a watermark (in milliseconds since epoch) from a String representation.
* Users can set a low watermark in the `date.partitioned.source.min.watermark.value` configuration setting,
* and this function translates that into an internal watermark.
*
* Actual format is up to the developer but generally this is some sort of DateTimeFormatter (eg translate 2000-01-01
* to the right value).
*/
long getWatermarkFromString(String watermark);
/**
* Amount of milliseconds to increment the watermark after a batch of files has been processed. For example,
* a daily-based partitioner may want to increment the watermark by 24 hours to see the next batch of files
* in a dataset.
*/
long getWatermarkIncrementMs();
/**
* Return a list of files to process that have a watermark later than minWatermark. Generally, a FileRetriever should
* find each valid partition after minWatermark, sorted by ascending time.
*
* For each partition:
* 1. Add all files in the partition
* 2. If the # of files in the return list is now greater than maxFilesToReturn, return immediately
* 3. Else continue to next partition until there are none left
*
* maxFilesToReturn is a soft cap - all files in a partition should be returned by getFilesToProcess().
*/
List<FileInfo> getFilesToProcess(long minWatermark, int maxFilesToReturn) throws IOException;
/**
* Immutable value class describing a single file to process: its path, size,
* watermark, and the partition it belongs to. Natural ordering is by watermark,
* then file path, then partition name.
*
* NOTE(review): compareTo() does not consider fileSize while equals() does, so
* the ordering is not fully consistent with equals (two FileInfos differing only
* in fileSize compare as 0 but are not equal).
*/
public static class FileInfo implements Comparable<FileInfo> {
private final String filePath;
private final long fileSize;
private final long watermarkMsSinceEpoch;
private final String partitionName;
public FileInfo(String filePath, long fileSize, long watermarkMsSinceEpoch, String partitionName) {
this.fileSize = fileSize;
this.filePath = filePath;
this.watermarkMsSinceEpoch = watermarkMsSinceEpoch;
this.partitionName = partitionName;
}
// Convenience constructor: uses the watermark's decimal string as the partition name.
public FileInfo(String filePath, long fileSize, long watermarkMsSinceEpoch) {
this(filePath, fileSize, watermarkMsSinceEpoch, Long.toString(watermarkMsSinceEpoch));
}
public String getFilePath() {
return filePath;
}
public long getWatermarkMsSinceEpoch() {
return watermarkMsSinceEpoch;
}
public long getFileSize() {
return fileSize;
}
public String getPartitionName() {
return partitionName;
}
@Override
public String toString() {
return "FileInfo{" + "filePath='" + filePath + '\'' + ", watermarkMsSinceEpoch=" + watermarkMsSinceEpoch +
", partitionName=" + partitionName + '}';
}
// Order by watermark first, then filePath, then partitionName (fileSize is ignored).
@Override
public int compareTo(FileInfo o) {
if (watermarkMsSinceEpoch < o.watermarkMsSinceEpoch) {
return -1;
} else if (watermarkMsSinceEpoch > o.watermarkMsSinceEpoch) {
return 1;
} else {
int ret = filePath.compareTo(o.filePath);
return ret == 0 ? partitionName.compareTo(o.partitionName) : ret;
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
FileInfo fileInfo = (FileInfo) o;
return fileSize == fileInfo.fileSize && watermarkMsSinceEpoch == fileInfo.watermarkMsSinceEpoch && Objects
.equals(filePath, fileInfo.filePath) && Objects.equals(partitionName, fileInfo.partitionName);
}
@Override
public int hashCode() {
return Objects.hash(filePath, fileSize, watermarkMsSinceEpoch, partitionName);
}
}
}
| 2,950 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/DatePartitionedAvroFileSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DatePartitionedAvroFileExtractor;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.writer.partitioner.TimeBasedAvroWriterPartitioner;
/**
* Implementation of {@link org.apache.gobblin.source.Source} that reads over date-partitioned Avro data.
* This source can be regarded as the reader equivalent of {@link TimeBasedAvroWriterPartitioner}.
*
* <p>
* The class will iterate through all the data folders given by the base directory
* {@link ConfigurationKeys#SOURCE_FILEBASED_DATA_DIRECTORY} and the partitioning type
* {@link #DATE_PARTITIONED_SOURCE_PARTITION_PATTERN} or {@link #DATE_PARTITIONED_SOURCE_PARTITION_GRANULARITY}
*
* <p>
* For example, if the base directory is set to /my/data/ and daily partitioning is used, then it is assumed that
* /my/data/daily/[year]/[month]/[day] is present. It will iterate through all the data under these folders starting
* from the date specified by {@link #DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE} until either
* {@link #DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB} files have been processed, or until there is no more data
* to process. For example, if {@link #DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE} is set to 2015/01/01, then the job
* will read from the folder /my/data/daily/2015/01/02/, /my/data/daily/2015/01/03/, /my/data/2015/01/04/ etc.
* When partitions contain pre/suffixes in the form of /my/data/prefix/[year]/[month]/[day]/suffix, one can refer to
* them via the {@link #DATE_PARTITIONED_SOURCE_PARTITION_PREFIX} and {@link #DATE_PARTITIONED_SOURCE_PARTITION_SUFFIX}
* properties.
* </p>
*
* </p>
*
* The class will only process data in Avro format.
*/
public class DatePartitionedAvroFileSource extends PartitionedFileSourceBase<Schema, GenericRecord> {
public DatePartitionedAvroFileSource() {
// Only files ending in .avro are picked up by the nested date-partitioned retriever.
super(new DatePartitionedNestedRetriever(".avro"));
}
/**
* Returns an extractor that reads Avro records from the date-partitioned files
* assigned to the given work unit.
*/
@Override
public Extractor<Schema, GenericRecord> getExtractor(WorkUnitState state)
throws IOException {
return new DatePartitionedAvroFileExtractor(state);
}
}
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/PartitionAwareFileRetrieverUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import org.joda.time.Duration;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.DatePartitionType;
import static org.apache.gobblin.source.PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME;
import static org.apache.gobblin.source.PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY;
import static org.apache.gobblin.source.PartitionedFileSourceBase.DEFAULT_DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY;
import static org.apache.gobblin.source.PartitionedFileSourceBase.DEFAULT_PARTITIONED_SOURCE_PARTITION_LEAD_TIME;
/**
* Utility functions for parsing configuration parameters commonly used by {@link PartitionAwareFileRetriever}
* objects.
*/
public class PartitionAwareFileRetrieverUtils {
/**
* Retrieve the lead time duration from the LEAD_TIME and LEAD_TIME granularity config settings.
* Falls back to the default lead time when the property is unset or empty, and to the
* default granularity when no granularity is configured.
*/
public static Duration getLeadTimeDurationFromConfig(State state) {
String configuredLeadTime = state.getProp(DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME);
if (configuredLeadTime == null || configuredLeadTime.isEmpty()) {
return DEFAULT_PARTITIONED_SOURCE_PARTITION_LEAD_TIME;
}
String configuredGranularity = state.getProp(DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY);
DatePartitionType granularity = (configuredGranularity == null)
? DEFAULT_DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY
: DatePartitionType.valueOf(configuredGranularity);
return new Duration(Integer.parseInt(configuredLeadTime) * granularity.getUnitMilliseconds());
}
}
| 2,952 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/RegexPartitionedAvroFileSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.hadoop.AvroFileExtractor;
/**
* A partitioned Avro file source whose partitions are identified by a configurable
* regex (see {@link RegexBasedPartitionedRetriever}) rather than a fixed date layout.
*/
public class RegexPartitionedAvroFileSource extends PartitionedFileSourceBase<Schema, GenericRecord> {
public RegexPartitionedAvroFileSource() {
// Only files ending in .avro are picked up by the regex-based retriever.
super(new RegexBasedPartitionedRetriever(".avro"));
}
/**
* Returns an extractor that reads Avro records from the files assigned to the
* given work unit.
*/
@Override
public Extractor<Schema, GenericRecord> getExtractor(WorkUnitState state)
throws IOException {
return new AvroFileExtractor(state);
}
}
| 2,953 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/PartitionedFileSourceBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import com.google.common.base.Throwables;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.dataset.PartitionDescriptor;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.source.extractor.filebased.FileBasedSource;
import org.apache.gobblin.source.extractor.hadoop.AvroFsHelper;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.Extract.TableType;
import org.apache.gobblin.source.workunit.MultiWorkUnitWeightedQueue;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.DatePartitionType;
import org.apache.gobblin.writer.partitioner.TimeBasedAvroWriterPartitioner;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.Duration;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implementation of {@link Source} that reads over date-partitioned files.
 * This source can be regarded as the reader equivalent of {@link TimeBasedAvroWriterPartitioner}.
 *
 * <p>
 * The class will iterate through all the data folders given by the base directory
 * {@link ConfigurationKeys#SOURCE_FILEBASED_DATA_DIRECTORY}. It relies on a {@link PartitionAwareFileRetriever}
 * to actually retrieve the files in a given partition.
 */
@Slf4j
public abstract class PartitionedFileSourceBase<SCHEMA, DATA> extends FileBasedSource<SCHEMA, DATA> {

  // Configuration parameters

  /** Common prefix shared by all configuration keys understood by this source. */
  public static final String DATE_PARTITIONED_SOURCE_PREFIX = "date.partitioned.source";

  /** Optional path segment placed before the date portion of a partition path. */
  public static final String DATE_PARTITIONED_SOURCE_PARTITION_PREFIX =
      DATE_PARTITIONED_SOURCE_PREFIX + ".partition.prefix";

  /** Optional path segment appended after the date portion of a partition path. */
  public static final String DATE_PARTITIONED_SOURCE_PARTITION_SUFFIX =
      DATE_PARTITIONED_SOURCE_PREFIX + ".partition.suffix";

  /** Joda-time pattern (e.g. {@code yyyy/MM/dd}) describing the on-disk partition layout. */
  public static final String DATE_PARTITIONED_SOURCE_PARTITION_PATTERN =
      DATE_PARTITIONED_SOURCE_PREFIX + ".partition.pattern";

  /** Partition granularity (e.g. HOUR, DAY); consulted when no explicit pattern is configured. */
  public static final String DATE_PARTITIONED_SOURCE_PARTITION_GRANULARITY =
      DATE_PARTITIONED_SOURCE_PREFIX + ".partition.granularity";

  /** Fallback granularity when neither a pattern nor a granularity is configured. */
  public static final DatePartitionType DEFAULT_DATE_PARTITIONED_SOURCE_PARTITION_GRANULARITY =
      DatePartitionType.HOUR;

  /**
   * The partition 'lead time' allows a job to ignore a date partition for a given amount of time.
   * For example, if the lead_time is set to 1 day and the job is run on Jul 1 2017 at 2am, the job
   * will only process partitions from Jun 30 2017 at 2am and before.
   */
  public static final String DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME =
      DATE_PARTITIONED_SOURCE_PREFIX + ".partition.lead_time.size";

  /** By default no lead time is applied. */
  public static final Duration DEFAULT_PARTITIONED_SOURCE_PARTITION_LEAD_TIME = new Duration(0);

  /** Unit in which {@link #DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME} is expressed. */
  public static final String DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY =
      DATE_PARTITIONED_SOURCE_PREFIX + ".partition.lead_time.granularity";

  public static final DatePartitionType DEFAULT_DATE_PARTITIONED_SOURCE_PARTITION_LEAD_TIME_GRANULARITY =
      DEFAULT_DATE_PARTITIONED_SOURCE_PARTITION_GRANULARITY;

  /**
   * A String of the format defined by {@link #DATE_PARTITIONED_SOURCE_PARTITION_PATTERN} or
   * {@link #DATE_PARTITIONED_SOURCE_PARTITION_GRANULARITY}. For example for yyyy/MM/dd the
   * 2015/01/01 corresponds to January 1st, 2015. The job will start reading data from this point in time.
   * If this parameter is not specified the job will start reading data from
   * the beginning of Unix time.
   */
  public static final String DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE =
      DATE_PARTITIONED_SOURCE_PREFIX + ".min.watermark.value";

  /**
   * The maximum number of files that this job should process.
   */
  private static final String DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB =
      DATE_PARTITIONED_SOURCE_PREFIX + ".max.files.per.job";

  /**
   * The maximum number of MultiWorkUnits to create for this job. This number also corresponds to the number of
   * tasks (or if running on Hadoop, the number of map tasks) that will be launched in this job.
   */
  private static final String DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB =
      DATE_PARTITIONED_SOURCE_PREFIX + ".max.workunits.per.job";

  // Default configuration parameter values

  /**
   * Default value for {@link #DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB}
   */
  private static final int DEFAULT_DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB = 2000;

  /**
   * Default value for {@link #DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB}
   */
  private static final int DEFAULT_DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB = 500;

  /**
   * Controls the default value for {@link #DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE}. The default value will be set
   * to the epoch.
   */
  private static final int DEFAULT_DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE = 0;

  // NOTE(review): both the lombok @Slf4j 'log' and this explicit 'LOG' exist, and both are
  // used in this class. Consider consolidating on one logger.
  private static final Logger LOG = LoggerFactory.getLogger(PartitionedFileSourceBase.class);

  // Instance variables
  private SourceState sourceState;
  private FileSystem fs;
  // Watermark (ms since epoch) below which partitions are considered already processed.
  private long lowWaterMark;
  private int maxFilesPerJob;
  private int maxWorkUnitsPerJob;
  // Running count of files assigned to work units in this run (includes retried files).
  private int fileCount;
  private TableType tableType;
  private Path sourceDir;

  // Strategy object that knows how to list files for a given partition layout.
  private final PartitionAwareFileRetriever retriever;

  protected PartitionedFileSourceBase(PartitionAwareFileRetriever retriever) {
    this.retriever = retriever;
  }

  /**
   * Gobblin calls the {@link Source#getWorkunits(SourceState)} method after creating a {@link Source} object with a
   * blank constructor, so any custom initialization of the object needs to be done here.
   */
  protected void init(SourceState state) {
    retriever.init(state);

    try {
      initFileSystemHelper(state);
    } catch (FileBasedHelperException e) {
      // Rethrows as an unchecked exception; on failure nothing below runs.
      Throwables.propagate(e);
    }

    AvroFsHelper fsHelper = (AvroFsHelper) this.fsHelper;
    this.fs = fsHelper.getFileSystem();

    this.sourceState = state;

    // Low watermark defaults to the epoch (0) when no min watermark is configured.
    this.lowWaterMark =
        getLowWaterMark(state.getPreviousWorkUnitStates(), state.getProp(DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE,
            String.valueOf(DEFAULT_DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE)));

    this.maxFilesPerJob = state.getPropAsInt(DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB,
        DEFAULT_DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB);

    this.maxWorkUnitsPerJob = state.getPropAsInt(DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB,
        DEFAULT_DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB);

    // NOTE(review): throws NPE if EXTRACT_TABLE_TYPE_KEY is unset -- presumably a required property.
    this.tableType = TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());

    this.fileCount = 0;
    this.sourceDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));
  }

  /** Creates and connects the Avro-aware file-system helper used to stat and list source files. */
  @Override
  public void initFileSystemHelper(State state) throws FileBasedHelperException {
    this.fsHelper = new AvroFsHelper(state);
    this.fsHelper.connect();
  }

  @Override
  public abstract Extractor<SCHEMA, DATA> getExtractor(WorkUnitState state) throws IOException;

  /**
   * Builds the list of {@link WorkUnit}s for this run. Failed work units from the previous
   * execution are re-queued first; new files are then added until the configured per-job
   * file cap ({@link #DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB}) is reached.
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    lineageInfo = LineageInfo.getLineageInfo(state.getBroker());
    DateTimeFormatter formatter = DateTimeFormat.fullDateTime();

    // Initialize all instance variables for this object
    init(state);
    LOG.info("Will pull data from " + formatter.print(this.lowWaterMark) + " until " + this.maxFilesPerJob
        + " files have been processed, or until there is no more data to consume");
    LOG.info("Creating workunits");

    // Weighted MultiWorkUnitWeightedQueue, the job will add new WorkUnits to the queue along with a weight for each
    // WorkUnit. The queue will take care of balancing the WorkUnits amongst a set number of MultiWorkUnits
    MultiWorkUnitWeightedQueue multiWorkUnitWeightedQueue = new MultiWorkUnitWeightedQueue(this.maxWorkUnitsPerJob);

    // Add failed work units from the previous execution
    addFailedWorkUnits(getPreviousWorkUnitsForRetry(this.sourceState), multiWorkUnitWeightedQueue);

    // If the file count has not exceeded maxFilesPerJob then start adding new WorkUnits to for this job
    if (this.fileCount >= this.maxFilesPerJob) {
      LOG.info(
          "The number of work units from previous job has already reached the upper limit, no more workunits will be made");
      return multiWorkUnitWeightedQueue.getQueueAsList();
    }

    addNewWorkUnits(multiWorkUnitWeightedQueue);

    List<WorkUnit> workUnits = multiWorkUnitWeightedQueue.getQueueAsList();
    // Delegates to the List overload declared in the parent class (not visible in this file).
    addLineageSourceInfo(workUnits, state);
    return workUnits;
  }

  /**
   * Attaches lineage source information (platform, filesystem URI, dataset and partition) to a
   * single work unit. No-op when lineage is not enabled for this job.
   */
  @Override
  protected void addLineageSourceInfo(WorkUnit workUnit, State state) {
    if (!lineageInfo.isPresent()) {
      log.info("Lineage is not enabled");
      return;
    }
    String platform = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_PLATFORM, DatasetConstants.PLATFORM_HDFS);
    Path dataDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));
    // Dataset name is the source directory stripped of scheme/authority (e.g. /my/data).
    String dataset = Path.getPathWithoutSchemeAndAuthority(dataDir).toString();
    URI fileSystemUrl =
        URI.create(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI));
    DatasetDescriptor datasetDescriptor = new DatasetDescriptor(platform, fileSystemUrl, dataset);
    String partitionName = workUnit.getProp(ConfigurationKeys.WORK_UNIT_DATE_PARTITION_NAME);
    PartitionDescriptor descriptor = new PartitionDescriptor(partitionName, datasetDescriptor);
    lineageInfo.get().setSource(descriptor, workUnit);
  }

  /**
   * Helper method to process the failed {@link WorkUnit}s from the previous run and add them to the a
   * {@link MultiWorkUnitWeightedQueue}. Each retried file also counts towards {@link #fileCount},
   * so retries consume part of the per-job file budget.
   */
  private void addFailedWorkUnits(List<WorkUnit> previousWorkUnitsForRetry,
      MultiWorkUnitWeightedQueue multiWorkUnitWeightedQueue) {
    for (WorkUnit wu : previousWorkUnitsForRetry) {
      try {
        // The work unit's weight is the file's size on disk.
        multiWorkUnitWeightedQueue.addWorkUnit(wu,
            this.fs.getFileStatus(new Path(wu.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL))).getLen());
      } catch (IOException e) {
        Throwables.propagate(e);
      }

      LOG.info(
          "Will process file from previous workunit: " + wu.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));

      this.fileCount++;
    }
  }

  /**
   * Returns the {@link Extract} for the given file's watermark, creating and caching one in
   * {@code extractMap} on first use so that files sharing a watermark share an Extract.
   */
  private Extract getExtractForFile(PartitionAwareFileRetriever.FileInfo file,
      String topicName,
      String namespace,
      Map<Long, Extract> extractMap) {
    Extract extract = extractMap.get(file.getWatermarkMsSinceEpoch());
    if (extract == null) {
      // Create an extract object for the dayPath
      extract = new Extract(this.tableType, namespace, topicName);
      LOG.info("Created extract: " + extract.getExtractId() + " for path " + topicName);
      extractMap.put(file.getWatermarkMsSinceEpoch(), extract);
    }
    return extract;
  }

  /**
   * Helper method to add new {@link WorkUnit}s for this job. It iterates through a date partitioned directory and
   * creates a {@link WorkUnit} for each file that needs to be processed. It then adds that {@link WorkUnit} to a
   * {@link MultiWorkUnitWeightedQueue}
   */
  private void addNewWorkUnits(MultiWorkUnitWeightedQueue multiWorkUnitWeightedQueue) {
    try {
      // Only ask the retriever for the remaining file budget (cap minus retried files).
      List<PartitionAwareFileRetriever.FileInfo> filesToPull =
          retriever.getFilesToProcess(this.lowWaterMark, this.maxFilesPerJob - this.fileCount);
      Collections.sort(filesToPull);

      String topicName = this.sourceDir.getName();
      String namespace = this.sourceState.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
      Map<Long, Extract> extractMap = new HashMap<>();
      for (PartitionAwareFileRetriever.FileInfo file : filesToPull) {
        Extract extract = getExtractForFile(file, topicName, namespace, extractMap);

        LOG.info("Will process file " + file.getFilePath());

        WorkUnit singleWorkUnit = WorkUnit.create(extract);
        singleWorkUnit.setProp(ConfigurationKeys.SOURCE_ENTITY, topicName);
        singleWorkUnit.setProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL, file.getFilePath());
        // Low and high watermarks are identical: each file belongs to exactly one partition.
        singleWorkUnit.setProp(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY, file.getWatermarkMsSinceEpoch());
        singleWorkUnit.setProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY, file.getWatermarkMsSinceEpoch());
        singleWorkUnit.setProp(ConfigurationKeys.WORK_UNIT_DATE_PARTITION_KEY, file.getWatermarkMsSinceEpoch());
        singleWorkUnit.setProp(ConfigurationKeys.WORK_UNIT_DATE_PARTITION_NAME, file.getPartitionName());

        if (this.sourceState.getPropAsBoolean(ConfigurationKeys.SCHEMA_IN_SOURCE_DIR,
            ConfigurationKeys.DEFAULT_SCHEMA_IN_SOURCE_DIR)) {
          addSchemaFile(file, singleWorkUnit);
        }
        multiWorkUnitWeightedQueue.addWorkUnit(singleWorkUnit, file.getFileSize());

        this.fileCount++;
      }

      LOG.info("Total number of files extracted for the current run: " + filesToPull.size());
    } catch (IOException e) {
      Throwables.propagate(e);
    }
  }

  /**
   * Records the schema file sitting next to {@code dataFile} on the work unit.
   *
   * @throws IOException if the expected schema file does not exist in the data file's directory
   */
  private void addSchemaFile(PartitionAwareFileRetriever.FileInfo dataFile, WorkUnit workUnit)
      throws IOException {
    Path schemaFile = new Path(new Path(dataFile.getFilePath()).getParent(),
        workUnit.getProp(ConfigurationKeys.SCHEMA_FILENAME, ConfigurationKeys.DEFAULT_SCHEMA_FILENAME));
    if (fs.exists(schemaFile)) {
      workUnit.setProp(ConfigurationKeys.SOURCE_SCHEMA, schemaFile.toString());
    } else {
      throw new IOException("Schema file " + schemaFile + " does not exist.");
    }
  }

  /**
   * Gets the LWM for this job runs. The new LWM is the HWM of the previous run + 1 unit (day,hour,minute..etc).
   * If there was no previous execution then it is set to the given lowWaterMark + 1 unit.
   */
  private long getLowWaterMark(Iterable<WorkUnitState> previousStates, String lowWaterMark) {
    long lowWaterMarkValue = retriever.getWatermarkFromString(lowWaterMark);

    // Find the max HWM from the previous states, this is the new current LWM
    for (WorkUnitState previousState : previousStates) {
      // Only COMMITTED work units advance the watermark; failed ones are retried instead.
      if (previousState.getWorkingState().equals(WorkUnitState.WorkingState.COMMITTED)) {
        long previousHighWaterMark = previousState.getWorkunit().getHighWaterMark();
        if (previousHighWaterMark > lowWaterMarkValue) {
          lowWaterMarkValue = previousHighWaterMark;
        }
      }
    }

    // Advance by one partition unit so the last committed partition is not re-processed.
    return lowWaterMarkValue + getRetriever().getWatermarkIncrementMs();
  }

  /** Exposes the file retriever to subclasses. */
  protected PartitionAwareFileRetriever getRetriever() {
    return retriever;
  }
}
| 2,954 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/DatePartitionedJsonFileSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DatePartitionedJsonFileExtractor;
import org.apache.gobblin.source.extractor.Extractor;
/**
 * A {@link PartitionedFileSourceBase} that pulls date-partitioned JSON files and hands each
 * work unit to a {@link DatePartitionedJsonFileExtractor}.
 */
public class DatePartitionedJsonFileSource extends PartitionedFileSourceBase<String, String> {

  /** Only files with this extension are picked up by the retriever. */
  private static final String JSON_EXTENSION = ".json";

  public DatePartitionedJsonFileSource() {
    super(new DatePartitionedNestedRetriever(JSON_EXTENSION));
  }

  /**
   * Creates the JSON extractor for a single work unit.
   *
   * @param state the work unit to extract
   * @throws IOException if the extractor cannot be created
   */
  @Override
  public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
    return new DatePartitionedJsonFileExtractor(state);
  }
}
| 2,955 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/DatePartitionedDailyAvroSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.util.DatePartitionType;
/**
 * Implementation of {@link org.apache.gobblin.source.Source} that reads over date-partitioned Avro data.
 *
 * <p>
 *
 * For example, if {@link ConfigurationKeys#SOURCE_FILEBASED_DATA_DIRECTORY} is set to /my/data/, then the class assumes
 * folders following the pattern /my/data/daily/[year]/[month]/[day] are present. It will iterate through all the data
 * under these folders starting from the date specified by {@link #DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE} until
 * either {@link #DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB} files have been processed, or until there is no more data
 * to process. For example, if {@link #DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE} is set to 2015/01/01, then the job
 * will read from the folder /my/data/daily/2015/01/01/, /my/data/daily/2015/01/02/, /my/data/2015/01/03/ etc.
 *
 * <p>
 *
 * The class will only process data in Avro format.
 */
public class DatePartitionedDailyAvroSource extends DatePartitionedAvroFileSource {

  /**
   * Pins the partition layout to daily granularity before delegating to the parent.
   */
  @Override
  protected void init(SourceState state) {
    // Must be set BEFORE super.init(state): the parent's retriever reads this property
    // during its own initialization.
    state.setProp(DATE_PARTITIONED_SOURCE_PARTITION_PATTERN, DatePartitionType.DAY.getDateTimePattern());
    super.init(state);
  }
}
| 2,956 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/DatePartitionedNestedRetriever.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Duration;
import org.joda.time.DurationFieldType;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.source.extractor.hadoop.HadoopFsHelper;
import org.apache.gobblin.util.DatePartitionType;
import static org.apache.gobblin.source.PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_PATTERN;
/**
 * PartitionRetriever that is optimized for nested directory structures where data is dumped on a regular basis
 * and most data has likely been processed by Gobblin already.
 *
 * For example, if {@link ConfigurationKeys#SOURCE_FILEBASED_DATA_DIRECTORY} is set to /my/data/, then the class assumes
 * folders following the pattern /my/data/daily/[year]/[month]/[day] are present. It will iterate through all the data
 * under these folders starting from the date specified by {@link #DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE} until
 * either {@link #DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB} files have been processed, or until there is no more data
 * to process. For example, if {@link #DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE} is set to 2015/01/01, then the job
 * will read from the folder /my/data/daily/2015/01/01/, /my/data/daily/2015/01/02/, /my/data/2015/01/03/ etc.
 *
 */
public class DatePartitionedNestedRetriever implements PartitionAwareFileRetriever {

  private static final Logger LOG = LoggerFactory.getLogger(DatePartitionedNestedRetriever.class);

  // Formats/parses partition path segments (e.g. 2015/01/01); set from pattern or granularity.
  private DateTimeFormatter partitionPatternFormatter;
  // Smallest time unit in the partition layout; one partition step in getFilesToProcess().
  private DurationFieldType incrementalUnit;
  private String sourcePartitionPrefix;
  private String sourcePartitionSuffix;
  private Path sourceDir;
  private HadoopFsHelper helper;
  // File extension this retriever accepts (with or without the leading dot).
  private final String expectedExtension;
  // Partitions newer than (now - leadTimeDuration) are skipped.
  private Duration leadTimeDuration;
  private boolean schemaInSourceDir;
  private String schemaFile;
  protected FileSystem fs;

  public DatePartitionedNestedRetriever(String expectedExtension) {
    this.expectedExtension = expectedExtension;
  }

  /**
   * Reads all partition-related configuration from the source state.
   * NOTE(review): this mutates the JVM-wide default Joda time zone via
   * {@link DateTimeZone#setDefault}; concurrent sources with different time zones
   * in the same JVM could interfere -- verify this is acceptable.
   */
  @Override
  public void init(SourceState state) {
    DateTimeZone.setDefault(DateTimeZone
        .forID(state.getProp(ConfigurationKeys.SOURCE_TIMEZONE, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE)));

    initDatePartition(state);
    this.sourcePartitionPrefix =
        state.getProp(PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_PREFIX, StringUtils.EMPTY);

    this.sourcePartitionSuffix =
        state.getProp(PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_SUFFIX, StringUtils.EMPTY);
    this.sourceDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));
    this.leadTimeDuration = PartitionAwareFileRetrieverUtils.getLeadTimeDurationFromConfig(state);
    this.helper = new HadoopFsHelper(state);
    this.schemaInSourceDir = state.getPropAsBoolean(ConfigurationKeys.SCHEMA_IN_SOURCE_DIR,
        ConfigurationKeys.DEFAULT_SCHEMA_IN_SOURCE_DIR);
    this.schemaFile = this.schemaInSourceDir ? state.getProp(ConfigurationKeys.SCHEMA_FILENAME,
        ConfigurationKeys.DEFAULT_SCHEMA_FILENAME) : "";
  }

  /**
   * Walks partition directories one unit at a time from {@code minWatermark} up to
   * (now - lead time), collecting matching files until {@code maxFilesToReturn} is reached.
   * Partitions whose directory does not exist are silently skipped.
   */
  @Override
  public List<FileInfo> getFilesToProcess(long minWatermark, int maxFilesToReturn)
      throws IOException {
    // Upper bound of the scan; lead time holds back the most recent partitions.
    DateTime currentDay = new DateTime().minus(leadTimeDuration);
    DateTime lowWaterMarkDate = new DateTime(minWatermark);
    List<FileInfo> filesToProcess = new ArrayList<>();

    try {
      helper.connect();
      this.fs = helper.getFileSystem();
    } catch (FileBasedHelperException e) {
      throw new IOException("Error initializing FileSystem", e);
    }

    for (DateTime date = lowWaterMarkDate; !date.isAfter(currentDay) && filesToProcess.size() < maxFilesToReturn;
        date = date.withFieldAdded(incrementalUnit, 1)) {

      // Constructs the partition path - e.g. prefix/2015/01/01/suffix
      String partitionPath = constructPartitionPath(date);

      // Constructs the path folder - e.g. /my/data/prefix/2015/01/01/suffix
      Path sourcePath = new Path(sourceDir, partitionPath);

      if (this.fs.exists(sourcePath)) {
        for (FileStatus fileStatus : getFilteredFileStatuses(sourcePath, getFileFilter())) {
          LOG.info("Will process file " + fileStatus.getPath());
          // The partition's start time doubles as every contained file's watermark.
          filesToProcess.add(
              new FileInfo(fileStatus.getPath().toString(), fileStatus.getLen(), date.getMillis(), partitionPath));
        }
      }
    }

    return filesToProcess;
  }

  /**
   * This method could be overwritten to support more complicated file-loading scheme,
   * e.g. recursively browsing of the source path.
   */
  protected FileStatus[] getFilteredFileStatuses(Path sourcePath, PathFilter pathFilter) throws IOException {
    return this.fs.listStatus(sourcePath, pathFilter);
  }

  /** Parses a watermark string (in the configured partition pattern) to ms since epoch. */
  @Override
  public long getWatermarkFromString(String lowWaterMark) {
    return this.partitionPatternFormatter.parseMillis(lowWaterMark);
  }

  /** Milliseconds in one partition step (one {@link #incrementalUnit}, measured from the epoch). */
  @Override
  public long getWatermarkIncrementMs() {
    return new DateTime(0).withFieldAdded(this.incrementalUnit, 1).getMillis();
  }

  /**
   * Initializes the partition formatter/step: an explicit pattern takes precedence,
   * falling back to the configured (or default) granularity.
   */
  private void initDatePartition(State state) {
    initDatePartitionFromPattern(state);
    if (this.partitionPatternFormatter == null) {
      initDatePartitionFromGranularity(state);
    }
  }

  /** Builds the formatter from an explicit pattern, if one is configured; otherwise leaves it null. */
  private void initDatePartitionFromPattern(State state) {
    String partitionPattern = null;
    try {
      partitionPattern = state.getProp(DATE_PARTITIONED_SOURCE_PARTITION_PATTERN);
      if (partitionPattern != null) {
        this.partitionPatternFormatter =
            DateTimeFormat.forPattern(partitionPattern).withZone(DateTimeZone.getDefault());
        // Step by the finest-grained field present in the pattern (e.g. HH -> hours).
        this.incrementalUnit = DatePartitionType.getLowestIntervalUnit(partitionPattern).getDurationType();
      }
    } catch (Exception e) {
      throw new IllegalArgumentException("Invalid source partition pattern: " + partitionPattern, e);
    }
  }

  /** Builds the formatter from the configured granularity, defaulting to hourly. */
  private void initDatePartitionFromGranularity(State state) {
    String granularityProp = state.getProp(PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_GRANULARITY);
    DatePartitionType partitionType = null;
    if (granularityProp == null) {
      partitionType = PartitionedFileSourceBase.DEFAULT_DATE_PARTITIONED_SOURCE_PARTITION_GRANULARITY;
    } else {
      Optional<DatePartitionType> partitionTypeOpt =
          Enums.getIfPresent(DatePartitionType.class, granularityProp.toUpperCase());
      Preconditions
          .checkState(partitionTypeOpt.isPresent(), "Invalid source partition granularity: " + granularityProp);
      partitionType = partitionTypeOpt.get();
    }
    this.partitionPatternFormatter = DateTimeFormat.forPattern(partitionType.getDateTimePattern());
    this.incrementalUnit = partitionType.getDateTimeFieldType().getDurationType();
  }

  /** Builds a relative partition path: [prefix/]formatted-date[/suffix]. */
  private String constructPartitionPath(DateTime date) {
    StringBuilder pathBuilder = new StringBuilder();
    if (!this.sourcePartitionPrefix.isEmpty()) {
      pathBuilder.append(this.sourcePartitionPrefix);
      pathBuilder.append(Path.SEPARATOR);
    }
    pathBuilder.append(this.partitionPatternFormatter.print(date));
    if (!this.sourcePartitionSuffix.isEmpty()) {
      pathBuilder.append(Path.SEPARATOR);
      pathBuilder.append(this.sourcePartitionSuffix);
    }
    return pathBuilder.toString();
  }

  /**
   * This method is to filter out files that don't need to be processed by extension
   * (and excludes the schema file when it lives in the source directory).
   * @return the pathFilter
   */
  private PathFilter getFileFilter() {
    // Normalize the extension so both ".json" and "json" configurations match.
    final String extension = (this.expectedExtension.startsWith(".")) ?
        this.expectedExtension :
        "." + this.expectedExtension;
    return new PathFilter() {
      @Override
      public boolean accept(Path path) {
        return path.getName().endsWith(extension) &&
            !(schemaInSourceDir && path.getName().equals(schemaFile)) ;
      }
    };
  }
}
| 2,957 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/workunit/MultiWorkUnitWeightedQueue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.workunit;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Queue;
import com.google.common.collect.ImmutableList;
import com.google.common.primitives.Longs;
/**
 * Balances {@link WorkUnit}s across a bounded number of {@link MultiWorkUnit}s. Every work unit
 * is added together with a weight estimating how expensive it is to process (for files, typically
 * the file size).
 *
 * <p>
 *
 * The constructor {@link #MultiWorkUnitWeightedQueue(int)} caps the number of MultiWorkUnits.
 * Once the cap is reached, each additional work unit is packed into whichever existing
 * MultiWorkUnit currently carries the least total weight.
 *
 * @see MultiWorkUnit
 */
public class MultiWorkUnitWeightedQueue {

  // Min-heap ordered by total weight; the head is always the lightest bin.
  private final Queue<WeightedMultiWorkUnit> binQueue;

  // Maximum number of MultiWorkUnits this queue may create.
  private int maxBins = Integer.MAX_VALUE;

  // Number of MultiWorkUnits created so far.
  private int binsCreated = 0;

  /**
   * Uses an effectively unbounded cap ({@link Integer#MAX_VALUE}), so work units are only
   * paired together after that many have been added.
   */
  public MultiWorkUnitWeightedQueue() {
    this.binQueue = new PriorityQueue<>();
  }

  public MultiWorkUnitWeightedQueue(int maxMultiWorkUnits) {
    this.binQueue = new PriorityQueue<>(maxMultiWorkUnits);
    this.maxBins = maxMultiWorkUnits;
  }

  /**
   * Adds a {@link WorkUnit} with its associated weight. Below the cap, the work unit gets a
   * fresh MultiWorkUnit; at the cap, it is merged into the lightest existing one.
   */
  public void addWorkUnit(WorkUnit workUnit, long weight) {
    final WeightedMultiWorkUnit bin;
    if (this.binsCreated < this.maxBins) {
      // Still below the cap: open a new bin for this work unit.
      bin = new WeightedMultiWorkUnit();
      this.binsCreated++;
    } else {
      // At the cap: take the lightest bin off the heap and reuse it.
      bin = this.binQueue.poll();
    }
    bin.addWorkUnit(weight, workUnit);
    this.binQueue.offer(bin);
  }

  /**
   * Returns all work units accumulated via {@link #addWorkUnit(WorkUnit, long)} as an
   * immutable list.
   */
  public List<WorkUnit> getQueueAsList() {
    return ImmutableList.<WorkUnit> builder().addAll(this.binQueue).build();
  }

  /**
   * A {@link MultiWorkUnit} that tracks the total weight of the work units assigned to it and
   * orders itself by that weight (lightest first), so the enclosing priority queue behaves as
   * a min-heap.
   *
   * @author ydai
   */
  private static class WeightedMultiWorkUnit extends MultiWorkUnit implements Comparable<WeightedMultiWorkUnit> {

    private long weight = 0L;

    /**
     * Appends a work unit and folds its weight into this bin's running total.
     *
     * @param weight the weight of the newWorkUnit
     * @param newWorkUnit the new work unit
     */
    private void addWorkUnit(long weight, WorkUnit newWorkUnit) {
      this.addWorkUnit(newWorkUnit);
      this.weight += weight;
    }

    /** Orders bins by total weight, lightest first. */
    @Override
    public int compareTo(WeightedMultiWorkUnit weightedMultiWorkUnit) {
      return Longs.compare(this.weight, weightedMultiWorkUnit.getWeight());
    }

    @Override
    public int hashCode() {
      // Identical value to the classic 31 * 1 + (int) (w ^ (w >>> 32)) expansion.
      return 31 + (int) (this.weight ^ (this.weight >>> 32));
    }

    /** Two bins are considered equal when they carry the same total weight. */
    @Override
    public boolean equals(Object obj) {
      if (!(obj instanceof WeightedMultiWorkUnit)) {
        return false;
      }
      return this.weight == ((WeightedMultiWorkUnit) obj).getWeight();
    }

    public long getWeight() {
      return this.weight;
    }
  }
}
| 2,958 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/DummyExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.extractor.InstrumentedExtractor;
/**
 * Dummy extractor that always returns 0 records. Useful as a no-op placeholder where an
 * {@link InstrumentedExtractor} is required but no data should be pulled.
 */
public class DummyExtractor<S, D> extends InstrumentedExtractor<S, D> {

  public DummyExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
  }

  /** Always {@code null}: this extractor exposes no schema. */
  @Override
  public S getSchema() throws IOException {
    return null;
  }

  /** Always 0: no records will ever be produced. */
  @Override
  public long getExpectedRecordCount() {
    return 0;
  }

  /** Always 0: there is no watermark to report. */
  @Override
  public long getHighWatermark() {
    return 0;
  }

  /** Always {@code null}, which signals end-of-data to the caller. */
  @Override
  public D readRecordImpl(D reuse) throws DataRecordException, IOException {
    return null;
  }
}
| 2,959 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/DatePartitionedAvroFileExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.hadoop.AvroFileExtractor;
/**
 * Extension of {@link AvroFileExtractor} where the {@link #getHighWatermark()} method returns the result of the
 * specified WorkUnit's {@link org.apache.gobblin.source.workunit.WorkUnit#getHighWaterMark()} method.
 */
public class DatePartitionedAvroFileExtractor extends AvroFileExtractor {

  /**
   * @param workUnitState state of the work unit whose high watermark this extractor reports
   */
  public DatePartitionedAvroFileExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
  }

  /**
   * Returns the HWM of the workUnit rather than the parent's constant -1,
   * so date-partitioned jobs can resume from where the previous run stopped.
   * {@inheritDoc}
   * @see org.apache.gobblin.source.extractor.filebased.FileBasedExtractor#getHighWatermark()
   */
  @Override
  public long getHighWatermark() {
    return this.workUnit.getHighWaterMark();
  }
}
}
| 2,960 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/SimpleJsonExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.io.Closer;
import com.google.gson.Gson;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.hadoop.HadoopFsHelper;
/**
* An implementation of {@link Extractor}.
*
* <p>
* This extractor reads the assigned input file storing
* json documents confirming to a schema. Each line of the file is a json document.
* </p>
*/
public class SimpleJsonExtractor implements Extractor<String, String> {

  private static final Logger LOGGER = LoggerFactory.getLogger(SimpleJsonExtractor.class);

  private final WorkUnitState workUnitState;
  private final FileSystem fs;
  private final BufferedReader bufferedReader;
  private final Closer closer = Closer.create();

  /**
   * Opens the single input file named by {@link ConfigurationKeys#SOURCE_FILEBASED_FILES_TO_PULL}
   * for line-by-line reading.
   *
   * @param workUnitState state carrying the file path and filesystem configuration
   * @throws IOException if the filesystem helper cannot connect or the file cannot be opened
   */
  public SimpleJsonExtractor(WorkUnitState workUnitState) throws IOException {
    this.workUnitState = workUnitState;
    HadoopFsHelper fsHelper = new HadoopFsHelper(workUnitState);
    try {
      fsHelper.connect();
    } catch (Exception e) {
      // Preserve the underlying cause so connection failures are diagnosable.
      throw new IOException("Exception at SimpleJsonExtractor", e);
    }
    // Source is responsible to set SOURCE_FILEBASED_FILES_TO_PULL
    this.fs = fsHelper.getFileSystem();
    InputStreamReader isr = new InputStreamReader(this.fs.open(
        new Path(workUnitState.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL))), StandardCharsets.UTF_8);
    this.bufferedReader =
        this.closer.register(new BufferedReader(isr));
  }

  /**
   * Reads the schema file named by {@link ConfigurationKeys#SOURCE_SCHEMA} and stores it
   * under {@link ConfigurationKeys#CONVERTER_AVRO_SCHEMA_KEY} for downstream converters.
   */
  @Override
  public String getSchema() throws IOException {
    // Source is responsible to set SOURCE_SCHEMA
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    // try-with-resources: the original leaked this stream (copyBytes was invoked with close=false
    // and the stream was never closed).
    try (InputStream schemaStream = fs.open(
        new Path(workUnitState.getProp(ConfigurationKeys.SOURCE_SCHEMA)))) {
      IOUtils.copyBytes(schemaStream, outputStream, 4096, false);
    }
    String schema = new String(outputStream.toByteArray(), StandardCharsets.UTF_8);
    workUnitState.setProp((ConfigurationKeys.CONVERTER_AVRO_SCHEMA_KEY), schema);
    return schema;
  }

  /**
   * Returns the next line of the input file, or {@code null} at end of file.
   */
  @Override
  public String readRecord(@Deprecated String reuse) throws DataRecordException, IOException {
    return this.bufferedReader.readLine();
  }

  @Override
  public long getExpectedRecordCount() {
    // We don't know how many records are in the file before actually reading them
    return 0;
  }

  @Override
  public long getHighWatermark() {
    // Watermark is not applicable for this type of extractor
    return 0;
  }

  /** Closes the reader and the filesystem, logging (not rethrowing) failures. */
  @Override
  public void close() throws IOException {
    try {
      this.closer.close();
    } catch (IOException ioe) {
      LOGGER.error("Failed to close the input stream", ioe);
    }
    // NOTE(review): Hadoop FileSystem instances may be JVM-wide cached objects; closing one here
    // can affect other users of the same cached instance — confirm this is intended.
    try {
      fs.close();
    } catch (IOException ioe) {
      LOGGER.error("Failed to close the file object", ioe);
    }
  }
}
| 2,961 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/DatePartitionedJsonFileExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
/**
 * A {@link SimpleJsonExtractor} used for date-partitioned JSON file sources.
 * Adds no behavior of its own; it exists so jobs can name a distinct extractor class.
 */
public class DatePartitionedJsonFileExtractor extends SimpleJsonExtractor {
  public DatePartitionedJsonFileExtractor(WorkUnitState workUnitState)
      throws IOException {
    super(workUnitState);
  }
}
| 2,962 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/resultset/RecordSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.resultset;
/**
* An interface to extract data records using an iterator
*
* @param <D> type of data record
*/
public interface RecordSet<D> extends Iterable<D> {

  /**
   * Appends a single data record to this set.
   *
   * @param record the record to append
   */
  void add(D record);

  /**
   * Reports whether this set currently holds no records.
   *
   * @return {@code true} if no records have been added
   */
  boolean isEmpty();
}
| 2,963 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/resultset/RecordSetList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.resultset;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
/**
* An implementation of RecordSet to return data records through an interator
*
* @param <D> type of data record
*/
public class RecordSetList<D> implements RecordSet<D> {

  // Backing store; preserves the insertion order of added records.
  private final List<D> records = new ArrayList<>();

  @Override
  public Iterator<D> iterator() {
    return this.records.iterator();
  }

  @Override
  public void add(D record) {
    this.records.add(record);
  }

  @Override
  public boolean isEmpty() {
    return this.records.isEmpty();
  }
}
| 2,964 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/TextFileBasedSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.IOException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.hadoop.HadoopFsHelper;
import static org.apache.gobblin.configuration.ConfigurationKeys.*;
/**
* A source that reads text based input from a directory as lines.
*/
public class TextFileBasedSource extends FileBasedSource<String, String> {

  /**
   * Builds a {@link FileBasedExtractor} backed by HDFS. Defaults the downloader to
   * {@link TokenizedFileDownloader} unless the job explicitly configured one.
   */
  @Override
  public Extractor<String, String> getExtractor(WorkUnitState state) throws IOException {
    if (!state.contains(SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS)) {
      state.setProp(SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS, TokenizedFileDownloader.class.getName());
    }
    return new FileBasedExtractor<>(state, new HadoopFsHelper(state));
  }

  /** Creates and eagerly connects the HDFS helper so failures surface at source init. */
  @Override
  public void initFileSystemHelper(State state) throws FileBasedHelperException {
    this.fsHelper = new HadoopFsHelper(state);
    this.fsHelper.connect();
  }

  /** Lists files directly from the configured data directory. */
  @Override
  protected String getLsPattern(State state) {
    return state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY);
  }
}
| 2,965 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/FileBasedHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.Closeable;
import java.io.InputStream;
import java.util.List;
/**
 * Minimal filesystem abstraction used by file-based sources: connect, list and open files.
 * Implementations are {@link Closeable} and must release their connection on close.
 */
public interface FileBasedHelper extends Closeable {

  /**
   * Establishes the connection to the backing filesystem.
   *
   * @throws FileBasedHelperException if the connection cannot be established
   */
  void connect() throws FileBasedHelperException;

  /**
   * Lists the entries under the given path.
   *
   * @param path path to list
   * @return the entry names found under {@code path}
   * @throws FileBasedHelperException if the listing fails
   */
  List<String> ls(String path) throws FileBasedHelperException;

  /**
   * Opens the file at the given path for reading.
   *
   * @param path path of the file to open
   * @return an open stream over the file's content
   * @throws FileBasedHelperException if the file cannot be opened
   */
  InputStream getFileStream(String path) throws FileBasedHelperException;
}
| 2,966 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/SizeAwareFileBasedHelperDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import org.apache.gobblin.util.Decorator;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
/**
 * A decorator that forwards every {@link FileBasedHelper} operation to a wrapped instance.
 * {@link #getFileSize(String)} is not implemented by this class and always throws
 * {@link UnsupportedOperationException}.
 */
public class SizeAwareFileBasedHelperDecorator implements SizeAwareFileBasedHelper, Decorator {

  // The wrapped helper that all calls are forwarded to.
  private final FileBasedHelper delegate;

  public SizeAwareFileBasedHelperDecorator(FileBasedHelper fileBasedHelper) {
    this.delegate = fileBasedHelper;
  }

  @Override
  public void connect() throws FileBasedHelperException {
    this.delegate.connect();
  }

  @Override
  public void close() throws IOException {
    this.delegate.close();
  }

  @Override
  public List<String> ls(String path) throws FileBasedHelperException {
    return this.delegate.ls(path);
  }

  @Override
  public InputStream getFileStream(String path) throws FileBasedHelperException {
    return this.delegate.getFileStream(path);
  }

  @Override
  public long getFileSize(String path) throws FileBasedHelperException {
    throw new UnsupportedOperationException("Method not implemented");
  }

  @Override
  public Object getDecoratedObject() {
    return this.delegate;
  }
}
| 2,967 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/TimestampAwareFileBasedHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
/**
* A File helper that extends {@link SizeAwareFileBasedHelper} and has functionality to get file modified time.
*
* This interface exists for backward compatibility of {@link SizeAwareFileBasedHelper}.
*/
public interface TimestampAwareFileBasedHelper extends SizeAwareFileBasedHelper {

  /**
   * Returns the modification time of the file at the given path.
   *
   * @param path path of the file to inspect
   * @return the file's last-modified timestamp
   * @throws FileBasedHelperException if the timestamp cannot be retrieved
   */
  long getFileMTime(String path) throws FileBasedHelperException;
}
| 2,968 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/FileBasedHelperException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
/**
 * Checked exception thrown by {@link FileBasedHelper} implementations when a
 * filesystem operation (connect, list, open, stat) fails.
 */
public class FileBasedHelperException extends Exception {
  private static final long serialVersionUID = 1L;

  /** @param message description of the failure */
  public FileBasedHelperException(String message) {
    super(message);
  }

  /**
   * @param message description of the failure
   * @param e underlying exception that triggered this one (preserved as the cause)
   */
  public FileBasedHelperException(String message, Exception e) {
    super(message, e);
  }
}
| 2,969 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/SizeAwareFileBasedHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
/**
* A File helper that extends {@link FileBasedHelper} and has functionality to get file size.
*
* This interface exists for backward compatibility of {@link FileBasedHelper}.
*/
public interface SizeAwareFileBasedHelper extends FileBasedHelper {

  /**
   * Returns the size, in bytes, of the file at the given path.
   *
   * @param path path of the file to inspect
   * @return the file's size in bytes
   * @throws FileBasedHelperException if the size cannot be determined
   */
  long getFileSize(String path) throws FileBasedHelperException;
}
| 2,970 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/FileBasedExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.Iterator;
import java.util.List;
import lombok.Getter;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.extractor.InstrumentedExtractor;
import org.apache.gobblin.metrics.Counters;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* Abstract class for file based extractors
*
* @author stakiar
*
* @param <S>
* type of schema
* @param <D>
* type of data record
*/
public class FileBasedExtractor<S, D> extends InstrumentedExtractor<S, D> {

  private static final Logger LOG = LoggerFactory.getLogger(FileBasedExtractor.class);

  protected final WorkUnit workUnit;
  protected final WorkUnitState workUnitState;
  // Remaining files to process; the head of the list is removed as each file is opened.
  protected final List<String> filesToPull;
  protected final FileDownloader<D> fileDownloader;
  // Emit a progress log line every statusCount records (disabled when <= 0).
  private final int statusCount;
  private long totalRecordCount = 0;

  // Iteration state over the file currently being consumed.
  private Iterator<D> currentFileItr;
  private String currentFile;
  private boolean hasNext = false;

  @Getter
  protected final Closer closer = Closer.create();
  @Getter
  private final boolean shouldSkipFirstRecord;
  @Getter
  protected final SizeAwareFileBasedHelper fsHelper;

  protected enum CounterNames {
    FileBytesRead;
  }

  protected Counters<CounterNames> counters = new Counters<>();

  /**
   * Builds the extractor, connects the filesystem helper, and instantiates the configured
   * {@link FileDownloader} (or the default {@link SingleFileDownloader}).
   *
   * @param workUnitState state carrying the file list and downloader configuration
   * @param fsHelper helper used to list and open files; wrapped if it is not size-aware
   */
  @SuppressWarnings("unchecked")
  public FileBasedExtractor(WorkUnitState workUnitState, FileBasedHelper fsHelper) {
    super(workUnitState);
    this.workUnitState = workUnitState;
    this.workUnit = workUnitState.getWorkunit();
    this.filesToPull =
        Lists.newArrayList(workUnitState.getPropAsList(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL, ""));
    this.statusCount = this.workUnit.getPropAsInt(ConfigurationKeys.FILEBASED_REPORT_STATUS_ON_COUNT,
        ConfigurationKeys.DEFAULT_FILEBASED_REPORT_STATUS_ON_COUNT);
    this.shouldSkipFirstRecord = this.workUnitState.getPropAsBoolean(ConfigurationKeys.SOURCE_SKIP_FIRST_RECORD, false);

    // Wrap plain helpers so the rest of the class can rely on the size-aware interface.
    if (fsHelper instanceof SizeAwareFileBasedHelper) {
      this.fsHelper = (SizeAwareFileBasedHelper) fsHelper;
    } else {
      this.fsHelper = new SizeAwareFileBasedHelperDecorator(fsHelper);
    }
    try {
      this.fsHelper.connect();
    } catch (FileBasedHelperException e) {
      throw new RuntimeException(e);
    }

    if (workUnitState.contains(ConfigurationKeys.SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS)) {
      // Reflectively construct the configured downloader, passing this extractor to its constructor.
      try {
        this.fileDownloader = (FileDownloader<D>) ConstructorUtils.invokeConstructor(
            Class.forName(workUnitState.getProp(ConfigurationKeys.SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS)), this);
      } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
          | ClassNotFoundException e) {
        throw new RuntimeException(e);
      }
    } else {
      this.fileDownloader = new SingleFileDownloader<>(this);
    }
    this.counters.initialize(getMetricContext(), CounterNames.class, this.getClass());
  }

  /**
   * Initializes a list of files to pull on the first call to the method.
   * Iterates through the current file and returns a new record upon each call until
   * there are no more records left in the file, then it moves on to the next file.
   * Returns {@code null} once every file has been exhausted.
   *
   * NOTE(review): the record counter increments once per invocation, so the final
   * end-of-data call is counted as well.
   */
  @Override
  public D readRecordImpl(@Deprecated D reuse) throws DataRecordException, IOException {
    this.totalRecordCount++;
    if (this.statusCount > 0 && this.totalRecordCount % this.statusCount == 0) {
      LOG.info("Total number of records processed so far: " + this.totalRecordCount);
    }
    // If records have been read, check the hasNext value, if not then get the next file to process
    if (this.currentFile != null && this.currentFileItr != null) {
      this.hasNext = this.currentFileItr.hasNext();
      // If the current file is done, move to the next one
      if (!this.hasNext) {
        getNextFileToRead();
      }
    } else {
      // If no records have been read yet, get the first file to process
      getNextFileToRead();
    }
    if (this.hasNext) {
      return this.currentFileItr.next();
    }
    LOG.info("Finished reading records from all files");
    return null;
  }

  /**
   * If a previous file has been read, first close that file. Then search through {@link #filesToPull} to find the first
   * non-empty file.
   */
  private void getNextFileToRead() throws IOException {
    if (this.currentFile != null && this.currentFileItr != null) {
      closeCurrentFile();
      incrementBytesReadCounter();
      // release the reference to allow garbage collection
      this.currentFileItr = null;
    }
    while (!this.hasNext && !this.filesToPull.isEmpty()) {
      this.currentFile = this.filesToPull.remove(0);
      this.currentFileItr = downloadFile(this.currentFile);
      this.hasNext = this.currentFileItr != null && this.currentFileItr.hasNext();
      LOG.info("Will start downloading file: " + this.currentFile);
    }
  }

  /** Returns the schema string configured on the work unit. */
  @SuppressWarnings("unchecked")
  @Override
  public S getSchema() {
    return (S) this.workUnit.getProp(ConfigurationKeys.SOURCE_SCHEMA);
  }

  /**
   * File-based sources cannot know the record count up front.
   *
   * @return -1, meaning unknown
   */
  @Override
  public long getExpectedRecordCount() {
    return -1;
  }

  /**
   * Watermarks do not apply to file-based extraction.
   *
   * @return -1, meaning no watermark
   */
  @Override
  public long getHighWatermark() {
    LOG.info("High Watermark is -1 for file based extractors");
    return -1;
  }

  /**
   * Downloads a file from the source
   *
   * @param file
   *            is the file to download
   * @return an iterator over the file
   * TODO Add support for different file formats besides text e.g. avro iterator, byte iterator, json iterator.
   */
  public Iterator<D> downloadFile(String file) throws IOException {
    return this.fileDownloader.downloadFile(file);
  }

  /**
   * Closes the current file being read (via the shared {@link Closer}); failures are logged, not rethrown.
   */
  public void closeCurrentFile() {
    try {
      this.closer.close();
    } catch (IOException e) {
      if (this.currentFile != null) {
        LOG.error("Failed to close file: " + this.currentFile, e);
      }
    }
  }

  @Override
  public void close() throws IOException {
    try {
      this.fsHelper.close();
    } catch (IOException e) {
      LOG.error("Could not successfully close file system helper due to error: " + e.getMessage(), e);
    }
  }

  /** Adds the just-finished file's byte size to the bytes-read counter; skipped if the size is unavailable. */
  private void incrementBytesReadCounter() {
    try {
      this.counters.inc(CounterNames.FileBytesRead, this.fsHelper.getFileSize(this.currentFile));
    } catch (FileBasedHelperException | UnsupportedOperationException e) {
      // Multi-catch replaces two previously duplicated catch blocks with identical bodies.
      LOG.info("Unable to get file size. Will skip increment to bytes counter " + e.getMessage());
      LOG.debug(e.getMessage(), e);
    }
  }
}
| 2,971 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/TokenBasedFileInputStreamExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* Extends {@link FileBasedExtractor} and uses {@link TokenizedFileDownloader}.
*/
public class TokenBasedFileInputStreamExtractor extends FileBasedExtractor<String, String> {

  /** Configuration key for the record-delimiter token used to split the downloaded stream. */
  public static final String TOKEN =
      "gobblin.extractor." + TokenBasedFileInputStreamExtractor.class.getSimpleName() + ".token";

  /** Configuration key for the character set used to decode the downloaded bytes. */
  public static final String CHARSET =
      "gobblin.extractor." + TokenBasedFileInputStreamExtractor.class.getSimpleName() + ".charSet";

  private final String token;
  private final String charSet;

  public TokenBasedFileInputStreamExtractor(WorkUnitState workUnitState, FileBasedHelper fsHelper) {
    super(workUnitState, fsHelper);
    // The parent selects the downloader from configuration; this extractor requires the tokenized one.
    Preconditions.checkArgument(this.fileDownloader instanceof TokenizedFileDownloader);
    this.token = workUnitState.getProp(TOKEN, TokenizedFileDownloader.DEFAULT_TOKEN);
    this.charSet = workUnitState.getProp(CHARSET, ConfigurationKeys.DEFAULT_CHARSET_ENCODING.name());
    TokenizedFileDownloader tokenizedDownloader = (TokenizedFileDownloader) this.fileDownloader;
    tokenizedDownloader.setToken(this.token);
    tokenizedDownloader.setCharset(this.charSet);
  }
}
| 2,972 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/GZIPFileDownloader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import org.apache.gobblin.configuration.ConfigurationKeys;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import java.util.zip.GZIPInputStream;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.IOUtils;
/**
* A {@link FileDownloader} that downloads a single gzip file and iterates line by line.
*
* @param <D> record type in the file
*/
@Slf4j
public class GZIPFileDownloader<D> extends FileDownloader<D> {
public GZIPFileDownloader(FileBasedExtractor<?, ?> fileBasedExtractor) {
super(fileBasedExtractor);
}
@SuppressWarnings("unchecked")
public Iterator<D> downloadFile(String file) throws IOException {
log.info("Beginning to download gzip compressed file: " + file);
try {
InputStream inputStream =
this.fileBasedExtractor.getCloser().register(this.fileBasedExtractor.getFsHelper().getFileStream(file));
Iterator<D> fileItr = (Iterator<D>) IOUtils.lineIterator(new GZIPInputStream(inputStream),
ConfigurationKeys.DEFAULT_CHARSET_ENCODING);
if (this.fileBasedExtractor.isShouldSkipFirstRecord() && fileItr.hasNext()) {
fileItr.next();
}
return fileItr;
} catch (FileBasedHelperException e) {
throw new IOException("Exception while downloading file " + file + " with message " + e.getMessage(), e);
}
}
}
| 2,973 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/FileDownloader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.IOException;
import java.util.Iterator;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
 * An abstraction for downloading a file in a {@link FileBasedExtractor}. Subclasses are expected to download the file in
 * the {@link #downloadFile(String)} method and return a record iterator. A {@link FileDownloader} can be set in a
 * {@link FileBasedExtractor} by setting {@link ConfigurationKeys#SOURCE_FILEBASED_OPTIONAL_DOWNLOADER_CLASS} in the
 * state.
 *
 * @param <D> record type in the file
 */
public abstract class FileDownloader<D> {
  // Extractor this downloader serves; subclasses use it for the file-system helper
  // and the closer that manages the lifetime of opened streams.
  protected final FileBasedExtractor<?, ?> fileBasedExtractor;

  /**
   * @param fileBasedExtractor the extractor on whose behalf files are downloaded
   */
  public FileDownloader(FileBasedExtractor<?, ?> fileBasedExtractor) {
    this.fileBasedExtractor = fileBasedExtractor;
  }
  /**
   * Downloads the file at <code>filePath</code> using the {@link FileBasedExtractor#fsHelper} and returns an
   * {@link Iterator} to the records
   *
   * @param filePath of the file to be downloaded
   * @return An iterator to the records in the file
   * @throws IOException if the file cannot be fetched or read
   */
  public abstract Iterator<D> downloadFile(final String filePath) throws IOException;
}
| 2,974 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/FileBasedSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.metrics.event.lineage.LineageInfo;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.Extract;
import org.apache.gobblin.source.workunit.Extract.TableType;
import org.apache.gobblin.source.workunit.MultiWorkUnit;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
/**
 * This class is a base class for file based sources. It provides default functionality for keeping
 * track of which files have already been pulled by the framework (via a snapshot of path+mtime
 * entries persisted on the work units) and for determining which files need to be pulled in this run.
 *
 * @author stakiar
 */
public abstract class FileBasedSource<S, D> extends AbstractSource<S, D> {
  private static final Logger log = LoggerFactory.getLogger(FileBasedSource.class);

  // Helper used to list files and fetch modification times on the source file system.
  protected TimestampAwareFileBasedHelper fsHelper;

  // Separator between a file path and its modification time in snapshot entries;
  // see getcurrentFsSnapshot() (producer) and getWorkunits() (consumer).
  protected String splitPattern = ":::";

  // Present only when lineage tracking is enabled on the state's broker.
  protected Optional<LineageInfo> lineageInfo;

  /**
   * Initialize the logger.
   *
   * @param state Source state
   */
  protected void initLogger(SourceState state) {
    // Tag all log lines of this source with the configured source entity, e.g. "[my_entity]".
    StringBuilder sb = new StringBuilder();
    sb.append("[");
    sb.append(Strings.nullToEmpty(state.getProp(ConfigurationKeys.SOURCE_ENTITY)));
    sb.append("]");
    MDC.put("sourceInfo", sb.toString());
  }

  /**
   * This method takes the snapshot seen in the previous run, and compares it to the list
   * of files currently in the source - it then decides which files it needs to pull
   * and distributes those files across the workunits; it does this comparison by comparing
   * the names (path + modification time) of the files currently in the source vs. the names
   * retrieved from the previous state
   * @param state is the source state
   * @return a list of workunits for the framework to run
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    initLogger(state);
    lineageInfo = LineageInfo.getLineageInfo(state.getBroker());
    try {
      initFileSystemHelper(state);
    } catch (FileBasedHelperException e) {
      Throwables.propagate(e);
    }
    log.info("Getting work units");
    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
    String entityName = state.getProp(ConfigurationKeys.SOURCE_ENTITY);
    // Override extract table name
    String extractTableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);
    // If extract table name is not found then consider entity name as extract table name
    if (Strings.isNullOrEmpty(extractTableName)) {
      extractTableName = entityName;
    }
    TableType tableType = TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
    List<WorkUnitState> previousWorkunits = Lists.newArrayList(state.getPreviousWorkUnitStates());
    Set<String> prevFsSnapshot = Sets.newHashSet();
    // Get list of files seen in the previous run; all prior work units carry the same snapshot,
    // so reading it from the first one is sufficient.
    if (!previousWorkunits.isEmpty()) {
      if (previousWorkunits.get(0).getWorkunit().contains(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT)) {
        prevFsSnapshot.addAll(previousWorkunits.get(0).getWorkunit().getPropAsSet(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT));
      } else if (state.getPropAsBoolean(ConfigurationKeys.SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED,
          ConfigurationKeys.DEFAULT_SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED)) {
        // If a previous job exists, there should be a snapshot property. If not, we need to fail so that we
        // don't accidentally read files that have already been processed.
        throw new RuntimeException(String.format("No '%s' found on state of prior job",
            ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT));
      }
    }
    List<WorkUnit> workUnits = Lists.newArrayList();
    // Re-queue work units from failed previous runs and treat their files as already seen.
    List<WorkUnit> previousWorkUnitsForRetry = this.getPreviousWorkUnitsForRetry(state);
    log.info("Total number of work units from the previous failed runs: " + previousWorkUnitsForRetry.size());
    for (WorkUnit previousWorkUnitForRetry : previousWorkUnitsForRetry) {
      prevFsSnapshot.addAll(previousWorkUnitForRetry.getPropAsSet(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));
      workUnits.add(previousWorkUnitForRetry);
    }
    // Get list of files that need to be pulled
    List<String> currentFsSnapshot = this.getcurrentFsSnapshot(state);
    // The snapshot we want to save. This might not be the full snapshot if we don't pull all files.
    List<String> effectiveSnapshot = Lists.newArrayList();
    List<String> filesToPull = Lists.newArrayList();
    int maxFilesToPull = state.getPropAsInt(ConfigurationKeys.SOURCE_FILEBASED_MAX_FILES_PER_RUN, Integer.MAX_VALUE);
    int filesSelectedForPull = 0;
    if (currentFsSnapshot.size() > maxFilesToPull) {
      // if we're going to not pull all files, sort them lexicographically so there is some order in which they are ingested
      // note currentFsSnapshot.size > maxFilesToPull does not imply we will ignore some of them, as we still have to diff
      // against the previous snapshot. Just a quick check if it even makes sense to sort the files.
      Collections.sort(currentFsSnapshot);
    }
    // Diff the current listing against the previous snapshot. Entries are "path:::mtime", so a
    // file whose modification time changed is treated as new and pulled again.
    for (String file: currentFsSnapshot) {
      if (prevFsSnapshot.contains(file)) {
        effectiveSnapshot.add(file);
      } else if ((filesSelectedForPull++) < maxFilesToPull) {
        // Strip the ":::mtime" suffix before handing the path to the work unit.
        filesToPull.add(file.split(this.splitPattern)[0]);
        effectiveSnapshot.add(file);
      } else {
        // file is not pulled this run
      }
    }
    // Update the snapshot from the previous run with the new files processed in this run
    // Otherwise a corrupt file could cause re-processing of already processed files
    for (WorkUnit workUnit : previousWorkUnitsForRetry) {
      workUnit.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT, StringUtils.join(effectiveSnapshot, ","));
    }
    if (!filesToPull.isEmpty()) {
      logFilesToPull(filesToPull);
      // Cap the partition count at the number of files; fall back to one-file-per-partition
      // when SOURCE_MAX_NUMBER_OF_PARTITIONS is absent or larger than the file count.
      int numPartitions = state.contains(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS)
          && state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS) <= filesToPull.size()
          ? state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS) : filesToPull.size();
      if (numPartitions <= 0) {
        throw new IllegalArgumentException("The number of partitions should be positive");
      }
      // Ceiling division so every file is assigned to some partition.
      int filesPerPartition = filesToPull.size() % numPartitions == 0 ? filesToPull.size() / numPartitions
          : filesToPull.size() / numPartitions + 1;
      // Distribute the files across the workunits
      for (int fileOffset = 0; fileOffset < filesToPull.size(); fileOffset += filesPerPartition) {
        /* Use extract table name to create extract
         *
         * We don't want to pass in the whole SourceState object just to avoid any side effect, because
         * the constructor with state argument has been deprecated for a long time. Here we selectively
         * chose the configuration needed for Extract constructor, to manually form a source state.
         */
        SourceState extractState = new SourceState();
        extractState.setProp(ConfigurationKeys.EXTRACT_ID_TIME_ZONE,
            state.getProp(ConfigurationKeys.EXTRACT_ID_TIME_ZONE, ConfigurationKeys.DEFAULT_EXTRACT_ID_TIME_ZONE));
        extractState.setProp(ConfigurationKeys.EXTRACT_IS_FULL_KEY,
            state.getProp(ConfigurationKeys.EXTRACT_IS_FULL_KEY, ConfigurationKeys.DEFAULT_EXTRACT_IS_FULL));
        Extract extract = new Extract(extractState, tableType, nameSpaceName, extractTableName);
        WorkUnit workUnit = WorkUnit.create(extract);
        // Eventually these setters should be integrated with framework support for generalized watermark handling
        workUnit.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT,
            StringUtils.join(effectiveSnapshot, ","));
        List<String> partitionFilesToPull = filesToPull.subList(fileOffset,
            fileOffset + filesPerPartition > filesToPull.size() ? filesToPull.size() : fileOffset + filesPerPartition);
        workUnit.setProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL,
            StringUtils.join(partitionFilesToPull, ","));
        if (state.getPropAsBoolean(ConfigurationKeys.SOURCE_FILEBASED_PRESERVE_FILE_NAME, false)) {
          // Preserving the original file name only makes sense for single-file work units.
          if (partitionFilesToPull.size() != 1) {
            throw new RuntimeException("Cannot preserve the file name if a workunit is given multiple files");
          }
          workUnit.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR,
              workUnit.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));
        }
        workUnits.add(workUnit);
      }
      log.info("Total number of work units for the current run: " + (workUnits.size() - previousWorkUnitsForRetry.size()));
    }
    addLineageSourceInfo(workUnits, state);
    return workUnits;
  }

  /**
   * Add lineage source info to a list of work units, it can have instances of
   * {@link org.apache.gobblin.source.workunit.MultiWorkUnit}
   */
  protected void addLineageSourceInfo(List<WorkUnit> workUnits, State state) {
    workUnits.forEach(workUnit -> {
      if (workUnit instanceof MultiWorkUnit) {
        // Recurse into nested work units so each leaf gets its own lineage entry.
        ((MultiWorkUnit) workUnit).getWorkUnits().forEach((wu -> addLineageSourceInfo(wu, state)));
      } else {
        addLineageSourceInfo(workUnit, state);
      }
    });
  }

  /**
   * Add lineage source info to a single work unit
   *
   * @param workUnit a single work unit, not an instance of {@link org.apache.gobblin.source.workunit.MultiWorkUnit}
   * @param state configurations
   */
  protected void addLineageSourceInfo(WorkUnit workUnit, State state) {
    if (!lineageInfo.isPresent()) {
      log.info("Lineage is not enabled");
      return;
    }
    String platform = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_PLATFORM, DatasetConstants.PLATFORM_HDFS);
    Path dataDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));
    String dataset = Path.getPathWithoutSchemeAndAuthority(dataDir).toString();
    URI fileSystemUrl =
        URI.create(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI));
    DatasetDescriptor source = new DatasetDescriptor(platform, fileSystemUrl, dataset);
    lineageInfo.get().setSource(source, workUnit);
  }

  /**
   * This method is responsible for connecting to the source and taking
   * a snapshot of the folder where the data is present, it then returns
   * a list of the files in String format
   *
   * Each returned entry has the form "absolutePath:::modificationTime" (see {@link #splitPattern}).
   *
   * @param state is used to connect to the source
   * @return a list of file name or paths present on the external data
   * directory
   */
  public List<String> getcurrentFsSnapshot(State state) {
    List<String> results;
    String path = getLsPattern(state);
    try {
      log.info("Running ls command with input " + path);
      results = this.fsHelper.ls(path);
      for (int i = 0; i < results.size(); i++) {
        URI uri = new URI(results.get(i));
        String filePath = uri.toString();
        // Resolve relative listings against the configured data directory.
        if (!uri.isAbsolute()) {
          File file = new File(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY), uri.toString());
          filePath = file.getAbsolutePath();
        }
        results.set(i, filePath + this.splitPattern + this.fsHelper.getFileMTime(filePath));
      }
    } catch (FileBasedHelperException | URISyntaxException e) {
      String errMsg = String.format(
          "Not able to fetch the filename/file modified time to %s. Will not pull any files", e.getMessage());
      log.error(errMsg, e);
      throw new RuntimeException(errMsg, e);
    }
    return results;
  }

  /**
   * Builds the glob pattern used to list candidate files:
   * {@code <data-directory>/*<source-entity>*}.
   */
  protected String getLsPattern(State state) {
    return state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY) + "/*"
        + state.getProp(ConfigurationKeys.SOURCE_ENTITY) + "*";
  }

  /**
   * Closes the file-system helper connection, if one was opened. Close failures are logged
   * rather than rethrown since shutdown is best-effort.
   */
  @Override
  public void shutdown(SourceState state) {
    if (this.fsHelper != null) {
      log.info("Shutting down the FileSystemHelper connection");
      try {
        this.fsHelper.close();
      } catch (IOException e) {
        log.error("Unable to shutdown FileSystemHelper", e);
      }
    }
  }

  /**
   * Initializes {@link #fsHelper} for the concrete source implementation.
   */
  public abstract void initFileSystemHelper(State state) throws FileBasedHelperException;

  /**
   * Logs the files selected for this run, capping the output at 2000 entries to keep
   * log lines bounded.
   */
  private void logFilesToPull(List<String> filesToPull) {
    int filesToLog = Math.min(2000, filesToPull.size());
    String remainingString = "";
    if (filesToLog < filesToPull.size()) {
      remainingString = "and " + (filesToPull.size() - filesToLog) + " more ";
    }
    log.info(String.format("Will pull the following files %s in this run: %s", remainingString,
        Arrays.toString(filesToPull.subList(0, filesToLog).toArray())));
  }
}
| 2,975 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/SingleFileDownloader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.IOUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
* A {@link FileDownloader} that downloads a single file and iterates line by line.
*
* @param <D> record type in the file
*/
@Slf4j
public class SingleFileDownloader<D> extends FileDownloader<D> {
public SingleFileDownloader(FileBasedExtractor<?, ?> fileBasedExtractor) {
super(fileBasedExtractor);
}
@SuppressWarnings("unchecked")
public Iterator<D> downloadFile(String file) throws IOException {
log.info("Beginning to download file: " + file);
try {
InputStream inputStream =
this.fileBasedExtractor.getCloser().register(this.fileBasedExtractor.getFsHelper().getFileStream(file));
Iterator<D> fileItr = (Iterator<D>) IOUtils.lineIterator(inputStream, ConfigurationKeys.DEFAULT_CHARSET_ENCODING);
if (this.fileBasedExtractor.isShouldSkipFirstRecord() && fileItr.hasNext()) {
fileItr.next();
}
return fileItr;
} catch (FileBasedHelperException e) {
throw new IOException("Exception while downloading file " + file + " with message " + e.getMessage(), e);
}
}
}
| 2,976 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/CsvFileDownloader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Iterator;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import com.opencsv.CSVReader;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/**
 * A {@link FileDownloader} that downloads a single CSV file and iterates over its parsed records,
 * one {@code String[]} of column values per record.
 */
@Slf4j
public class CsvFileDownloader extends FileDownloader<String[]> {

  public static final String CSV_DOWNLOADER_PREFIX = "source.csv_file.";

  // Regex matched against the first column of leading rows; matching rows are skipped.
  public static final String SKIP_TOP_ROWS_REGEX = CSV_DOWNLOADER_PREFIX + "skip_top_rows_regex";

  // Optional single-character field delimiter; when absent, OpenCSV's default (comma) is used.
  public static final String DELIMITER = CSV_DOWNLOADER_PREFIX + "delimiter";

  public CsvFileDownloader(FileBasedExtractor<?, ?> fileBasedExtractor) {
    super(fileBasedExtractor);
  }

  /**
   * Provides a record iterator via OpenCSV's {@link CSVReader}.
   * Supports skipping a variable number of top rows by regex (useful when a CSV file starts with
   * comment rows of unpredictable count), and skipping the first remaining record when the
   * extractor is configured to skip headers.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.source.extractor.filebased.FileDownloader#downloadFile(java.lang.String)
   *
   * @param file path of the CSV file to download
   * @return iterator over the parsed rows of the file
   * @throws IOException if the file cannot be fetched
   * @throws IllegalArgumentException if the configured delimiter is not a single character
   */
  @Override
  public Iterator<String[]> downloadFile(String file) throws IOException {
    log.info("Beginning to download file: " + file);
    final State state = fileBasedExtractor.workUnitState;

    // Validate the delimiter before opening the stream so a bad configuration
    // does not leave an unregistered stream behind.
    Character delimiter = null;
    if (state.contains(DELIMITER)) {
      String delimiterStr = state.getProp(DELIMITER).trim();
      Preconditions.checkArgument(delimiterStr.length() == 1, "Delimiter should be a character.");
      delimiter = delimiterStr.charAt(0);
      log.info("Using " + delimiter + " as a delimiter.");
    }

    CSVReader reader;
    try {
      InputStreamReader streamReader = new InputStreamReader(
          this.fileBasedExtractor.getFsHelper().getFileStream(file),
          ConfigurationKeys.DEFAULT_CHARSET_ENCODING);
      reader = this.fileBasedExtractor.getCloser().register(
          delimiter == null ? new CSVReader(streamReader) : new CSVReader(streamReader, delimiter));
    } catch (FileBasedHelperException e) {
      throw new IOException(e);
    }

    PeekingIterator<String[]> iterator = Iterators.peekingIterator(reader.iterator());

    // Skip leading rows whose first column matches the configured regex.
    // Stops at the first empty or non-matching row.
    if (state.contains(SKIP_TOP_ROWS_REGEX)) {
      String regex = state.getProp(SKIP_TOP_ROWS_REGEX);
      log.info("Trying to skip with regex: " + regex);
      while (iterator.hasNext()) {
        String[] row = iterator.peek();
        if (row.length == 0 || !row[0].matches(regex)) {
          break;
        }
        iterator.next();
      }
    }

    if (this.fileBasedExtractor.isShouldSkipFirstRecord() && iterator.hasNext()) {
      log.info("Skipping first record");
      iterator.next();
    }
    return iterator;
  }
}
| 2,977 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/FileByteIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import java.util.NoSuchElementException;
import com.google.common.base.Throwables;
/**
 * An {@link Iterator} over the individual bytes of an {@link InputStream}.
 *
 * <p>Uses single-byte lookahead to detect end of stream. The previous implementation relied on
 * {@code InputStream.available() > 0} in {@link #hasNext()}, which is incorrect in general:
 * {@code available()} only returns an estimate of bytes readable without blocking, and may return
 * 0 (e.g. for network streams) even though more bytes can still be read.
 */
public class FileByteIterator implements Iterator<Byte> {

  // Sentinel meaning "no byte has been read ahead yet". read() returns 0-255 or -1 (EOF),
  // so -2 can never collide with a real result.
  private static final int NOT_FETCHED = -2;
  private static final int EOF = -1;

  private final BufferedInputStream bufferedInputStream;

  // One byte of lookahead: NOT_FETCHED, EOF, or the next byte value (0-255).
  private int nextByte = NOT_FETCHED;

  public FileByteIterator(InputStream inputStream) {
    this.bufferedInputStream = new BufferedInputStream(inputStream);
  }

  /**
   * Returns whether another byte is available, reading one byte ahead if necessary.
   *
   * @throws RuntimeException wrapping any {@link IOException} from the underlying stream
   */
  @Override
  public boolean hasNext() {
    if (this.nextByte == NOT_FETCHED) {
      try {
        this.nextByte = this.bufferedInputStream.read();
      } catch (IOException e) {
        // Preserve the original behavior of surfacing I/O failures as unchecked exceptions.
        throw new RuntimeException(e);
      }
    }
    return this.nextByte != EOF;
  }

  /**
   * Returns the next byte of the stream.
   *
   * @throws NoSuchElementException when the stream is exhausted
   */
  @Override
  public Byte next() {
    if (!hasNext()) {
      throw new NoSuchElementException("No more data left in the file");
    }
    byte result = (byte) this.nextByte;
    this.nextByte = NOT_FETCHED;
    return result;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }
}
| 2,978 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/filebased/TokenizedFileDownloader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.filebased;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Scanner;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

import org.apache.gobblin.configuration.ConfigurationKeys;

import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link FileDownloader} that splits a downloaded file into records separated by a configurable
 * {@link #token} (the previous javadoc incorrectly said this class extends
 * {@code FileBasedExtractor}). Records are decoded with the configured {@link #charset}.
 */
@Slf4j
public class TokenizedFileDownloader extends FileDownloader<String> {

  public static final String DEFAULT_TOKEN = "\n";

  // Record separator. NOTE(review): Scanner.useDelimiter treats this as a regex pattern,
  // so tokens containing regex metacharacters are interpreted as such — confirm callers
  // only pass literal-safe tokens.
  @Setter
  private String token;

  // Charset name used to decode the file's bytes.
  @Setter
  private String charset;

  public TokenizedFileDownloader(FileBasedExtractor<?, ?> fileBasedExtractor) {
    this(fileBasedExtractor, DEFAULT_TOKEN, ConfigurationKeys.DEFAULT_CHARSET_ENCODING.name());
  }

  public TokenizedFileDownloader(FileBasedExtractor<?, ?> fileBasedExtractor, String token, String charset) {
    super(fileBasedExtractor);
    this.token = token;
    this.charset = charset;
  }

  /**
   * Opens the file at {@code filePath} via the extractor's file-system helper and returns an
   * iterator over token-delimited records. The stream is registered with the extractor's closer.
   *
   * @throws IOException if the file cannot be fetched
   * @throws IllegalArgumentException if no token has been configured
   */
  @Override
  public Iterator<String> downloadFile(String filePath)
      throws IOException {
    Preconditions.checkArgument(this.token != null);
    try {
      log.info("downloading file: " + filePath);
      InputStream inputStream =
          this.fileBasedExtractor.getCloser().register(this.fileBasedExtractor.getFsHelper().getFileStream(filePath));
      return new RecordIterator(inputStream, this.token, this.charset);
    } catch (FileBasedHelperException e) {
      throw new IOException("Exception when trying to download file " + filePath, e);
    }
  }

  /**
   * Iterator over the records of a stream, delimited by {@code delimiter}. Closes the underlying
   * {@link Scanner} as soon as the last record has been consumed.
   */
  @VisibleForTesting
  protected static class RecordIterator implements Iterator<String> {
    Scanner scanner;

    public RecordIterator(InputStream inputStream, String delimiter, String charSet) {
      this.scanner = new Scanner(inputStream, charSet).useDelimiter(delimiter);
    }

    @Override
    public boolean hasNext() {
      boolean hasNextRecord = this.scanner.hasNext();
      if (!hasNextRecord) {
        // Eagerly release the stream once exhausted.
        this.scanner.close();
      }
      return hasNextRecord;
    }

    @Override
    public String next() {
      // Honor the Iterator contract: fail loudly when exhausted. The previous
      // implementation returned null here, which hides exhaustion bugs in callers.
      if (!hasNext()) {
        throw new NoSuchElementException("No more records left in the file.");
      }
      return this.scanner.next();
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("Remove is not supported.");
    }
  }
}
| 2,979 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/utils/ProxyFsInput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.utils;
import java.io.Closeable;
import java.io.IOException;
import org.apache.avro.file.SeekableInput;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * This class provides similar function as {@link org.apache.avro.mapred.FsInput}.
 * The difference is that it allows extractor to use a customized
 * {@link org.apache.hadoop.fs.FileSystem}, especially when a file system proxy is enabled.
 */
public class ProxyFsInput implements Closeable, SeekableInput {

  // Open handle on the underlying file; closed via close().
  private final FSDataInputStream in;
  // Total length of the file in bytes, captured at construction time.
  private final long fileLength;

  public ProxyFsInput(Path path, FileSystem fs) throws IOException {
    // Fetch the length before opening so a missing file fails fast without leaking a stream.
    this.fileLength = fs.getFileStatus(path).getLen();
    this.in = fs.open(path);
  }

  @Override
  public long length() {
    return this.fileLength;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    return this.in.read(b, off, len);
  }

  @Override
  public void seek(long p) throws IOException {
    this.in.seek(p);
  }

  @Override
  public long tell() throws IOException {
    return this.in.getPos();
  }

  @Override
  public void close() throws IOException {
    this.in.close();
  }
}
| 2,980 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/utils/Utils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.utils;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Strings;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
public class Utils {

  private static final Logger LOG = LoggerFactory.getLogger(Utils.class);
  private static final Gson GSON = new Gson();

  // Markers recognized by getLongWithCurrentDate(), e.g. "CURRENTDAY-1" / "CURRENTHOUR-2".
  private static final String CURRENT_DAY = "CURRENTDAY";
  private static final String CURRENT_HOUR = "CURRENTHOUR";
  // Format used when expanding the CURRENTDAY/CURRENTHOUR markers into a long.
  private static final String CURRENT_DATE_FORMAT = "yyyyMMddHHmmss";

  /**
   * Wraps a comma-separated column list in a COALESCE() expression.
   *
   * @param columnOrColumnList a single column name or a comma-separated list of columns
   * @return {@code COALESCE(<list>)} for a list, the input unchanged for a single column,
   *         or {@code null} for a null/empty input
   */
  public static String getCoalesceColumnNames(String columnOrColumnList) {
    if (Strings.isNullOrEmpty(columnOrColumnList)) {
      return null;
    }
    if (columnOrColumnList.contains(",")) {
      return "COALESCE(" + columnOrColumnList + ")";
    }
    return columnOrColumnList;
  }

  /**
   * Removes the given key from every object of the input array.
   *
   * @param inputJsonArray array whose elements must all be JSON objects
   * @param key key to remove from each object
   * @return a new array containing the (mutated in place) input objects without the key
   */
  public static JsonArray removeElementFromJsonArray(JsonArray inputJsonArray, String key) {
    JsonArray outputJsonArray = new JsonArray();
    for (int i = 0; i < inputJsonArray.size(); i++) {
      JsonObject jsonObject = inputJsonArray.get(i).getAsJsonObject();
      outputJsonArray.add(removeElementFromJsonObject(jsonObject, key));
    }
    return outputJsonArray;
  }

  /**
   * Removes the given key from the object, mutating it in place.
   *
   * @param jsonObject object to strip the key from; may be null
   * @param key key to remove
   * @return the same (mutated) object, or null if the input was null
   */
  public static JsonObject removeElementFromJsonObject(JsonObject jsonObject, String key) {
    if (jsonObject != null) {
      jsonObject.remove(key);
      return jsonObject;
    }
    return null;
  }

  /**
   * Re-formats a date/time string from one pattern to another.
   *
   * @param input date/time string in {@code inputfmt}
   * @param inputfmt {@link SimpleDateFormat} pattern of the input
   * @param outputfmt {@link SimpleDateFormat} pattern of the output
   * @return the re-formatted string
   */
  public static String toDateTimeFormat(String input, String inputfmt, String outputfmt) {
    Date date = null;
    SimpleDateFormat infmt = new SimpleDateFormat(inputfmt);
    try {
      date = infmt.parse(input);
    } catch (ParseException e) {
      // Log via the class logger instead of printStackTrace(); on failure date stays null
      // and format(null) below still fails fast, preserving the original behavior.
      LOG.error("Failed to parse '" + input + "' with format '" + inputfmt + "'", e);
    }
    SimpleDateFormat outFormat = new SimpleDateFormat(outputfmt);
    return outFormat.format(date);
  }

  /**
   * Parses a date string and truncates it to the precision of the output format.
   *
   * @param input date/time string in {@code inputfmt}
   * @param inputfmt input pattern
   * @param outputfmt pattern whose precision the result is truncated to
   * @return the truncated date, or null if parsing fails
   */
  public static Date toDate(String input, String inputfmt, String outputfmt) {
    final SimpleDateFormat inputFormat = new SimpleDateFormat(inputfmt);
    final SimpleDateFormat outputFormat = new SimpleDateFormat(outputfmt);
    Date outDate = null;
    try {
      Date date = inputFormat.parse(input);
      // Round-trip through the output format to drop fields it does not carry.
      String dateStr = outputFormat.format(date);
      outDate = outputFormat.parse(dateStr);
    } catch (ParseException e) {
      LOG.error("Parse to date failed", e);
    }
    return outDate;
  }

  /**
   * Parses a date string with the given format.
   *
   * @param input date/time string
   * @param inputfmt input pattern
   * @return the parsed date, or null if parsing fails
   */
  public static Date toDate(String input, String inputfmt) {
    final SimpleDateFormat inputFormat = new SimpleDateFormat(inputfmt);
    Date outDate = null;
    try {
      outDate = inputFormat.parse(input);
    } catch (ParseException e) {
      LOG.error("Parse to date failed", e);
    }
    return outDate;
  }

  /**
   * Formats an epoch-millis timestamp with the given pattern in the default time zone.
   *
   * @param epoch epoch time in milliseconds
   * @param format output pattern
   * @return formatted date string
   */
  public static String epochToDate(long epoch, String format) {
    SimpleDateFormat sdf = new SimpleDateFormat(format);
    Date date = new Date(epoch);
    return sdf.format(date);
  }

  /**
   * Parses a string as a long, treating null/empty as 0.
   *
   * @throws NumberFormatException if the value is non-empty and not a valid long
   */
  public static long getAsLong(String value) {
    if (Strings.isNullOrEmpty(value)) {
      return 0;
    }
    return Long.parseLong(value);
  }

  /**
   * Parses a string as an int, treating null/empty as 0.
   *
   * @throws NumberFormatException if the value is non-empty and not a valid int
   */
  public static int getAsInt(String value) {
    if (Strings.isNullOrEmpty(value)) {
      return 0;
    }
    return Integer.parseInt(value);
  }

  /**
   * Parses a numeric timestamp (e.g. 20140301050505 with format "yyyyMMddHHmmss") into a Date.
   *
   * @param value numeric date/time whose decimal digits match {@code format}
   * @param format pattern describing the digits of {@code value}
   * @return the parsed date, or null if parsing fails
   */
  public static Date toDate(long value, String format) {
    SimpleDateFormat fmt = new SimpleDateFormat(format);
    Date date = null;
    try {
      date = fmt.parse(Long.toString(value));
    } catch (ParseException e) {
      LOG.error("Failed to parse '" + value + "' with format '" + format + "'", e);
    }
    return date;
  }

  /**
   * Truncates a Date to the precision of the given format by round-tripping it
   * through that format.
   *
   * @param date date to truncate
   * @param format pattern whose precision the result is truncated to
   * @return the truncated date, or null if the round-trip parse fails
   */
  public static Date toDate(Date date, String format) {
    SimpleDateFormat fmt = new SimpleDateFormat(format);
    String dateStr = fmt.format(date);
    Date outDate = null;
    try {
      outDate = fmt.parse(dateStr);
    } catch (ParseException e) {
      LOG.error("Failed to re-parse '" + dateStr + "' with format '" + format + "'", e);
    }
    return outDate;
  }

  /** Formats a Date with the given pattern. */
  public static String dateToString(Date datetime, String format) {
    SimpleDateFormat fmt = new SimpleDateFormat(format);
    return fmt.format(datetime);
  }

  /** Returns a new Date offset from the input by the given number of days (may be negative). */
  public static Date addDaysToDate(Date datetime, int days) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTime(datetime);
    calendar.add(Calendar.DATE, days);
    return calendar.getTime();
  }

  /** Returns a new Date offset from the input by the given number of hours (may be negative). */
  public static Date addHoursToDate(Date datetime, int hours) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTime(datetime);
    calendar.add(Calendar.HOUR, hours);
    return calendar.getTime();
  }

  /** Returns a new Date offset from the input by the given number of seconds (may be negative). */
  public static Date addSecondsToDate(Date datetime, int seconds) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTime(datetime);
    calendar.add(Calendar.SECOND, seconds);
    return calendar.getTime();
  }

  /** Returns true iff the watermark type is {@link WatermarkType#SIMPLE}. */
  public static boolean isSimpleWatermark(WatermarkType watermarkType) {
    return watermarkType == WatermarkType.SIMPLE;
  }

  /**
   * Print time difference in minutes, seconds and milliseconds
   *
   * @param start start time in epoch millis
   * @param end end time in epoch millis
   * @return human-readable "%d min, %d sec, %d millis" string
   */
  public static String printTiming(long start, long end) {
    long totalMillis = end - start;
    long mins = TimeUnit.MILLISECONDS.toMinutes(totalMillis);
    long secs = TimeUnit.MILLISECONDS.toSeconds(totalMillis) - TimeUnit.MINUTES.toSeconds(mins);
    long millis = totalMillis - TimeUnit.MINUTES.toMillis(mins) - TimeUnit.SECONDS.toMillis(secs);
    return String.format("%d min, %d sec, %d millis", mins, secs, millis);
  }

  /**
   * get column list from the user provided query to build schema with the respective columns
   *
   * @param query input query
   * @return lower-cased, whitespace-stripped column names between "select " and " from ",
   *         or null if the query is empty or does not contain both keywords
   */
  public static List<String> getColumnListFromQuery(String query) {
    if (Strings.isNullOrEmpty(query)) {
      return null;
    }
    String queryLowerCase = query.toLowerCase();
    // Check the raw indexOf() results before offsetting: the original code added 7
    // before testing "< 0", so a missing "select " (-1 + 7 == 6) was never detected.
    int selectIndex = queryLowerCase.indexOf("select ");
    int endIndex = queryLowerCase.indexOf(" from ");
    if (selectIndex < 0 || endIndex < 0) {
      return null;
    }
    int startIndex = selectIndex + "select ".length();
    String[] inputQueryColumns = query.substring(startIndex, endIndex).toLowerCase().replaceAll(" ", "").split(",");
    return Arrays.asList(inputQueryColumns);
  }

  /**
   * Convert CSV record(List<Strings>) to JsonObject using header(column Names)
   *
   * @param bulkRecordHeader header record holding the column names
   * @param record data record, positionally aligned with the header
   * @param columnCount number of leading columns to copy into the result
   * @return JsonObject mapping column name to value
   */
  public static JsonObject csvToJsonObject(List<String> bulkRecordHeader, List<String> record, int columnCount) {
    ObjectMapper mapper = new ObjectMapper();
    Map<String, String> resultInfo = new HashMap<>();
    for (int i = 0; i < columnCount; i++) {
      resultInfo.put(bulkRecordHeader.get(i), record.get(i));
    }
    // Serialize via jackson then re-parse with gson so the result is a gson JsonObject.
    JsonNode json = mapper.valueToTree(resultInfo);
    JsonElement element = GSON.fromJson(json.toString(), JsonObject.class);
    return element.getAsJsonObject();
  }

  /**
   * Parses a string as an int, falling back to the given default for null/empty input.
   *
   * @throws NumberFormatException if the value is non-empty and not a valid int
   */
  public static int getAsInt(String value, int defaultValue) {
    return (Strings.isNullOrEmpty(value) ? defaultValue : Integer.parseInt(value));
  }

  /** Reads a property as a boolean, using the given default when the key is absent. */
  public static boolean getPropAsBoolean(Properties properties, String key, String defaultValue) {
    return Boolean.valueOf(properties.getProperty(key, defaultValue));
  }

  /**
   * Escapes characters in a column or table name by replacing each character sequence
   * listed in {@code escapeChars} (comma-separated) with {@code character}.
   *
   * @param columnName name to escape; null/empty yields null
   * @param escapeChars comma-separated sequences to replace; empty leaves the name unchanged
   * @param character replacement string
   * @return the escaped name
   */
  public static String escapeSpecialCharacters(String columnName, String escapeChars, String character) {
    if (Strings.isNullOrEmpty(columnName)) {
      return null;
    }
    if (StringUtils.isEmpty(escapeChars)) {
      return columnName;
    }
    List<String> specialChars = Arrays.asList(escapeChars.split(","));
    for (String specialChar : specialChars) {
      columnName = columnName.replace(specialChar, character);
    }
    return columnName;
  }

  /**
   * Helper method for getting a value containing CURRENTDAY-1 or CURRENTHOUR-1 in the form
   * yyyyMMddHHmmss.
   *
   * @param value literal long, or "CURRENTDAY-n" / "CURRENTHOUR-n" marker; null/empty yields 0
   * @param timezone time zone used to resolve the current time
   * @return the resolved timestamp (yyyyMMddHHmmss) as a long, or the parsed literal
   */
  public static long getLongWithCurrentDate(String value, String timezone) {
    if (Strings.isNullOrEmpty(value)) {
      return 0;
    }
    DateTime time = getCurrentTime(timezone);
    DateTimeFormatter dtFormatter = DateTimeFormat.forPattern(CURRENT_DATE_FORMAT).withZone(time.getZone());
    if (value.toUpperCase().startsWith(CURRENT_DAY)) {
      // Skip the marker plus the "-" sign to get the day offset.
      return Long
          .parseLong(dtFormatter.print(time.minusDays(Integer.parseInt(value.substring(CURRENT_DAY.length() + 1)))));
    }
    if (value.toUpperCase().startsWith(CURRENT_HOUR)) {
      return Long
          .parseLong(dtFormatter.print(time.minusHours(Integer.parseInt(value.substring(CURRENT_HOUR.length() + 1)))));
    }
    return Long.parseLong(value);
  }

  /**
   * Convert joda time to a string in the given format
   *
   * @param input timestamp
   * @param format expected format
   * @param timezone time zone of timestamp; null falls back to the configured default
   * @return string format of timestamp
   */
  public static String dateTimeToString(DateTime input, String format, String timezone) {
    String tz = StringUtils.defaultString(timezone, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE);
    DateTimeZone dateTimeZone = getTimeZone(tz);
    DateTimeFormatter outputDtFormat = DateTimeFormat.forPattern(format).withZone(dateTimeZone);
    return outputDtFormat.print(input);
  }

  /**
   * Get current time - joda
   *
   * @param timezone time zone of current time; null falls back to the configured default
   * @return current datetime in the given timezone
   */
  public static DateTime getCurrentTime(String timezone) {
    String tz = StringUtils.defaultString(timezone, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE);
    DateTimeZone dateTimeZone = getTimeZone(tz);
    return new DateTime(dateTimeZone);
  }

  /**
   * Convert timestamp in a string format to joda time
   *
   * @param input timestamp
   * @param format timestamp format
   * @param timezone time zone of timestamp; null falls back to the configured default
   * @return joda time
   */
  public static DateTime toDateTime(String input, String format, String timezone) {
    String tz = StringUtils.defaultString(timezone, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE);
    DateTimeZone dateTimeZone = getTimeZone(tz);
    DateTimeFormatter inputDtFormat = DateTimeFormat.forPattern(format).withZone(dateTimeZone);
    return inputDtFormat.parseDateTime(input).withZone(dateTimeZone);
  }

  /**
   * Convert timestamp in a long format to joda time
   *
   * @param input timestamp
   * @param format timestamp format
   * @param timezone time zone of timestamp
   * @return joda time
   */
  public static DateTime toDateTime(long input, String format, String timezone) {
    return toDateTime(Long.toString(input), format, timezone);
  }

  /**
   * Get time zone of time zone id
   *
   * @param id timezone id
   * @return timezone
   * @throws IllegalArgumentException if the id is not a recognized joda time zone
   */
  private static DateTimeZone getTimeZone(String id) {
    DateTimeZone zone;
    try {
      zone = DateTimeZone.forID(id);
    } catch (IllegalArgumentException e) {
      // Re-throw with a clearer message, preserving the original cause.
      throw new IllegalArgumentException("TimeZone " + id + " not recognized", e);
    }
    return zone;
  }
}
| 2,981 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/utils/InputStreamCSVReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.utils;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StreamTokenizer;
import java.util.ArrayList;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
* Reads data from inputStream or bufferedReader and gives records as a list
*
* @author nveeramr
*/
/**
 * Reads CSV data from an InputStream, Reader, or String and returns one record
 * at a time as a list of field values. Supports a configurable field separator
 * and enclosing (quote) character.
 *
 * @author nveeramr
 */
public class InputStreamCSVReader {
  private final StreamTokenizer parser;
  private final char separator;
  // Enclosing (quote) character for quoted fields. FIX: constructors accepted a
  // custom enclosedChar and registered it with the tokenizer, but the parsing loop
  // hard-coded '"', silently ignoring any non-default value; it is now honored.
  private final char enclosedChar;
  // Widest record seen so far; used to presize subsequent record lists.
  private int maxFieldCount;
  private boolean atEOF;
  private BufferedReader bufferedReader;

  public InputStreamCSVReader(Reader input) {
    this(new BufferedReader(input));
  }

  public InputStreamCSVReader(InputStream input) {
    this(new InputStreamReader(input, ConfigurationKeys.DEFAULT_CHARSET_ENCODING));
  }

  public InputStreamCSVReader(BufferedReader input) {
    this(input, ',', '\"');
  }

  public InputStreamCSVReader(String input) {
    this(new InputStreamReader(new ByteArrayInputStream(input.getBytes(ConfigurationKeys.DEFAULT_CHARSET_ENCODING)),
        ConfigurationKeys.DEFAULT_CHARSET_ENCODING), ',', '\"');
  }

  public InputStreamCSVReader(Reader input, char customizedSeparator) {
    this(new BufferedReader(input), customizedSeparator, '\"');
  }

  public InputStreamCSVReader(InputStream input, char customizedSeparator) {
    this(new InputStreamReader(input, ConfigurationKeys.DEFAULT_CHARSET_ENCODING), customizedSeparator, '\"');
  }

  public InputStreamCSVReader(BufferedReader input, char customizedSeparator) {
    this(input, customizedSeparator, '\"');
  }

  public InputStreamCSVReader(String input, char customizedSeparator) {
    this(new InputStreamReader(new ByteArrayInputStream(input.getBytes(ConfigurationKeys.DEFAULT_CHARSET_ENCODING)),
        ConfigurationKeys.DEFAULT_CHARSET_ENCODING), customizedSeparator, '\"');
  }

  public InputStreamCSVReader(Reader input, char customizedSeparator, char enclosedChar) {
    this(new BufferedReader(input), customizedSeparator, enclosedChar);
  }

  public InputStreamCSVReader(InputStream input, char customizedSeparator, char enclosedChar) {
    this(new InputStreamReader(input, ConfigurationKeys.DEFAULT_CHARSET_ENCODING), customizedSeparator, enclosedChar);
  }

  public InputStreamCSVReader(String input, char customizedSeparator, char enclosedChar) {
    this(new InputStreamReader(new ByteArrayInputStream(input.getBytes(ConfigurationKeys.DEFAULT_CHARSET_ENCODING)),
        ConfigurationKeys.DEFAULT_CHARSET_ENCODING), customizedSeparator, enclosedChar);
  }

  public InputStreamCSVReader(BufferedReader input, char separator, char enclosedChar) {
    this.bufferedReader = input;
    this.separator = separator;
    this.enclosedChar = enclosedChar;
    // parser settings for the separator and escape chars
    this.parser = new StreamTokenizer(input);
    this.parser.ordinaryChars(0, 255);
    this.parser.wordChars(0, 255);
    this.parser.ordinaryChar(enclosedChar);
    this.parser.ordinaryChar(separator);
    this.parser.eolIsSignificant(true);
    this.parser.whitespaceChars('\n', '\n');
    this.parser.whitespaceChars('\r', '\r');
    this.atEOF = false;
  }

  /**
   * Returns the next raw record from the stream without skipping empty lines.
   *
   * @return list of field values, or null at end of stream
   */
  public ArrayList<String> splitRecord() throws IOException {
    return this.getNextRecordFromStream();
  }

  /**
   * Returns the next non-empty record from the stream, skipping blank lines and
   * records consisting of a single null/empty field.
   *
   * @return list of field values, or null at end of stream
   */
  public ArrayList<String> nextRecord() throws IOException {
    ArrayList<String> record = this.getNextRecordFromStream();
    // skip record if it is empty
    while (record != null) {
      boolean emptyLine = false;
      if (record.size() == 0) {
        emptyLine = true;
      } else if (record.size() == 1) {
        String val = record.get(0);
        if (val == null || val.length() == 0) {
          emptyLine = true;
        }
      }
      if (emptyLine) {
        record = getNextRecordFromStream();
      } else {
        break;
      }
    }
    return record;
  }

  /**
   * Tokenizes one record, honoring the configured separator and enclosing
   * character (doubled enclosing chars inside a quoted field are unescaped).
   *
   * @return list of field values, or null if the stream was already exhausted
   * @throws CSVParseException on malformed quoting
   */
  private ArrayList<String> getNextRecordFromStream() throws IOException {
    if (this.atEOF) {
      return null;
    }
    ArrayList<String> record = new ArrayList<>(this.maxFieldCount);
    StringBuilder fieldValue = null;
    while (true) {
      int token = this.parser.nextToken();
      if (token == StreamTokenizer.TT_EOF) {
        addField(record, fieldValue);
        this.atEOF = true;
        break;
      }
      if (token == StreamTokenizer.TT_EOL) {
        addField(record, fieldValue);
        break;
      }
      if (token == this.separator) {
        addField(record, fieldValue);
        fieldValue = null;
        continue;
      }
      if (token == StreamTokenizer.TT_WORD) {
        if (fieldValue != null) {
          throw new CSVParseException("Unknown error", this.parser.lineno());
        }
        fieldValue = new StringBuilder(this.parser.sval);
        continue;
      }
      if (token == this.enclosedChar) {
        if (fieldValue != null) {
          throw new CSVParseException("Found unescaped quote. A value with quote should be within a quote",
              this.parser.lineno());
        }
        // Consume tokens until the closing enclosing character.
        while (true) {
          token = this.parser.nextToken();
          if (token == StreamTokenizer.TT_EOF) {
            this.atEOF = true;
            throw new CSVParseException("EOF reached before closing an opened quote", this.parser.lineno());
          }
          if (token == this.separator) {
            fieldValue = appendFieldValue(fieldValue, token);
            continue;
          }
          if (token == StreamTokenizer.TT_EOL) {
            fieldValue = appendFieldValue(fieldValue, "\n");
            continue;
          }
          if (token == StreamTokenizer.TT_WORD) {
            fieldValue = appendFieldValue(fieldValue, this.parser.sval);
            continue;
          }
          if (token == this.enclosedChar) {
            int nextToken = this.parser.nextToken();
            if (nextToken == this.enclosedChar) {
              // Doubled enclosing char inside a quoted field: literal character.
              fieldValue = appendFieldValue(fieldValue, nextToken);
              continue;
            }
            if (nextToken == StreamTokenizer.TT_WORD) {
              throw new CSVParseException("Not expecting more text after end quote", this.parser.lineno());
            }
            this.parser.pushBack();
            break;
          }
        }
      }
    }
    if (record.size() > this.maxFieldCount) {
      this.maxFieldCount = record.size();
    }
    return record;
  }

  private static StringBuilder appendFieldValue(StringBuilder fieldValue, int token) {
    return appendFieldValue(fieldValue, "" + (char) token);
  }

  private static StringBuilder appendFieldValue(StringBuilder fieldValue, String token) {
    if (fieldValue == null) {
      fieldValue = new StringBuilder();
    }
    fieldValue.append(token);
    return fieldValue;
  }

  // A null fieldValue produces a null field (distinguishes "no value" from "").
  private static void addField(ArrayList<String> record, StringBuilder fieldValue) {
    record.add(fieldValue == null ? null : fieldValue.toString());
  }

  /** Signals malformed CSV input; carries the line number where parsing failed. */
  public static class CSVParseException extends IOException {
    private static final long serialVersionUID = 1L;
    final int recordNumber;

    CSVParseException(String message, int lineno) {
      super(message);
      this.recordNumber = lineno;
    }

    CSVParseException(int i) {
      this.recordNumber = i;
    }

    public int getRecordNumber() {
      return this.recordNumber;
    }
  }

  /**
   * close the bufferedReader
   */
  public void close() throws IOException {
    this.bufferedReader.close();
  }
}
| 2,982 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/schema/ColumnNameCase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.schema;
/**
 * Case conversion to apply to source column names.
 */
public enum ColumnNameCase {
  /** Convert column names to upper case. */
  TOUPPER,
  /** Convert column names to lower case. */
  TOLOWER,
  /** Leave column names unchanged. */
  NOCHANGE
}
| 2,983 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/schema/MapDataType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.schema;
/**
 * Data type describing a map column: the base type name plus the type of the
 * map's values.
 */
public class MapDataType extends DataType {

  /** Type of the map's values. */
  String values;

  /**
   * @param type base type name, passed through to {@link DataType}
   * @param values type of the map's values
   */
  public MapDataType(String type, String values) {
    super(type);
    this.values = values;
  }

  public String getValues() {
    return values;
  }

  public void setValues(String values) {
    this.values = values;
  }
}
| 2,984 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/schema/Schema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.schema;
import com.google.gson.JsonObject;
/**
* Schema from extractor
*/
/**
 * Column-level schema information produced by an extractor: column name, data
 * type, key/watermark flags, and formatting metadata. Plain mutable bean.
 */
public class Schema {

  private String columnName;
  private JsonObject dataType;
  private boolean isWaterMark;
  private int primaryKey;
  private long length;
  private int precision;
  private int scale;
  private boolean isNullable;
  private String format;
  private String comment;
  private String defaultValue;
  private boolean isUnique;

  /** @return the column name */
  public String getColumnName() {
    return columnName;
  }

  public void setColumnName(String columnName) {
    this.columnName = columnName;
  }

  /** @return the column's data type as a JSON object */
  public JsonObject getDataType() {
    return dataType;
  }

  public void setDataType(JsonObject dataType) {
    this.dataType = dataType;
  }

  /** @return the primary-key indicator */
  public int getPrimaryKey() {
    return primaryKey;
  }

  public void setPrimaryKey(int primaryKey) {
    this.primaryKey = primaryKey;
  }

  /** @return the column length */
  public long getLength() {
    return length;
  }

  public void setLength(long length) {
    this.length = length;
  }

  /** @return the numeric precision */
  public int getPrecision() {
    return precision;
  }

  public void setPrecision(int precision) {
    this.precision = precision;
  }

  /** @return the numeric scale */
  public int getScale() {
    return scale;
  }

  public void setScale(int scale) {
    this.scale = scale;
  }

  /** @return the value format (e.g. a date pattern) */
  public String getFormat() {
    return format;
  }

  public void setFormat(String format) {
    this.format = format;
  }

  /** @return the column comment */
  public String getComment() {
    return comment;
  }

  public void setComment(String comment) {
    this.comment = comment;
  }

  /** @return the column's default value */
  public String getDefaultValue() {
    return defaultValue;
  }

  public void setDefaultValue(String defaultValue) {
    this.defaultValue = defaultValue;
  }

  /** @return whether this column is the watermark column */
  public boolean isWaterMark() {
    return isWaterMark;
  }

  public void setWaterMark(boolean isWaterMark) {
    this.isWaterMark = isWaterMark;
  }

  /** @return whether the column accepts nulls */
  public boolean isNullable() {
    return isNullable;
  }

  public void setNullable(boolean isNullable) {
    this.isNullable = isNullable;
  }

  /** @return whether the column values are unique */
  public boolean isUnique() {
    return isUnique;
  }

  public void setUnique(boolean isUnique) {
    this.isUnique = isUnique;
  }
}
| 2,985 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/schema/DataType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.schema;
import lombok.Getter;
import lombok.Setter;
/**
 * Base descriptor holding the raw type name of a source column. Subclasses
 * (array/enum/map types) add structure-specific fields.
 *
 * Accessors are written explicitly (rather than via lombok) for consistency
 * with the other schema classes in this package, which declare theirs by hand.
 */
public class DataType {

  /** Raw type name. */
  String type;

  public DataType(String type) {
    this.type = type;
  }

  public String getType() {
    return this.type;
  }

  public void setType(String type) {
    this.type = type;
  }
}
| 2,986 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/schema/EnumDataType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.schema;
import java.util.List;
/**
 * Data type describing an enum column: a named type together with its fixed
 * set of allowed symbols.
 */
public class EnumDataType extends DataType {

  /** Name of the enum type. */
  String name;
  /** Allowed symbols of the enum. */
  List<String> symbols;

  /**
   * @param type base type name, passed through to {@link DataType}
   * @param name name of the enum type
   * @param symbols allowed symbols
   */
  public EnumDataType(String type, String name, List<String> symbols) {
    super(type);
    this.name = name;
    this.symbols = symbols;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public List<String> getSymbols() {
    return symbols;
  }

  public void setSymbols(List<String> symbols) {
    this.symbols = symbols;
  }
}
| 2,987 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/schema/ArrayDataType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.schema;
import lombok.Getter;
public class ArrayDataType extends DataType {
@Getter
String items;
public ArrayDataType(String type, String items) {
super(type);
this.items = items;
}
public void setItems(String items) {
this.items = items;
}
}
| 2,988 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/schema/ColumnAttributes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.schema;
/**
* Attributes of column in projection list
*
* @author nveeramr
*/
/**
 * Attributes of column in projection list: the projected name, its alias, and
 * the source table/column it maps back to.
 *
 * @author nveeramr
 */
public class ColumnAttributes {

  String columnName;
  // Renamed from "AliasName" to follow lowerCamelCase field naming used by every
  // other field in this file; public accessors are unchanged.
  String aliasName;
  String sourceTableName;
  String sourceColumnName;

  /** @return the column name as it appears in the projection */
  public String getColumnName() {
    return this.columnName;
  }

  public void setColumnName(String columnName) {
    this.columnName = columnName;
  }

  /** @return the alias assigned to the column, if any */
  public String getAliasName() {
    return this.aliasName;
  }

  public void setAliasName(String aliasName) {
    this.aliasName = aliasName;
  }

  /** @return the source table the column originates from */
  public String getSourceTableName() {
    return this.sourceTableName;
  }

  public void setSourceTableName(String sourceTableName) {
    this.sourceTableName = sourceTableName;
  }

  /** @return the source column name the projection maps back to */
  public String getSourceColumnName() {
    return this.sourceColumnName;
  }

  public void setSourceColumnName(String sourceColumnName) {
    this.sourceColumnName = sourceColumnName;
  }
}
| 2,989 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/Watermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import java.util.HashMap;
/**
 * Watermark behavior for query-based extraction: building the watermark
 * predicate, partitioning a watermark range, and advancing to the next
 * watermark value.
 */
public interface Watermark {

  /**
   * Builds a condition statement comparing the watermark column against the
   * given value using the operator, e.g.
   * {@code last_updated_ts >= 2013-01-01 00:00:00}.
   *
   * @param extractor extractor used to render the predicate
   * @param watermarkValue watermark value to compare against
   * @param operator operator between the watermark column and the value
   * @return the condition statement
   */
  String getWatermarkCondition(QueryBasedExtractor<?, ?> extractor, long watermarkValue, String operator);

  /**
   * Splits the given watermark range into partitions.
   *
   * @param lowWatermarkValue low watermark value
   * @param highWatermarkValue high watermark value
   * @param partitionInterval partition interval (in hours or days)
   * @param maxIntervals maximum number of partitions
   * @return the computed partitions
   */
  HashMap<Long, Long> getIntervals(long lowWatermarkValue, long highWatermarkValue, long partitionInterval,
      int maxIntervals);

  /**
   * Number of seconds, hours, days, or plain units that must be added to
   * derive the next successive watermark.
   *
   * @return the delta value
   */
  int getDeltaNumForNextWatermark();
}
| 2,990 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/WatermarkType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
/**
 * Kinds of watermark supported by query-based sources.
 */
public enum WatermarkType {
  /** Full timestamp watermark. */
  TIMESTAMP,
  /** Day-granularity watermark. */
  DATE,
  /** Hour-granularity watermark. */
  HOUR,
  /** Plain numeric watermark with no time semantics. */
  SIMPLE
}
| 2,991 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/DateWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
import java.math.RoundingMode;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.math.DoubleMath;
import com.google.common.primitives.Ints;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import org.apache.gobblin.source.extractor.utils.Utils;
/**
 * A {@link Watermark} implementation for a date-granularity watermark column.
 *
 * <p>Watermark values are exchanged as longs in {@code yyyyMMddHHmmss} form; internally this class
 * truncates them to whole days ({@code yyyyMMdd}) when computing partition intervals, so the
 * generated interval boundaries fall on day boundaries.
 */
public class DateWatermark implements Watermark {
  private static final Logger LOG = LoggerFactory.getLogger(DateWatermark.class);
  // default water mark format(input format) example: 20140301050505
  private static final String INPUTFORMAT = "yyyyMMddHHmmss";
  // output format of date water mark example: 20140301
  private static final String OUTPUTFORMAT = "yyyyMMdd";
  // Shared parser for INPUTFORMAT; SimpleDateFormat is not thread-safe, which is one reason
  // getIntervals is synchronized.
  private final SimpleDateFormat inputFormatParser;
  // 24 * 60 * 60 = 86400; NOTE(review): presumably the number of seconds in one day, so the next
  // pull starts one day after the previous high watermark — confirm the unit with callers of
  // getDeltaNumForNextWatermark.
  private static final int deltaForNextWatermark = 24 * 60 * 60;
  private String watermarkColumn;
  private String watermarkFormat;
  public DateWatermark(String watermarkColumn, String watermarkFormat) {
    this.watermarkColumn = watermarkColumn;
    this.watermarkFormat = watermarkFormat;
    inputFormatParser = new SimpleDateFormat(INPUTFORMAT);
  }
  /**
   * Builds the source-specific date predicate condition (column vs. watermark value) by delegating
   * to the extractor.
   */
  @Override
  public String getWatermarkCondition(QueryBasedExtractor<?, ?> extractor, long watermarkValue, String operator) {
    return extractor.getDatePredicateCondition(this.watermarkColumn, watermarkValue, this.watermarkFormat, operator);
  }
  /** Returns the increment applied to derive the next run's low watermark. */
  @Override
  public int getDeltaNumForNextWatermark() {
    return deltaForNextWatermark;
  }
  /**
   * Splits the range [lowWatermarkValue, highWatermarkValue] (both {@code yyyyMMddHHmmss} longs)
   * into at most maxIntervals partitions of at least one day each.
   *
   * @return map of partition low watermark to partition high watermark, as {@code yyyyMMddHHmmss}
   *         longs truncated to day granularity (time-of-day digits are zero); empty when the low
   *         watermark exceeds the high watermark
   */
  @Override
  synchronized public HashMap<Long, Long> getIntervals(long lowWatermarkValue, long highWatermarkValue,
      long partitionIntervalInHours, int maxIntervals) {
    Preconditions.checkArgument(maxIntervals > 0, "Invalid value for maxIntervals, positive value expected.");
    Preconditions.checkArgument(partitionIntervalInHours >= 24,
        "Invalid value for partitionInterval, should be at least 24 hrs.");
    HashMap<Long, Long> intervalMap = Maps.newHashMap();
    if (lowWatermarkValue > highWatermarkValue) {
      LOG.warn("The low water mark is greater than the high water mark, empty intervals are returned");
      return intervalMap;
    }
    final Calendar calendar = Calendar.getInstance();
    Date nextTime;
    // Truncate both endpoints to day granularity before partitioning.
    Date lowWatermarkDate = extractFromTimestamp(Long.toString(lowWatermarkValue));
    Date highWatermarkDate = extractFromTimestamp(Long.toString(highWatermarkValue));
    final long lowWatermark = lowWatermarkDate.getTime();
    final long highWatermark = highWatermarkDate.getTime();
    // Interval in days, re-scaled so the number of partitions stays <= maxIntervals.
    int interval = getInterval(highWatermark - lowWatermark, partitionIntervalInHours, maxIntervals);
    LOG.info("Recalculated partition interval:" + interval + " days");
    Date startTime = new Date(lowWatermark);
    Date endTime = new Date(highWatermark);
    LOG.debug("Start time:" + startTime + "; End time:" + endTime);
    long lwm;
    long hwm;
    // Degenerate range: a single zero-width partition.
    if (startTime.getTime() == endTime.getTime()) {
      lwm = Long.parseLong(inputFormatParser.format(startTime));
      hwm = lwm;
      intervalMap.put(lwm, hwm);
      return intervalMap;
    }
    while (startTime.getTime() < endTime.getTime()) {
      lwm = Long.parseLong(inputFormatParser.format(startTime));
      calendar.setTime(startTime);
      calendar.add(Calendar.DATE, interval);
      nextTime = calendar.getTime();
      // Clamp the final partition's high watermark to the overall end time.
      hwm = Long.parseLong(inputFormatParser.format(nextTime.getTime() <= endTime.getTime() ? nextTime : endTime));
      intervalMap.put(lwm, hwm);
      LOG.debug("Partition - low:" + lwm + "; high:" + hwm);
      startTime = nextTime;
    }
    return intervalMap;
  }
  /**
   * recalculate interval(in hours) if total number of partitions greater than maximum number of allowed partitions
   *
   * @param diffInMilliSecs difference in range
   * @param hourInterval hour interval (ex: 24 hours)
   * @param maxIntervals max number of allowed partitions
   * @return calculated interval in days
   */
  private static int getInterval(long diffInMilliSecs, long hourInterval, int maxIntervals) {
    long dayInterval = TimeUnit.HOURS.toDays(hourInterval);
    int totalHours = DoubleMath.roundToInt((double) diffInMilliSecs / (60 * 60 * 1000), RoundingMode.CEILING);
    int totalIntervals = DoubleMath.roundToInt((double) totalHours / (dayInterval * 24), RoundingMode.CEILING);
    // Too many partitions: widen the interval so the partition count drops to maxIntervals.
    if (totalIntervals > maxIntervals) {
      hourInterval = DoubleMath.roundToInt((double) totalHours / maxIntervals, RoundingMode.CEILING);
      dayInterval = DoubleMath.roundToInt((double) hourInterval / 24, RoundingMode.CEILING);
    }
    return Ints.checkedCast(dayInterval);
  }
  /**
   * Convert timestamp to date (yyyymmddHHmmss to yyyymmdd)
   *
   * NOTE(review): returns null when parsing fails (error is logged); getIntervals would then NPE on
   * getTime() — inputs are expected to be valid yyyyMMddHHmmss longs.
   *
   * @param watermark value
   * @return value in date format
   */
  synchronized private static Date extractFromTimestamp(String watermark) {
    final SimpleDateFormat inputFormat = new SimpleDateFormat(INPUTFORMAT);
    final SimpleDateFormat outputFormat = new SimpleDateFormat(OUTPUTFORMAT);
    Date outDate = null;
    try {
      Date date = inputFormat.parse(watermark);
      // Round-trip through OUTPUTFORMAT to truncate the time-of-day portion.
      String dateStr = outputFormat.format(date);
      outDate = outputFormat.parse(dateStr);
    } catch (ParseException e) {
      LOG.error(e.getMessage(), e);
    }
    return outDate;
  }
  /**
   * Adjust the given watermark by diff days
   *
   * @param baseWatermark the original watermark (yyyyMMddHHmmss)
   * @param diff the number of days to add (may be negative)
   * @return the adjusted watermark value as a yyyyMMddHHmmss long
   */
  public static long adjustWatermark(String baseWatermark, int diff) {
    SimpleDateFormat parser = new SimpleDateFormat(INPUTFORMAT);
    // Utils.toDate truncates the base watermark to OUTPUTFORMAT (day) granularity first.
    Date date = Utils.toDate(baseWatermark, INPUTFORMAT, OUTPUTFORMAT);
    return Long.parseLong(parser.format(Utils.addDaysToDate(date, diff)));
  }
}
| 2,992 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/HourWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import org.apache.gobblin.source.extractor.utils.Utils;
import java.math.RoundingMode;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.math.DoubleMath;
import com.google.common.primitives.Ints;
/**
 * A {@link Watermark} implementation for an hour-granularity watermark column.
 *
 * <p>Watermark values are exchanged as longs in {@code yyyyMMddHHmmss} form; internally this class
 * truncates them to whole hours ({@code yyyyMMddHH}) when computing partition intervals, so the
 * generated interval boundaries fall on hour boundaries.
 */
public class HourWatermark implements Watermark {
  private static final Logger LOG = LoggerFactory.getLogger(HourWatermark.class);
  // default water mark format(input format) example: 20140301050505
  private static final String INPUTFORMAT = "yyyyMMddHHmmss";
  // output format of hour water mark example: 2014030105
  private static final String OUTPUTFORMAT = "yyyyMMddHH";
  // 60 * 60 = 3600; NOTE(review): presumably the number of seconds in one hour, so the next pull
  // starts one hour after the previous high watermark — confirm the unit with callers of
  // getDeltaNumForNextWatermark.
  private static final int deltaForNextWatermark = 60 * 60;
  // Shared parser for INPUTFORMAT; SimpleDateFormat is not thread-safe, which is one reason
  // getIntervals is synchronized.
  private final SimpleDateFormat inputFormatParser;
  private String watermarkColumn;
  private String watermarkFormat;
  public HourWatermark(String watermarkColumn, String watermarkFormat) {
    this.watermarkColumn = watermarkColumn;
    this.watermarkFormat = watermarkFormat;
    inputFormatParser = new SimpleDateFormat(INPUTFORMAT);
  }
  /**
   * Builds the source-specific hour predicate condition (column vs. watermark value) by delegating
   * to the extractor.
   */
  @Override
  public String getWatermarkCondition(QueryBasedExtractor<?, ?> extractor, long watermarkValue, String operator) {
    return extractor.getHourPredicateCondition(this.watermarkColumn, watermarkValue, this.watermarkFormat, operator);
  }
  /** Returns the increment applied to derive the next run's low watermark. */
  @Override
  public int getDeltaNumForNextWatermark() {
    return deltaForNextWatermark;
  }
  /**
   * Splits the range [lowWatermarkValue, highWatermarkValue] (both {@code yyyyMMddHHmmss} longs)
   * into at most maxIntervals partitions of at least one hour each.
   *
   * @return map of partition low watermark to partition high watermark, as {@code yyyyMMddHHmmss}
   *         longs truncated to hour granularity (minute/second digits are zero); empty when the low
   *         watermark exceeds the high watermark
   */
  @Override
  synchronized public HashMap<Long, Long> getIntervals(long lowWatermarkValue, long highWatermarkValue,
      long partitionIntervalInHours, int maxIntervals) {
    Preconditions.checkArgument(maxIntervals > 0, "Invalid value for maxIntervals, positive value expected.");
    Preconditions
        .checkArgument(partitionIntervalInHours > 0, "Invalid value for partitionInterval, should be at least 1.");
    HashMap<Long, Long> intervalMap = Maps.newHashMap();
    if (lowWatermarkValue > highWatermarkValue) {
      LOG.warn("The low water mark is greater than the high water mark, empty intervals are returned");
      return intervalMap;
    }
    final Calendar calendar = Calendar.getInstance();
    Date nextTime;
    // Truncate both endpoints to hour granularity before partitioning.
    Date lowWatermarkDate = extractFromTimestamp(Long.toString(lowWatermarkValue));
    Date highWatermarkDate = extractFromTimestamp(Long.toString(highWatermarkValue));
    final long lowWatermark = lowWatermarkDate.getTime();
    final long highWatermark = highWatermarkDate.getTime();
    // Interval in hours, re-scaled so the number of partitions stays <= maxIntervals.
    int interval = getInterval(highWatermark - lowWatermark, partitionIntervalInHours, maxIntervals);
    LOG.info("Recalculated partition interval:" + interval + " hours");
    Date startTime = new Date(lowWatermark);
    Date endTime = new Date(highWatermark);
    LOG.debug("Start time:" + startTime + "; End time:" + endTime);
    long lwm;
    long hwm;
    // Degenerate range: a single zero-width partition.
    if (startTime.getTime() == endTime.getTime()) {
      lwm = Long.parseLong(inputFormatParser.format(startTime));
      hwm = lwm;
      intervalMap.put(lwm, hwm);
      return intervalMap;
    }
    while (startTime.getTime() < endTime.getTime()) {
      lwm = Long.parseLong(inputFormatParser.format(startTime));
      calendar.setTime(startTime);
      calendar.add(Calendar.HOUR, interval);
      nextTime = calendar.getTime();
      // Clamp the final partition's high watermark to the overall end time.
      hwm = Long.parseLong(inputFormatParser.format(nextTime.getTime() <= endTime.getTime() ? nextTime : endTime));
      intervalMap.put(lwm, hwm);
      LOG.debug("Partition - low:" + lwm + "; high:" + hwm);
      startTime = nextTime;
    }
    return intervalMap;
  }
  /**
   * recalculate interval(in hours) if total number of partitions greater than maximum number of allowed partitions
   *
   * @param diffInMilliSecs difference in range
   * @param hourInterval hour interval (ex: 4 hours)
   * @param maxIntervals max number of allowed partitions
   * @return calculated interval in hours
   */
  private static int getInterval(long diffInMilliSecs, long hourInterval, int maxIntervals) {
    int totalHours = DoubleMath.roundToInt((double) diffInMilliSecs / (60 * 60 * 1000), RoundingMode.CEILING);
    int totalIntervals = DoubleMath.roundToInt((double) totalHours / hourInterval, RoundingMode.CEILING);
    // Too many partitions: widen the interval so the partition count drops to maxIntervals.
    if (totalIntervals > maxIntervals) {
      hourInterval = DoubleMath.roundToInt((double) totalHours / maxIntervals, RoundingMode.CEILING);
    }
    return Ints.checkedCast(hourInterval);
  }
  /**
   * Convert timestamp to hour (yyyymmddHHmmss to yyyymmddHH)
   *
   * NOTE(review): returns null when parsing fails (error is logged); getIntervals would then NPE on
   * getTime() — inputs are expected to be valid yyyyMMddHHmmss longs.
   *
   * @param watermark value
   * @return value in hour format
   */
  synchronized private static Date extractFromTimestamp(String watermark) {
    final SimpleDateFormat inputFormat = new SimpleDateFormat(INPUTFORMAT);
    final SimpleDateFormat outputFormat = new SimpleDateFormat(OUTPUTFORMAT);
    Date outDate = null;
    try {
      Date date = inputFormat.parse(watermark);
      // Round-trip through OUTPUTFORMAT to truncate the minute/second portion.
      String dateStr = outputFormat.format(date);
      outDate = outputFormat.parse(dateStr);
    } catch (ParseException e) {
      LOG.error(e.getMessage(), e);
    }
    return outDate;
  }
  /**
   * Adjust the given watermark by diff hours
   *
   * @param baseWatermark the original watermark (yyyyMMddHHmmss)
   * @param diff the number of hours to add (may be negative)
   * @return the adjusted watermark value as a yyyyMMddHHmmss long
   */
  public static long adjustWatermark(String baseWatermark, int diff) {
    SimpleDateFormat parser = new SimpleDateFormat(INPUTFORMAT);
    // Utils.toDate truncates the base watermark to OUTPUTFORMAT (hour) granularity first.
    Date date = Utils.toDate(baseWatermark, INPUTFORMAT, OUTPUTFORMAT);
    return Long.parseLong(parser.format(Utils.addHoursToDate(date, diff)));
  }
}
| 2,993 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/WatermarkPredicate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import java.util.HashMap;
import org.apache.commons.lang3.StringUtils;
/**
 * Builds {@link Predicate} instances for a watermark column, delegating the condition rendering to
 * the {@link Watermark} implementation matching the configured {@link WatermarkType}.
 */
public class WatermarkPredicate {
  // Format used when rendering watermark values into predicate conditions.
  private static final String DEFAULT_WATERMARK_VALUE_FORMAT = "yyyyMMddHHmmss";
  // Sentinel meaning "no watermark value available"; no predicate is generated for it.
  private static final long DEFAULT_WATERMARK_VALUE = -1;

  private final String watermarkColumn;
  private final WatermarkType watermarkType;
  private final Watermark watermark;

  public WatermarkPredicate(WatermarkType watermarkType) {
    this(null, watermarkType);
  }

  /**
   * @param watermarkColumn the watermark column name; may be null/blank, in which case
   *        {@link #getPredicate} always returns null
   * @param watermarkType the watermark type; anything other than TIMESTAMP/DATE/HOUR falls back to
   *        a {@link SimpleWatermark}
   */
  public WatermarkPredicate(String watermarkColumn, WatermarkType watermarkType) {
    this.watermarkColumn = watermarkColumn;
    this.watermarkType = watermarkType;

    switch (watermarkType) {
      case TIMESTAMP:
        this.watermark = new TimestampWatermark(watermarkColumn, DEFAULT_WATERMARK_VALUE_FORMAT);
        break;
      case DATE:
        this.watermark = new DateWatermark(watermarkColumn, DEFAULT_WATERMARK_VALUE_FORMAT);
        break;
      case HOUR:
        this.watermark = new HourWatermark(watermarkColumn, DEFAULT_WATERMARK_VALUE_FORMAT);
        break;
      case SIMPLE:
      default:
        // SIMPLE and any future/unknown type share the same fallback behavior.
        this.watermark = new SimpleWatermark(watermarkColumn, DEFAULT_WATERMARK_VALUE_FORMAT);
        break;
    }
  }

  /**
   * Builds a predicate for the given watermark value.
   *
   * @return a {@link Predicate}, or null when no watermark column is configured or the value is the
   *         {@code -1} sentinel (no condition can be rendered)
   */
  public Predicate getPredicate(QueryBasedExtractor<?, ?> extractor, long watermarkValue, String operator,
      Predicate.PredicateType type) {
    String condition = "";
    if (watermarkValue != DEFAULT_WATERMARK_VALUE) {
      condition = this.watermark.getWatermarkCondition(extractor, watermarkValue, operator);
    }
    if (StringUtils.isBlank(this.watermarkColumn) || condition.isEmpty()) {
      return null;
    }
    return new Predicate(this.watermarkColumn, watermarkValue, condition, this.getWatermarkSourceFormat(extractor),
        type);
  }

  /** Returns the source-specific format string for this watermark type. */
  public String getWatermarkSourceFormat(QueryBasedExtractor<?, ?> extractor) {
    return extractor.getWatermarkSourceFormat(this.watermarkType);
  }

  /** Delegates partitioning of the watermark range to the underlying {@link Watermark}. */
  public HashMap<Long, Long> getPartitions(long lowWatermarkValue, long highWatermarkValue, int partitionInterval,
      int maxIntervals) {
    return this.watermark.getIntervals(lowWatermarkValue, highWatermarkValue, partitionInterval, maxIntervals);
  }

  /** Returns the increment used to derive the next run's low watermark. */
  public int getDeltaNumForNextWatermark() {
    return this.watermark.getDeltaNumForNextWatermark();
  }
}
| 2,994 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/Predicate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
/**
* An implementation for predicate conditions
* columnName : name of the column
* value: value
* condition: predicate condition using column and value
* format: column format
*/
/**
 * A single watermark predicate: a watermark column, the watermark value, the rendered condition
 * built from them, and the source format of the column.
 */
public class Predicate {

  /** Name of the watermark column. */
  public String columnName;
  /** Watermark value the condition was built from. */
  public long value;
  /** Rendered predicate condition using the column and value. */
  public String condition;
  /** Source format of the watermark column. */
  public String format;
  /** Whether this predicate bounds the low or the high end of the pull. */
  public PredicateType type;

  /**
   * The two roles a predicate can play:
   * LWM - low water mark, HWM - high water mark.
   */
  public enum PredicateType {
    LWM,
    HWM
  }

  public Predicate(String columnName, long value, String condition, String format, PredicateType type) {
    this.columnName = columnName;
    this.value = value;
    this.condition = condition;
    this.format = format;
    this.type = type;
  }

  public String getColumnName() {
    return columnName;
  }

  public long getValue() {
    return value;
  }

  public String getCondition() {
    return condition;
  }

  public String getFormat() {
    return format;
  }

  public PredicateType getType() {
    return type;
  }

  public void setColumnName(String columnName) {
    this.columnName = columnName;
  }

  public void setValue(long value) {
    this.value = value;
  }

  public void setCondition(String condition) {
    this.condition = condition;
  }

  public void setFormat(String format) {
    this.format = format;
  }

  public void setType(PredicateType type) {
    this.type = type;
  }
}
| 2,995 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/SimpleWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import org.apache.gobblin.source.extractor.utils.Utils;
import java.math.RoundingMode;
import java.util.HashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.math.DoubleMath;
/**
 * A {@link Watermark} over a plain numeric column: watermark values are compared as-is, with no
 * date/time semantics.
 */
public class SimpleWatermark implements Watermark {
  private static final Logger LOG = LoggerFactory.getLogger(SimpleWatermark.class);

  // The next pull starts 1 above the previous high watermark.
  private static final int deltaForNextWatermark = 1;

  private final String watermarkColumn;

  // The watermarkFormat argument is unused for simple (numeric) watermarks.
  public SimpleWatermark(String watermarkColumn, String watermarkFormat) {
    this.watermarkColumn = watermarkColumn;
  }

  /** Builds a plain numeric comparison predicate from the coalesced column name. */
  @Override
  public String getWatermarkCondition(QueryBasedExtractor<?, ?> extractor, long watermarkValue, String operator) {
    String coalescedColumn = Utils.getCoalesceColumnNames(this.watermarkColumn);
    return coalescedColumn + " " + operator + " " + watermarkValue;
  }

  @Override
  public int getDeltaNumForNextWatermark() {
    return deltaForNextWatermark;
  }

  /**
   * Splits [lowWatermarkValue, highWatermarkValue] into partitions of the (possibly re-scaled)
   * interval width, guarding against long overflow when stepping.
   *
   * @return map of partition low watermark to partition high watermark; empty when the low
   *         watermark exceeds the high watermark
   */
  @Override
  public HashMap<Long, Long> getIntervals(long lowWatermarkValue, long highWatermarkValue, long partitionInterval,
      int maxIntervals) {
    Preconditions.checkArgument(partitionInterval >= 1,
        "Invalid value for partitionInterval, value should be at least 1.");
    Preconditions.checkArgument(maxIntervals > 0, "Invalid value for maxIntervals, positive value expected.");

    HashMap<Long, Long> partitions = new HashMap<>();
    long step = getInterval(lowWatermarkValue, highWatermarkValue, partitionInterval, maxIntervals);
    LOG.info("Recalculated partition interval:" + step);
    if (step == 0) {
      // getInterval returns 0 only when low > high; nothing to partition.
      return partitions;
    }
    if (lowWatermarkValue == highWatermarkValue) {
      // Degenerate range: a single zero-width partition.
      partitions.put(lowWatermarkValue, highWatermarkValue);
      return partitions;
    }
    long cursor = lowWatermarkValue;
    while (cursor < highWatermarkValue) {
      // If cursor + step would overflow a long, close out the final partition at the high end.
      boolean wouldOverflow = cursor > Long.MAX_VALUE - step;
      long upper = wouldOverflow ? highWatermarkValue : Math.min(cursor + step, highWatermarkValue);
      partitions.put(cursor, upper);
      if (wouldOverflow) {
        break;
      }
      cursor = upper;
    }
    return partitions;
  }

  /**
   * Re-scales the partition interval when the requested interval would produce more than
   * maxIntervals partitions (or when the partition count itself overflows a long).
   *
   * @return the interval to step by, or 0 when lowWatermarkValue exceeds highWatermarkValue
   */
  private static long getInterval(long lowWatermarkValue, long highWatermarkValue, long partitionInterval,
      int maxIntervals) {
    if (lowWatermarkValue > highWatermarkValue) {
      LOG.info(
          "lowWatermarkValue: " + lowWatermarkValue + " is greater than highWatermarkValue: " + highWatermarkValue);
      return 0;
    }
    long interval = partitionInterval;
    boolean overflowed = false;
    long spanIntervals = Long.MAX_VALUE;
    try {
      spanIntervals = DoubleMath.roundToLong(
          (double) highWatermarkValue / partitionInterval - (double) lowWatermarkValue / partitionInterval,
          RoundingMode.CEILING);
    } catch (ArithmeticException e) {
      overflowed = true;
    }
    if (overflowed || spanIntervals > maxIntervals) {
      interval = DoubleMath.roundToLong(
          (double) highWatermarkValue / maxIntervals - (double) lowWatermarkValue / maxIntervals, RoundingMode.CEILING);
    }
    return interval;
  }

  /**
   * Adjust the given watermark by diff
   *
   * @param baseWatermark the original watermark (numeric string)
   * @param diff the amount to change
   * @return the adjusted watermark value
   */
  public static long adjustWatermark(String baseWatermark, int diff) {
    return diff + Long.parseLong(baseWatermark);
  }
}
| 2,996 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/watermark/TimestampWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.watermark;
import java.math.RoundingMode;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.math.DoubleMath;
import com.google.common.primitives.Ints;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import org.apache.gobblin.source.extractor.utils.Utils;
/**
 * A {@link Watermark} implementation for a timestamp-granularity watermark column.
 *
 * <p>Watermark values are exchanged as longs in {@code yyyyMMddHHmmss} form.
 */
public class TimestampWatermark implements Watermark {
  private static final Logger LOG = LoggerFactory.getLogger(TimestampWatermark.class);

  // default water mark format(input format) example: 20140301050505
  private static final String INPUTFORMAT = "yyyyMMddHHmmss";

  // Shared parser for INPUTFORMAT; SimpleDateFormat is not thread-safe, which is one reason
  // getIntervals is synchronized.
  private final SimpleDateFormat inputFormatParser;

  // Smallest increment between the previous high watermark and the next low watermark.
  // NOTE(review): presumably one second, matching adjustWatermark's use of addSecondsToDate —
  // confirm with callers of getDeltaNumForNextWatermark.
  private static final int deltaForNextWatermark = 1;

  private String watermarkColumn;
  private String watermarkFormat;

  public TimestampWatermark(String watermarkColumn, String watermarkFormat) {
    this.watermarkColumn = watermarkColumn;
    this.watermarkFormat = watermarkFormat;
    inputFormatParser = new SimpleDateFormat(INPUTFORMAT);
  }

  /**
   * Builds the source-specific timestamp predicate condition (column vs. watermark value) by
   * delegating to the extractor.
   */
  @Override
  public String getWatermarkCondition(QueryBasedExtractor<?, ?> extractor, long watermarkValue, String operator) {
    return extractor
        .getTimestampPredicateCondition(this.watermarkColumn, watermarkValue, this.watermarkFormat, operator);
  }

  /** Returns the increment applied to derive the next run's low watermark. */
  @Override
  public int getDeltaNumForNextWatermark() {
    return deltaForNextWatermark;
  }

  /**
   * Splits the range [lowWatermarkValue, highWatermarkValue] (both {@code yyyyMMddHHmmss} longs)
   * into at most maxIntervals partitions of at least partitionInterval hours each.
   *
   * @return map of partition low watermark to partition high watermark as {@code yyyyMMddHHmmss}
   *         longs; empty when the low watermark exceeds the high watermark
   */
  @Override
  synchronized public HashMap<Long, Long> getIntervals(long lowWatermarkValue, long highWatermarkValue,
      long partitionInterval, int maxIntervals) {
    Preconditions
        .checkArgument(partitionInterval >= 1, "Invalid value for partitionInterval, value should be at least 1.");
    Preconditions.checkArgument(maxIntervals > 0, "Invalid value for maxIntervals, positive value expected.");
    HashMap<Long, Long> intervalMap = new HashMap<>();
    if (lowWatermarkValue > highWatermarkValue) {
      LOG.warn(
          "lowWatermarkValue: " + lowWatermarkValue + " is greater than highWatermarkValue: " + highWatermarkValue);
      return intervalMap;
    }
    final Calendar calendar = Calendar.getInstance();
    Date nextTime;
    final long lowWatermark = toEpoch(Long.toString(lowWatermarkValue));
    final long highWatermark = toEpoch(Long.toString(highWatermarkValue));
    // Interval in hours, re-scaled so the number of partitions stays <= maxIntervals.
    // getInterval returns int, so declare it as int (drops the previous long-then-(int)-cast).
    int interval = getInterval(highWatermark - lowWatermark, partitionInterval, maxIntervals);
    LOG.info("Recalculated partition interval:" + interval + " hours");
    if (interval == 0) {
      return intervalMap;
    }
    Date startTime = new Date(lowWatermark);
    Date endTime = new Date(highWatermark);
    // Fixed log-message typo ("Sart time" -> "Start time").
    LOG.debug("Start time:" + startTime + "; End time:" + endTime);
    long lwm;
    long hwm;
    // Degenerate range: a single zero-width partition.
    if (startTime.getTime() == endTime.getTime()) {
      lwm = Long.parseLong(inputFormatParser.format(startTime));
      hwm = lwm;
      intervalMap.put(lwm, hwm);
      return intervalMap;
    }
    while (startTime.getTime() < endTime.getTime()) {
      lwm = Long.parseLong(inputFormatParser.format(startTime));
      calendar.setTime(startTime);
      calendar.add(Calendar.HOUR, interval);
      nextTime = calendar.getTime();
      // Clamp the final partition's high watermark to the overall end time.
      hwm = Long.parseLong(inputFormatParser.format(nextTime.getTime() <= endTime.getTime() ? nextTime : endTime));
      intervalMap.put(lwm, hwm);
      LOG.debug("Partition - low:" + lwm + "; high:" + hwm);
      startTime = nextTime;
    }
    return intervalMap;
  }

  /**
   * recalculate interval(in hours) if total number of partitions greater than maximum number of allowed partitions
   *
   * @param diffInMilliSecs difference in range
   * @param hourInterval hour interval (ex: 4 hours)
   * @param maxIntervals max number of allowed partitions
   * @return calculated interval in hours
   */
  private static int getInterval(long diffInMilliSecs, long hourInterval, int maxIntervals) {
    // DoubleMath.roundToInt returns int; use int locals (consistent with HourWatermark).
    int totalHours = DoubleMath.roundToInt((double) diffInMilliSecs / (60 * 60 * 1000), RoundingMode.CEILING);
    int totalIntervals = DoubleMath.roundToInt((double) totalHours / hourInterval, RoundingMode.CEILING);
    // Too many partitions: widen the interval so the partition count drops to maxIntervals.
    if (totalIntervals > maxIntervals) {
      hourInterval = DoubleMath.roundToInt((double) totalHours / maxIntervals, RoundingMode.CEILING);
    }
    return Ints.checkedCast(hourInterval);
  }

  /**
   * Converts a yyyyMMddHHmmss string to epoch milliseconds.
   *
   * @throws RuntimeException if the value cannot be parsed
   */
  synchronized private static long toEpoch(String dateTime) {
    Date date = null;
    final SimpleDateFormat inputFormat = new SimpleDateFormat(INPUTFORMAT);
    try {
      date = inputFormat.parse(dateTime);
    } catch (ParseException e) {
      throw new RuntimeException(e.getMessage(), e);
    }
    return date.getTime();
  }

  /**
   * Adjust the given watermark by diff seconds
   *
   * @param baseWatermark the original watermark (yyyyMMddHHmmss)
   * @param diff the number of seconds to add (may be negative)
   * @return the adjusted watermark value, or -1 when baseWatermark cannot be parsed
   */
  public static long adjustWatermark(String baseWatermark, int diff) {
    SimpleDateFormat parser = new SimpleDateFormat(INPUTFORMAT);
    try {
      Date date = parser.parse(baseWatermark);
      return Long.parseLong(parser.format(Utils.addSecondsToDate(date, diff)));
    } catch (ParseException e) {
      LOG.error("Fail to adjust timestamp watermark", e);
    }
    return -1;
  }
}
| 2,997 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/OldApiHadoopTextInputSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
/**
* An extension to {@link OldApiHadoopFileInputSource} that uses a {@link TextInputFormat}.
*
* <p>
* A concrete implementation of this class should at least implement the
* {@link #getExtractor(WorkUnitState, RecordReader, FileSplit, boolean)} method.
* </p>
*
* @param <S> output schema type
*
* @author Yinan Li
*/
public abstract class OldApiHadoopTextInputSource<S> extends OldApiHadoopFileInputSource<S, Text, LongWritable, Text> {

  @Override
  protected FileInputFormat<LongWritable, Text> getFileInputFormat(State state, JobConf jobConf) {
    // Instantiate through ReflectionUtils so the JobConf is injected, then configure explicitly
    // as the old mapred API requires.
    TextInputFormat inputFormat = ReflectionUtils.newInstance(TextInputFormat.class, jobConf);
    inputFormat.configure(jobConf);
    return inputFormat;
  }
}
| 2,998 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/HadoopFsHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.hadoop;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import com.google.common.base.Strings;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelper;
import org.apache.gobblin.source.extractor.filebased.FileBasedHelperException;
import org.apache.gobblin.source.extractor.filebased.TimestampAwareFileBasedHelper;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.ProxiedFileSystemWrapper;
/**
* A common helper that extends {@link FileBasedHelper} and provides access to a files via a {@link FileSystem}.
*/
public class HadoopFsHelper implements TimestampAwareFileBasedHelper {
  private final State state;
  private final Configuration configuration;
  private FileSystem fs;

  public HadoopFsHelper(State state) {
    this(state, HadoopUtils.getConfFromState(state));
  }

  public HadoopFsHelper(State state, Configuration configuration) {
    this.state = state;
    this.configuration = configuration;
  }

  protected State getState() {
    return this.state;
  }

  /**
   * Returns the underlying {@link FileSystem}, or {@code null} if {@link #connect()} has not been called.
   */
  public FileSystem getFileSystem() {
    return this.fs;
  }

  /**
   * Initializes the underlying {@link FileSystem} from the URI configured under
   * {@link ConfigurationKeys#SOURCE_FILEBASED_FS_URI}, optionally proxying as another user
   * when {@link ConfigurationKeys#SHOULD_FS_PROXY_AS_USER} is set.
   *
   * @throws FileBasedHelperException if the URI is missing or malformed, or the file system cannot be created
   */
  @Override
  public void connect() throws FileBasedHelperException {
    String uri = this.state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI);
    try {
      if (Strings.isNullOrEmpty(uri)) {
        throw new FileBasedHelperException(ConfigurationKeys.SOURCE_FILEBASED_FS_URI + " has not been configured");
      }
      this.createFileSystem(uri);
    } catch (IOException e) {
      throw new FileBasedHelperException("Cannot connect to given URI " + uri + " due to " + e.getMessage(), e);
    } catch (URISyntaxException e) {
      throw new FileBasedHelperException("Malformed uri " + uri + " due to " + e.getMessage(), e);
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers further up the stack can observe it.
      Thread.currentThread().interrupt();
      throw new FileBasedHelperException("Interrupted exception is caught when getting the proxy file system", e);
    }
  }

  /**
   * Recursively lists all regular files under the given path.
   *
   * @param path root path to list
   * @return the full paths of all files found under {@code path}
   * @throws FileBasedHelperException if the listing fails
   */
  @Override
  public List<String> ls(String path) throws FileBasedHelperException {
    List<String> results = new ArrayList<>();
    try {
      lsr(new Path(path), results);
    } catch (IOException e) {
      throw new FileBasedHelperException("Cannot do ls on path " + path + " due to " + e.getMessage(), e);
    }
    return results;
  }

  /**
   * Recursively collects the paths of all regular files under {@code p} into {@code results}.
   * If {@code p} is itself a regular file, only {@code p} is added.
   *
   * @param p root path to list
   * @param results list that collected file paths are appended to
   * @throws IOException if the file system cannot be accessed
   */
  public void lsr(Path p, List<String> results) throws IOException {
    if (!this.fs.getFileStatus(p).isDirectory()) {
      results.add(p.toString());
      // p is a regular file: listStatus(p) would return p's own status and add a duplicate entry,
      // so stop here instead of falling through to the directory-listing loop below.
      return;
    }
    Path qualifiedPath = this.fs.makeQualified(p);
    for (FileStatus status : this.fs.listStatus(p)) {
      if (status.isDirectory()) {
        // Fix for hadoop issue: https://issues.apache.org/jira/browse/HADOOP-12169
        if (!qualifiedPath.equals(status.getPath())) {
          lsr(status.getPath(), results);
        }
      } else {
        results.add(status.getPath().toString());
      }
    }
  }

  /**
   * Creates the {@link FileSystem} for the given URI, either as a proxy user (token-based
   * authentication) or as the current user, depending on configuration.
   */
  private void createFileSystem(String uri) throws IOException, InterruptedException, URISyntaxException {
    if (this.state.getPropAsBoolean(ConfigurationKeys.SHOULD_FS_PROXY_AS_USER,
        ConfigurationKeys.DEFAULT_SHOULD_FS_PROXY_AS_USER)) {
      // Initialize file system as a proxy user.
      this.fs = new ProxiedFileSystemWrapper().getProxiedFileSystem(this.state, ProxiedFileSystemWrapper.AuthType.TOKEN,
          this.state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_TOKEN_FILE), uri, this.configuration);
    } else {
      // Initialize file system as the current user.
      this.fs = FileSystem.newInstance(URI.create(uri), this.configuration);
    }
  }

  /**
   * Returns the last-modification time of the file at the given path, in milliseconds since the epoch.
   *
   * @throws FileBasedHelperException if the file status cannot be retrieved
   */
  @Override
  public long getFileMTime(String filePath) throws FileBasedHelperException {
    try {
      return this.getFileSystem().getFileStatus(new Path(filePath)).getModificationTime();
    } catch (IOException e) {
      throw new FileBasedHelperException(String
          .format("Failed to get last modified time for file at path %s due to error %s", filePath, e.getMessage()), e);
    }
  }

  /**
   * Returns the length of the file at the given path, in bytes.
   *
   * @throws FileBasedHelperException if the file status cannot be retrieved
   */
  @Override
  public long getFileSize(String filePath) throws FileBasedHelperException {
    try {
      return this.getFileSystem().getFileStatus(new Path(filePath)).getLen();
    } catch (IOException e) {
      throw new FileBasedHelperException(
          String.format("Failed to get size for file at path %s due to error %s", filePath, e.getMessage()), e);
    }
  }

  /**
   * Returns an {@link InputStream} to the specified file.
   * <p>
   * Note: It is the caller's responsibility to close the returned {@link InputStream}.
   * </p>
   *
   * @param path The path to the file to open.
   * @return An {@link InputStream} for the specified file.
   * @throws FileBasedHelperException if there is a problem opening the {@link InputStream} for the specified file.
   */
  @Override
  public InputStream getFileStream(String path) throws FileBasedHelperException {
    try {
      Path p = new Path(path);
      InputStream in = this.getFileSystem().open(p);
      // Account for compressed files (e.g. gzip).
      // https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/input/WholeTextFileRecordReader.scala
      CompressionCodecFactory factory = new CompressionCodecFactory(this.getFileSystem().getConf());
      CompressionCodec codec = factory.getCodec(p);
      return (codec == null) ? in : codec.createInputStream(in);
    } catch (IOException e) {
      throw new FileBasedHelperException("Cannot open file " + path + " due to " + e.getMessage(), e);
    }
  }

  @Override
  public void close() throws IOException {
    // Guard against NPE when connect() was never called or failed before assigning this.fs.
    if (this.fs != null) {
      this.fs.close();
    }
  }
}
| 2,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.