index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/JdbcWriterCommandsFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.commands;
import java.sql.Connection;
import com.google.common.base.Preconditions;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.Destination.DestinationType;
/**
* Factory method pattern class. It's not a static class mainly for TDD -- so that it can be mocked for testing purpose.
*/
public class JdbcWriterCommandsFactory {

  /**
   * Builds the {@link JdbcWriterCommands} implementation matching the destination's RDBMS product.
   *
   * @param destination target destination; its type selects the concrete implementation
   * @param conn open JDBC connection handed to the created commands object
   * @return a {@link JdbcWriterCommands} for the destination's database product
   * @throws IllegalArgumentException if the destination type has no implementation
   */
  public JdbcWriterCommands newInstance(Destination destination, Connection conn) {
    State props = destination.getProperties();
    boolean allowOverwrite = props.getPropAsBoolean(ConfigurationKeys.ALLOW_JDBC_RECORD_OVERWRITE);
    DestinationType type = destination.getType();
    switch (type) {
      case MYSQL:
        return new MySqlWriterCommands(props, conn, allowOverwrite);
      case TERADATA:
        return new TeradataWriterCommands(props, conn, allowOverwrite);
      case POSTGRES:
        return new PostgresWriterCommands(props, conn, allowOverwrite);
      default:
        throw new IllegalArgumentException(type + " is not supported");
    }
  }

  /**
   * Resolves the destination type from the (possibly branch-specific) property
   * {@link ConfigurationKeys#WRITER_DESTINATION_TYPE_KEY} and delegates to
   * {@link #newInstance(Destination, Connection)}.
   *
   * @param state state carrying the destination-type property
   * @param conn open JDBC connection handed to the created commands object
   * @return a {@link JdbcWriterCommands} for the configured destination type
   * @throws NullPointerException if the destination-type property is missing
   */
  public JdbcWriterCommands newInstance(State state, Connection conn) {
    String branchedKey = ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY,
        state.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1),
        state.getPropAsInt(ConfigurationKeys.FORK_BRANCH_ID_KEY, 0));
    String destTypeName = state.getProp(branchedKey);
    Preconditions.checkNotNull(destTypeName, branchedKey + " is required for underlying JDBC product name");
    return newInstance(Destination.of(DestinationType.valueOf(destTypeName.toUpperCase()), state), conn);
  }
}
| 3,500 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/JdbcWriterCommands.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.commands;
import org.apache.gobblin.converter.jdbc.JdbcType;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Map;
import java.util.Properties;
/**
* JdbcWriterCommands is interface that its implementation will
* directly talk with underlying RDBMS via JDBC. Different RDBMS may use different SQL syntax,
* and having this interface decouples JdbcWriter with any syntax difference each RDBMS might have.
*/
public interface JdbcWriterCommands extends JdbcBufferedInserter {
/**
* Sets writer-specific connection parameters, e.g. transaction handling.
*
* @param properties State properties
* @param conn Connection object to configure
* @throws SQLException if the parameters cannot be applied
*/
public void setConnectionParameters(Properties properties, Connection conn) throws SQLException;
/**
* Creates a table whose structure is copied from an existing table. Note that this does not guarantee an
* exact copy of the original table: constraints, foreign keys, sequences, indices, etc may not be carried over.
* @param databaseName database holding both the source and the target table
* @param fromStructure name of the table whose structure is copied
* @param targetTableName name of the table to be created
* @throws SQLException if the table cannot be created
*/
public void createTableStructure(String databaseName, String fromStructure, String targetTableName) throws SQLException;
/**
* Checks if the table is empty.
* @param database database holding the table
* @param table table to check
* @return true if the table contains no rows
* @throws SQLException on query failure
*/
public boolean isEmpty(String database, String table) throws SQLException;
/**
* Truncates the table. In most RDBMS a TRUNCATE cannot be rolled back.
* @param database database holding the table
* @param table table name to be truncated.
* @throws SQLException on failure
*/
public void truncate(String database, String table) throws SQLException;
/**
* Deletes all contents from the table. Unlike truncate, this operation can be rolled back if not committed.
* @param database database holding the table
* @param table table whose rows are deleted
* @throws SQLException on failure
*/
public void deleteAll(String database, String table) throws SQLException;
/**
* Drops the table.
* @param database database holding the table
* @param table table to drop
* @throws SQLException on failure
*/
public void drop(String database, String table) throws SQLException;
/**
* Retrieves date-related columns such as Date, Time, DateTime, Timestamp etc.
* @param database database holding the table
* @param table table whose columns are inspected
* @return Map of column name and JdbcType that is date related.
* @throws SQLException on metadata-query failure
*/
public Map<String, JdbcType> retrieveDateColumns(String database, String table) throws SQLException;
/**
* Copies all the contents from one table to another. Both tables are expected to share the same structure.
* @param databaseName database holding both tables
* @param from source table
* @param to destination table
* @throws SQLException on copy failure
*/
public void copyTable(String databaseName, String from, String to) throws SQLException;
} | 3,501 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/BaseJdbcBufferedInserter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.commands;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import lombok.ToString;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcEntryDatum;
/**
* Base implementation of JdbcBufferedInserter.
* Concrete DB specific implementations are expected to subclass this class.
*
*/
@ToString
public abstract class BaseJdbcBufferedInserter implements JdbcBufferedInserter {
private static final Logger LOG = LoggerFactory.getLogger(BaseJdbcBufferedInserter.class);
// Dialect-neutral prefix: "INSERT INTO <db>.<table> (<cols>) VALUES " — subclasses append value groups.
protected static final String INSERT_STATEMENT_PREFIX_FORMAT = "INSERT INTO %s.%s (%s) VALUES ";
protected static final Joiner JOINER_ON_COMMA = Joiner.on(',');
protected final Connection conn;
// Rows that are inserted at once in one batch cycle
protected final List<JdbcEntryData> pendingInserts = Lists.newArrayList();
// Column names captured from the first record seen; fixes the parameter binding order.
protected final List<String> columnNames = Lists.newArrayList();
// Rows per batch; subclasses may lower this (e.g. to honor a max-parameter limit).
protected int batchSize;
// INSERT_STATEMENT_PREFIX_FORMAT resolved for the current database/table/columns.
protected String insertStmtPrefix;
// Statement pre-compiled for exactly batchSize rows; reused for every full batch.
protected PreparedStatement insertPstmtForFixedBatch;
private final Retryer<Boolean> retryer;
/**
 * Reads batch-size and retry settings from the state and prepares the retryer used
 * by {@link #executeBatchInsert(PreparedStatement)}.
 *
 * @param state source of the WRITER_JDBC_INSERT_* settings
 * @param conn open JDBC connection used for all inserts
 * @throws IllegalArgumentException if the configured batch size is not positive
 */
public BaseJdbcBufferedInserter(State state, Connection conn) {
this.conn = conn;
this.batchSize = state.getPropAsInt(WRITER_JDBC_INSERT_BATCH_SIZE, DEFAULT_WRITER_JDBC_INSERT_BATCH_SIZE);
if (this.batchSize < 1) {
throw new IllegalArgumentException(WRITER_JDBC_INSERT_BATCH_SIZE + " should be a positive number");
}
int maxWait = state.getPropAsInt(WRITER_JDBC_INSERT_RETRY_TIMEOUT, DEFAULT_WRITER_JDBC_INSERT_RETRY_TIMEOUT);
int maxAttempts =
state.getPropAsInt(WRITER_JDBC_INSERT_RETRY_MAX_ATTEMPT, DEFAULT_WRITER_JDBC_INSERT_RETRY_MAX_ATTEMPT);
//retry after 2, 4, 8, 16... sec, allow at most maxWait sec delay
this.retryer = RetryerBuilder.<Boolean> newBuilder().retryIfException()
.withWaitStrategy(WaitStrategies.exponentialWait(1000, maxWait, TimeUnit.SECONDS))
.withStopStrategy(StopStrategies.stopAfterAttempt(maxAttempts)).build();
}
/**
* Adds all the records from {@link #pendingInserts} to the PreparedStatement and executes the
* batch insert.
*
* @param pstmt PreparedStatement object
* @return true if the insert was successful
* @throws SQLException if the insert fails (triggers a retry via the retryer)
*/
protected abstract boolean insertBatch(final PreparedStatement pstmt) throws SQLException;
/**
* Constructs the SQL insert statement for the batch inserts, using the {@link #INSERT_STATEMENT_PREFIX_FORMAT}
*
* @param batchSize size of one batch
* @return the constructed SQL string for batch inserts
*/
protected abstract String createPrepareStatementStr(int batchSize);
/**
* <p>
* Inserts entry into buffer. If current # of entries filled batch size or it overflowed the buffer,
* it will call underlying JDBC to actually insert it.
* </p>
*
* {@inheritDoc}
* @see org.apache.gobblin.writer.commands.JdbcBufferedInserter#insert(java.lang.String, java.lang.String, org.apache.gobblin.converter.jdbc.JdbcEntryData)
*/
@Override
public void insert(String databaseName, String table, JdbcEntryData jdbcEntryData) throws SQLException {
// First record defines the column set; assumes every later record has the same
// columns in the same order — TODO confirm with the upstream converter contract.
if (this.columnNames.isEmpty()) {
for (JdbcEntryDatum datum : jdbcEntryData) {
this.columnNames.add(datum.getColumnName());
}
initializeBatch(databaseName, table);
}
this.pendingInserts.add(jdbcEntryData);
if (this.pendingInserts.size() == this.batchSize) {
executeBatchInsert(this.insertPstmtForFixedBatch); // Reuse pre-computed Preparedstatement.
}
}
/**
* Initializes variables for batch insert and pre-compute PreparedStatement based on requested batch size and parameter size.
* @param databaseName database holding the target table
* @param table target table name
* @throws SQLException if the statement cannot be prepared
*/
protected void initializeBatch(String databaseName, String table)
throws SQLException {
// Order matters: subclasses' createPrepareStatementStr may read insertStmtPrefix.
this.insertStmtPrefix = createInsertStatementStr(databaseName, table);
this.insertPstmtForFixedBatch =
this.conn.prepareStatement(createPrepareStatementStr(this.batchSize));
LOG.info(String.format("Initialized for %s insert " + this, (this.batchSize > 1) ? "batch" : ""));
}
/**
* Submits the user defined {@link #insertBatch(PreparedStatement)} call to the {@link Retryer} which takes care
* of resubmitting the records according to {@link #WRITER_JDBC_INSERT_RETRY_TIMEOUT} and {@link #WRITER_JDBC_INSERT_RETRY_MAX_ATTEMPT}
* when failure happens.
*
* @param pstmt PreparedStatement object
*/
protected void executeBatchInsert(final PreparedStatement pstmt) {
try {
// Need a Callable interface to be wrapped by Retryer.
this.retryer.wrap(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return insertBatch(pstmt);
}
}).call();
} catch (Exception e) {
throw new RuntimeException("Failed to insert.", e);
}
// Reached only on success: on failure the pending rows are left intact (and the
// RuntimeException above propagates).
resetBatch();
}
/**
* Resets the list of rows after the batch insert
*/
protected void resetBatch() {
this.pendingInserts.clear();
}
/**
* Populates the placeholders and constructs the prefix of batch insert statement
* @param databaseName name of the database
* @param table name of the table
* @return {@link #INSERT_STATEMENT_PREFIX_FORMAT} with all its resolved placeholders
*/
protected String createInsertStatementStr(String databaseName, String table) {
return String.format(INSERT_STATEMENT_PREFIX_FORMAT, databaseName, table, JOINER_ON_COMMA.join(this.columnNames));
}
// Writes out any partially-filled batch with a statement sized to the remainder.
// NOTE(review): unlike executeBatchInsert this bypasses the retryer and does not
// clear pendingInserts afterwards, so a second flush would re-insert the same rows —
// confirm callers invoke flush at most once before close.
@Override
public void flush() throws SQLException {
// pendingInserts is a final field initialized inline, so the null check is defensive only.
if (this.pendingInserts == null || this.pendingInserts.isEmpty()) {
return;
}
try (PreparedStatement pstmt = this.conn.prepareStatement(createPrepareStatementStr(this.pendingInserts.size()));) {
insertBatch(pstmt);
}
}
} | 3,502 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/GenericJdbcBufferedInserter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.commands;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcEntryDatum;
/**
 * Driver-agnostic {@link JdbcBufferedInserter} that batches rows through the standard
 * JDBC batching API ({@link PreparedStatement#addBatch()} / {@code executeBatch()})
 * on a single-row INSERT statement, instead of building a dialect-specific
 * multi-row VALUES list.
 */
public class GenericJdbcBufferedInserter extends BaseJdbcBufferedInserter {
  private static final Logger LOG = LoggerFactory.getLogger(GenericJdbcBufferedInserter.class);

  // Single-row insert: base prefix plus one "(?,...)" placeholder group.
  private static final String INSERT_STATEMENT_PREFIX_FORMAT =
      BaseJdbcBufferedInserter.INSERT_STATEMENT_PREFIX_FORMAT + " (%s)";

  // Upper bound on the number of bound parameters (columns * rows) per statement.
  private final int maxParamSize;
  // Number of rows currently queued on insertPstmtForFixedBatch via addBatch().
  private int currBatchSize;

  public GenericJdbcBufferedInserter(State state, Connection conn) {
    super(state, conn);
    this.maxParamSize = state.getPropAsInt(WRITER_JDBC_MAX_PARAM_SIZE, DEFAULT_WRITER_JDBC_MAX_PARAM_SIZE);
  }

  /**
   * Executes the rows queued on the given statement in a single JDBC batch.
   *
   * @param pstmt statement whose queued batch is executed
   * @return always {@code true} (consumed by the retry wrapper in the base class)
   */
  @Override
  protected boolean insertBatch(PreparedStatement pstmt) throws SQLException {
    // Fix: honor the statement handed in by the caller instead of always reaching
    // for the insertPstmtForFixedBatch field. Both call sites currently pass that
    // field, but using the parameter keeps the hook's contract intact.
    pstmt.executeBatch();
    return true;
  }

  /**
   * Binds one record's values, queues it with addBatch(), and executes the batch
   * once {@link #batchSize} rows have accumulated.
   */
  @Override
  public void insert(String databaseName, String table, JdbcEntryData jdbcEntryData) throws SQLException {
    if (this.insertPstmtForFixedBatch == null) {
      // First record defines the column set and binding order.
      for (JdbcEntryDatum datum : jdbcEntryData) {
        this.columnNames.add(datum.getColumnName());
      }
      initializeBatch(databaseName, table);
    }
    int i = 0;
    for (JdbcEntryDatum datum : jdbcEntryData) {
      this.insertPstmtForFixedBatch.setObject(++i, datum.getVal());
    }
    this.insertPstmtForFixedBatch.addBatch();
    this.currBatchSize++;
    if (this.currBatchSize >= this.batchSize) {
      executeBatchInsert(this.insertPstmtForFixedBatch);
    }
  }

  /** Shrinks the batch size so that columns * rows never exceeds {@link #maxParamSize}. */
  @Override
  protected void initializeBatch(String databaseName, String table) throws SQLException {
    int actualBatchSize = Math.min(this.batchSize, this.maxParamSize / this.columnNames.size());
    if (this.batchSize != actualBatchSize) {
      LOG.info("Changing batch size from " + this.batchSize + " to " + actualBatchSize
          + " due to # of params limitation " + this.maxParamSize + " , # of columns: " + this.columnNames.size());
    }
    this.batchSize = actualBatchSize;
    super.initializeBatch(databaseName, table);
  }

  /** Resolves the single-row template, e.g. "INSERT INTO db.tbl (a,b) VALUES  (?,?)". */
  @Override
  protected String createInsertStatementStr(String databaseName, String table) {
    return String.format(INSERT_STATEMENT_PREFIX_FORMAT, databaseName, table,
        JOINER_ON_COMMA.join(columnNames), JOINER_ON_COMMA.useForNull("?").join(new String[columnNames.size()]));
  }

  /** Clears the queued batch and bound parameters after a (successful) batch execution. */
  @Override
  protected void resetBatch() {
    try {
      this.insertPstmtForFixedBatch.clearBatch();
      this.insertPstmtForFixedBatch.clearParameters();
      this.currBatchSize = 0;
    } catch (SQLException e) {
      throw new RuntimeException(e);
    }
  }

  /** Executes any partially-filled batch directly (no retry wrapper). */
  @Override
  public void flush() throws SQLException {
    // NOTE(review): currBatchSize is not reset here; presumably flush() is only
    // called once right before close — confirm with the writer lifecycle.
    if (this.currBatchSize > 0) {
      insertBatch(this.insertPstmtForFixedBatch);
    }
  }

  /**
   * BUG FIX: this previously returned {@code null}, making
   * {@link BaseJdbcBufferedInserter#initializeBatch} call
   * {@code conn.prepareStatement(null)}. This inserter batches via addBatch() on a
   * single-row statement, so the prepared SQL is simply {@link #insertStmtPrefix}
   * (which already contains the "(?,...)" placeholder group); {@code batchSize}
   * is irrelevant here.
   */
  @Override
  protected String createPrepareStatementStr(int batchSize) {
    return this.insertStmtPrefix;
  }
} | 3,503 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/PostgresBufferedInserter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.commands;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcEntryDatum;
import lombok.extern.slf4j.Slf4j;
/**
 * {@link JdbcBufferedInserter} for PostgreSQL. Buffers rows and writes each batch with
 * one multi-row INSERT statement: "INSERT INTO ... VALUES (...),(...),...;".
 */
@Slf4j
public class PostgresBufferedInserter extends BaseJdbcBufferedInserter {

  public PostgresBufferedInserter(State state, Connection conn) {
    super(state, conn);
  }

  /**
   * Builds the multi-row INSERT statement for {@code batchSize} rows, one "?"
   * placeholder per column in each "(...)" group.
   */
  @Override
  protected String createPrepareStatementStr(int batchSize) {
    final String VALUE_FORMAT = "(%s)";
    StringBuilder sb = new StringBuilder(this.insertStmtPrefix);
    // Joining an all-null array of size N with useForNull("?") yields "?,?,...,?" (N placeholders).
    String values =
        String.format(VALUE_FORMAT, JOINER_ON_COMMA.useForNull("?").join(new String[this.columnNames.size()]));
    sb.append(values);
    for (int i = 1; i < batchSize; i++) {
      sb.append(',').append(values);
    }
    return sb.append(';').toString();
  }

  /**
   * Binds every buffered row's values into the statement, in column order, and executes it.
   *
   * @param pstmt statement prepared for exactly the buffered number of rows
   * @return result of {@link PreparedStatement#execute()} (false for an INSERT)
   */
  @Override
  protected boolean insertBatch(PreparedStatement pstmt)
      throws SQLException {
    int i = 0;
    pstmt.clearParameters();
    for (JdbcEntryData pendingEntry : this.pendingInserts) {
      for (JdbcEntryDatum datum : pendingEntry) {
        pstmt.setObject(++i, datum.getVal());
      }
    }
    // Fix: guard the debug call so the (potentially huge) statement is only
    // stringified when debug logging is on — consistent with MySqlBufferedInserter.
    if (log.isDebugEnabled()) {
      log.debug("Executing SQL " + pstmt);
    }
    return pstmt.execute();
  }
}
| 3,504 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/MySqlBufferedInserter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.commands;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcEntryDatum;
import lombok.ToString;
/**
* The implementation of JdbcBufferedInserter for MySQL.
* This purpose of buffered insert is mainly for performance reason and the implementation is based on the
* reference manual https://dev.mysql.com/doc/refman/8.0/en/
*
* This class supports two types of insertions for MySQL 1) standard insertion - only supports records with unique
* primary keys and fails on attempted insertion of a duplicate record 2) replace insertion - inserts new records as
* normal but allows for value overwrites for duplicate inserts (by primary key)
*
* Note that replacement occurs at 'record-level', so if there are duplicates in the same input then they will replace
* each other in a non-deterministic order.
*/
@ToString
public class MySqlBufferedInserter extends BaseJdbcBufferedInserter {
  private static final Logger LOG = LoggerFactory.getLogger(MySqlBufferedInserter.class);

  // Same shape as the INSERT prefix, but with REPLACE semantics for overwrites.
  protected static final String REPLACE_STATEMENT_PREFIX_FORMAT = "REPLACE INTO %s.%s (%s) VALUES ";

  // Cap on the total number of bound parameters (columns * rows) per statement.
  private final int maxParamSize;
  // When true, duplicate primary keys are overwritten via REPLACE instead of failing.
  private final boolean overwriteRecords;

  public MySqlBufferedInserter(State state, Connection conn, boolean overwriteRecords) {
    super(state, conn);
    this.maxParamSize = state.getPropAsInt(WRITER_JDBC_MAX_PARAM_SIZE, DEFAULT_WRITER_JDBC_MAX_PARAM_SIZE);
    this.overwriteRecords = overwriteRecords;
  }

  /** Binds every buffered row into the statement, in column order, and executes it. */
  @Override
  protected boolean insertBatch(PreparedStatement pstmt) throws SQLException {
    pstmt.clearParameters();
    int paramIdx = 1;
    for (JdbcEntryData row : this.pendingInserts) {
      for (JdbcEntryDatum datum : row) {
        pstmt.setObject(paramIdx, datum.getVal());
        paramIdx++;
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Executing SQL " + pstmt);
    }
    return pstmt.execute();
  }

  /** Builds "<prefix>(?,...),(?,...),...;" with one placeholder group per row in the batch. */
  @Override
  protected String createPrepareStatementStr(int batchSize) {
    // One "(?,?,...)" group with a placeholder per column.
    StringBuilder placeholders = new StringBuilder("(");
    for (int col = 0; col < this.columnNames.size(); col++) {
      if (col > 0) {
        placeholders.append(',');
      }
      placeholders.append('?');
    }
    String valueGroup = placeholders.append(')').toString();

    StringBuilder stmt = new StringBuilder(this.insertStmtPrefix).append(valueGroup);
    for (int row = 1; row < batchSize; row++) {
      stmt.append(',').append(valueGroup);
    }
    return stmt.append(';').toString();
  }

  /** Shrinks the batch size so that columns * rows never exceeds the parameter cap. */
  @Override
  protected void initializeBatch(String databaseName, String table)
      throws SQLException {
    int cappedBatchSize = Math.min(this.batchSize, this.maxParamSize / this.columnNames.size());
    if (cappedBatchSize != this.batchSize) {
      LOG.info("Changing batch size from " + this.batchSize + " to " + cappedBatchSize
          + " due to # of params limitation " + this.maxParamSize + " , # of columns: " + this.columnNames.size());
    }
    this.batchSize = cappedBatchSize;
    super.initializeBatch(databaseName, table);
  }

  /** Uses a REPLACE statement instead of INSERT when record overwrites are allowed. */
  @Override
  protected String createInsertStatementStr(String databaseName, String table) {
    String prefixFormat = this.overwriteRecords ? REPLACE_STATEMENT_PREFIX_FORMAT : INSERT_STATEMENT_PREFIX_FORMAT;
    return String.format(prefixFormat, databaseName, table, JOINER_ON_COMMA.join(this.columnNames));
  }
} | 3,505 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/test/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/test/java/org/apache/gobblin/converter/parquet/JsonIntermediateToParquetGroupConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.parquet;
import java.io.InputStreamReader;
import java.lang.reflect.Type;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.schema.MessageType;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.google.gson.reflect.TypeToken;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.source.workunit.Extract;
import static org.testng.Assert.assertEquals;
@Test(groups = {"gobblin.converter"})
public class JsonIntermediateToParquetGroupConverterTest {
  private static final String RESOURCE_PATH = "/converter/JsonIntermediateToParquetConverter.json";
  // Named test cases loaded from RESOURCE_PATH; each entry carries "schema"/"record"
  // inputs plus "expectedSchema"/"expectedRecord" strings.
  private static JsonObject testCases;
  private static WorkUnitState workUnit;
  private static JsonIntermediateToParquetGroupConverter parquetConverter;

  @BeforeClass
  public static void setUp() {
    Type jsonObjectType = new TypeToken<JsonObject>() {
    }.getType();
    Gson gson = new Gson();
    // NOTE(review): this reader uses the platform default charset; fine as long as the
    // fixture stays ASCII/UTF-8 — confirm if non-ASCII cases are ever added.
    testCases = gson.fromJson(
        new InputStreamReader(JsonIntermediateToParquetGroupConverter.class.getResourceAsStream(RESOURCE_PATH)),
        jsonObjectType);
    SourceState source = new SourceState();
    workUnit = new WorkUnitState(
        source.createWorkUnit(source.createExtract(Extract.TableType.SNAPSHOT_ONLY, "test_namespace", "test_table")));
  }

  /** Runs one named case: converts schema and record, then compares against the expectations. */
  private void testCase(String testCaseName)
      throws SchemaConversionException, DataConversionException {
    JsonObject test = testCases.get(testCaseName).getAsJsonObject();
    parquetConverter = new JsonIntermediateToParquetGroupConverter();
    MessageType schema = parquetConverter.convertSchema(test.get("schema").getAsJsonArray(), workUnit);
    Group record =
        parquetConverter.convertRecord(schema, test.get("record").getAsJsonObject(), workUnit).iterator().next();
    assertEqualsIgnoreSpaces(schema.toString(), test.get("expectedSchema").getAsString());
    assertEqualsIgnoreSpaces(record.toString(), test.get("expectedRecord").getAsString());
  }

  @Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Symbol .* does not belong to set \\[.*?\\]")
  public void testEnumTypeBelongsToEnumSet()
      throws Exception {
    // Deep-copy so mutating the record below does not pollute the shared testCases fixture.
    JsonObject test = deepCopy(testCases.get("enum").getAsJsonObject(), JsonObject.class);
    parquetConverter = new JsonIntermediateToParquetGroupConverter();
    MessageType schema = parquetConverter.convertSchema(test.get("schema").getAsJsonArray(), workUnit);
    JsonObject jsonRecord = test.get("record").getAsJsonObject();
    jsonRecord.addProperty("some_enum", "HELL");
    parquetConverter.convertRecord(schema, jsonRecord, workUnit).iterator().next();
  }

  @Test
  public void testPrimitiveTypes()
      throws Exception {
    testCase("simplePrimitiveTypes");
  }

  @Test
  public void testArrayType()
      throws Exception {
    testCase("array");
  }

  @Test
  public void testEnumTypeWithNullableTrue()
      throws Exception {
    testCase("enum");
  }

  @Test
  public void testEnumTypeWithNullableFalse()
      throws Exception {
    testCase("enum1");
  }

  @Test
  public void testRecordType()
      throws Exception {
    testCase("record");
  }

  @Test
  public void testMapType()
      throws Exception {
    testCase("map");
  }

  @Test
  public void testNullValueInOptionalField()
      throws Exception {
    testCase("nullValueInOptionalField");
  }

  /** Compares two strings while ignoring newline, space and tab differences. */
  private void assertEqualsIgnoreSpaces(String actual, String expected) {
    assertEquals(actual.replaceAll("\\n", ";").replaceAll("\\s|\\t", ""),
        expected.replaceAll("\\n", ";").replaceAll("\\s|\\t", ""));
  }

  /**
   * Deep-copies a value by round-tripping it through Gson.
   *
   * FIX: the previous version swallowed any failure ({@code printStackTrace}) and
   * returned {@code null}, which surfaced later as a confusing NPE in the caller.
   * Serialization failures now propagate immediately with their original cause.
   */
  public <T> T deepCopy(T object, Class<T> type) {
    Gson gson = new Gson();
    return gson.fromJson(gson.toJson(object, type), type);
  }
}
| 3,506 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/test/java/org/apache/gobblin/writer/TestConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroup;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;
import org.apache.gobblin.parquet.writer.test.TestConstantsBase;
import org.apache.gobblin.test.TestRecord;
public class TestConstants extends TestConstantsBase<Group> {

  /**
   * Schema shared by the Group-based writer tests: a required UTF8 payload plus
   * required INT32 partition and sequence fields.
   */
  public static final MessageType PARQUET_SCHEMA = Types.buildMessage()
      .addFields(
          Types.required(PrimitiveType.PrimitiveTypeName.BINARY).as(OriginalType.UTF8)
              .named(TestConstants.PAYLOAD_FIELD_NAME),
          Types.required(PrimitiveType.PrimitiveTypeName.INT32).named(TestConstants.PARTITION_FIELD_NAME),
          // Sequence field is INT32 instead of INT64, because this version of parquet only supports INT32
          Types.required(PrimitiveType.PrimitiveTypeName.INT32).named(TestConstants.SEQUENCE_FIELD_NAME))
      .named("Data");

  /** Converts a {@link TestRecord} into a parquet {@link Group} laid out per {@link #PARQUET_SCHEMA}. */
  @Override
  public Group convertToParquetGroup(TestRecord record) {
    Group converted = new SimpleGroup(PARQUET_SCHEMA);
    converted.add(PAYLOAD_FIELD_NAME, record.getPayload());
    // Narrow the sequence to int to match the INT32 schema field above.
    converted.add(SEQUENCE_FIELD_NAME, (int) record.getSequence());
    converted.add(PARTITION_FIELD_NAME, record.getPartition());
    return converted;
  }
}
| 3,507 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/test/java/org/apache/gobblin/writer/ParquetHdfsDataWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import org.apache.parquet.avro.AvroParquetReader;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.api.InitContext;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.proto.ProtoParquetReader;
import org.apache.parquet.schema.MessageType;
import org.apache.gobblin.parquet.writer.ParquetRecordFormat;
import org.apache.gobblin.parquet.writer.test.ParquetHdfsDataWriterTestBase;
import org.apache.gobblin.test.TestRecord;
import org.apache.gobblin.test.proto.TestRecordProtos;
@Test(groups = {"gobblin.writer"})
public class ParquetHdfsDataWriterTest extends ParquetHdfsDataWriterTestBase {

  public ParquetHdfsDataWriterTest() {
    super(new TestConstants());
  }

  @BeforeMethod
  public void setUp()
      throws Exception {
    super.setUp();
  }

  protected DataWriterBuilder getDataWriterBuilder() {
    return new ParquetDataWriterBuilder();
  }

  @Override
  protected List<TestRecord> readParquetRecordsFromFile(File outputFile, ParquetRecordFormat format)
      throws IOException {
    // Dispatch on the record format under test.
    switch (format) {
      case GROUP:
        return readParquetFilesGroup(outputFile);
      case PROTOBUF:
        return readParquetFilesProto(outputFile);
      case AVRO:
        return readParquetFilesAvro(outputFile);
      default:
        throw new RuntimeException(format + " is not supported");
    }
  }

  /** Reads all records from an Avro-backed parquet file into {@link TestRecord}s. */
  private List<TestRecord> readParquetFilesAvro(File outputFile)
      throws IOException {
    List<TestRecord> result = new ArrayList<>();
    ParquetReader<org.apache.gobblin.test.avro.TestRecord> avroReader = null;
    try {
      avroReader = new AvroParquetReader<>(new Path(outputFile.toString()));
      org.apache.gobblin.test.avro.TestRecord next = avroReader.read();
      while (next != null) {
        result.add(new TestRecord(next.getPartition(), next.getSequence(), next.getPayload()));
        next = avroReader.read();
      }
    } finally {
      closeQuietly(avroReader);
    }
    return result;
  }

  /** Reads all records from a protobuf-backed parquet file into {@link TestRecord}s. */
  protected List<TestRecord> readParquetFilesProto(File outputFile)
      throws IOException {
    List<TestRecord> result = new ArrayList<>();
    ParquetReader<TestRecordProtos.TestRecordOrBuilder> protoReader = null;
    try {
      protoReader = new ProtoParquetReader<>(new Path(outputFile.toString()));
      for (TestRecordProtos.TestRecordOrBuilder next = protoReader.read(); next != null; next = protoReader.read()) {
        result.add(new TestRecord(next.getPartition(), next.getSequence(), next.getPayload()));
      }
    } finally {
      closeQuietly(protoReader);
    }
    return result;
  }

  /** Reads all records from a Group-backed parquet file into {@link TestRecord}s. */
  protected List<TestRecord> readParquetFilesGroup(File outputFile)
      throws IOException {
    List<Group> groups = new ArrayList<>();
    ParquetReader<Group> groupReader = null;
    try {
      groupReader = new ParquetReader<>(new Path(outputFile.toString()), new SimpleReadSupport());
      for (Group next = groupReader.read(); next != null; next = groupReader.read()) {
        groups.add(next);
      }
    } finally {
      closeQuietly(groupReader);
    }
    // Map the raw groups to TestRecords by looking up the well-known field names.
    return groups.stream().map(g -> new TestRecord(
        g.getInteger(TestConstants.PARTITION_FIELD_NAME, 0),
        g.getInteger(TestConstants.SEQUENCE_FIELD_NAME, 0),
        g.getString(TestConstants.PAYLOAD_FIELD_NAME, 0)
    )).collect(Collectors.toList());
  }

  /** Best-effort close: a failure to close a reader must not mask the test outcome. */
  private static void closeQuietly(ParquetReader<?> reader) {
    if (reader != null) {
      try {
        reader.close();
      } catch (Exception ex) {
        System.out.println(ex.getMessage());
      }
    }
  }

  @Test
  public void testWrite()
      throws Exception {
    super.testWrite();
  }

  @Override
  protected Object getSchema(ParquetRecordFormat format) {
    // Each record format carries its schema in a different representation.
    switch (format) {
      case GROUP:
        return TestConstants.PARQUET_SCHEMA;
      case PROTOBUF:
        return TestRecordProtos.TestRecord.class;
      case AVRO:
        return org.apache.gobblin.test.avro.TestRecord.getClassSchema();
      default:
        throw new RuntimeException(format.name() + " is not implemented");
    }
  }

  @AfterClass
  public void tearDown()
      throws IOException {
    super.tearDown();
  }

  /** Minimal {@link ReadSupport} that materializes rows with the example Group API. */
  class SimpleReadSupport extends ReadSupport<Group> {
    @Override
    public RecordMaterializer<Group> prepareForRead(Configuration conf, Map<String, String> metaData,
        MessageType schema, ReadContext context) {
      return new GroupRecordConverter(schema);
    }

    @Override
    public ReadContext init(InitContext context) {
      return new ReadContext(context.getFileSchema());
    }
  }
}
| 3,508 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin/converter/parquet/ParquetGroup.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.parquet;
import java.util.ArrayList;
import java.util.List;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.BinaryValue;
import org.apache.parquet.example.data.simple.BooleanValue;
import org.apache.parquet.example.data.simple.DoubleValue;
import org.apache.parquet.example.data.simple.FloatValue;
import org.apache.parquet.example.data.simple.Int96Value;
import org.apache.parquet.example.data.simple.IntegerValue;
import org.apache.parquet.example.data.simple.LongValue;
import org.apache.parquet.example.data.simple.NanoTime;
import org.apache.parquet.example.data.simple.Primitive;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.io.api.RecordConsumer;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import static org.apache.parquet.schema.Type.Repetition.REPEATED;
/**
* Custom Implementation of {@link Group} to support adding {@link Object} of type {@link Primitive} or {@link Group}.
* Also provides methods to add {@link Primitive} and {@link Group} with {@link String} key if index is not known.
* @author tilakpatidar
*/
public class ParquetGroup extends Group {
  private final GroupType schema;
  // Each item represents the data of one field, indexed by the fieldIndex of the schema.
  // Repeated fields may hold multiple values; non-repeated fields hold at most one.
  private final List<Object>[] data;

  /**
   * Creates an empty group conforming to the given schema, with one (initially
   * empty) value list per declared field.
   */
  public ParquetGroup(GroupType schema) {
    this.schema = schema;
    this.data = new List[schema.getFields().size()];
    for (int i = 0; i < schema.getFieldCount(); ++i) {
      this.data[i] = new ArrayList();
    }
  }

  public String toString() {
    return this.toString("");
  }

  /**
   * Renders this group as an indented, human-readable field/value listing;
   * nested groups are indented one extra level.
   */
  public String toString(String indent) {
    StringBuilder result = new StringBuilder();
    int i = 0;
    for (Type field : this.schema.getFields()) {
      String name = field.getName();
      List<Object> values = this.data[i];
      for (Object value : values) {
        result.append(indent).append(name);
        if (value == null) {
          result.append(": NULL\n");
        } else if (value instanceof Group) {
          result.append("\n").append(((ParquetGroup) value).toString(indent + "  "));
        } else {
          result.append(": ").append(value.toString()).append("\n");
        }
      }
      i++;
    }
    return result.toString();
  }

  /** Creates a new child group for the (group-typed) field at {@code fieldIndex} and attaches it. */
  @Override
  public Group addGroup(int fieldIndex) {
    ParquetGroup g = new ParquetGroup(this.schema.getType(fieldIndex).asGroupType());
    this.data[fieldIndex].add(g);
    return g;
  }

  public Group getGroup(int fieldIndex, int index) {
    return (Group) this.getValue(fieldIndex, index);
  }

  /**
   * Returns the {@code index}-th value of the field at {@code fieldIndex},
   * wrapping out-of-range accesses in a descriptive {@link RuntimeException}.
   */
  private Object getValue(int fieldIndex, int index) {
    List<Object> list;
    try {
      list = this.data[fieldIndex];
    } catch (IndexOutOfBoundsException var6) {
      throw new RuntimeException(
          "not found " + fieldIndex + "(" + this.schema.getFieldName(fieldIndex) + ") in group:\n" + this);
    }
    try {
      return list.get(index);
    } catch (IndexOutOfBoundsException var5) {
      throw new RuntimeException(
          "not found " + fieldIndex + "(" + this.schema.getFieldName(fieldIndex) + ") element number " + index
              + " in group:\n" + this);
    }
  }

  /**
   * Appends a primitive value to the field at {@code fieldIndex}. Non-repeated
   * fields may only ever hold a single value.
   */
  public void add(int fieldIndex, Primitive value) {
    Type type = this.schema.getType(fieldIndex);
    List<Object> list = this.data[fieldIndex];
    if (!type.isRepetition(REPEATED) && !list.isEmpty()) {
      throw new IllegalStateException(
          "field " + fieldIndex + " (" + type.getName() + ") can not have more than one value: " + list);
    } else {
      list.add(value);
    }
  }

  public int getFieldRepetitionCount(int fieldIndex) {
    List<Object> list = this.data[fieldIndex];
    return list == null ? 0 : list.size();
  }

  public String getValueToString(int fieldIndex, int index) {
    return String.valueOf(this.getValue(fieldIndex, index));
  }

  public String getString(int fieldIndex, int index) {
    return ((BinaryValue) this.getValue(fieldIndex, index)).getString();
  }

  public int getInteger(int fieldIndex, int index) {
    return ((IntegerValue) this.getValue(fieldIndex, index)).getInteger();
  }

  @Override
  public long getLong(int fieldIndex, int index) {
    return ((LongValue) this.getValue(fieldIndex, index)).getLong();
  }

  @Override
  public double getDouble(int fieldIndex, int index) {
    return ((DoubleValue) this.getValue(fieldIndex, index)).getDouble();
  }

  @Override
  public float getFloat(int fieldIndex, int index) {
    return ((FloatValue) this.getValue(fieldIndex, index)).getFloat();
  }

  public boolean getBoolean(int fieldIndex, int index) {
    return ((BooleanValue) this.getValue(fieldIndex, index)).getBoolean();
  }

  public Binary getBinary(int fieldIndex, int index) {
    return ((BinaryValue) this.getValue(fieldIndex, index)).getBinary();
  }

  public Binary getInt96(int fieldIndex, int index) {
    return ((Int96Value) this.getValue(fieldIndex, index)).getInt96();
  }

  public void add(int fieldIndex, int value) {
    this.add(fieldIndex, new IntegerValue(value));
  }

  public void add(int fieldIndex, long value) {
    this.add(fieldIndex, new LongValue(value));
  }

  public void add(int fieldIndex, String value) {
    this.add(fieldIndex, new BinaryValue(Binary.fromString(value)));
  }

  public void add(int fieldIndex, NanoTime value) {
    this.add(fieldIndex, value.toInt96());
  }

  public void add(int fieldIndex, boolean value) {
    this.add(fieldIndex, new BooleanValue(value));
  }

  /**
   * Appends a binary value, boxing it according to the field's declared
   * primitive type (BINARY or INT96).
   */
  public void add(int fieldIndex, Binary value) {
    switch (this.getType().getType(fieldIndex).asPrimitiveType().getPrimitiveTypeName()) {
      case BINARY:
        this.add(fieldIndex, new BinaryValue(value));
        break;
      case INT96:
        this.add(fieldIndex, new Int96Value(value));
        break;
      default:
        throw new UnsupportedOperationException(
            this.getType().asPrimitiveType().getName() + " not supported for Binary");
    }
  }

  public void add(int fieldIndex, float value) {
    this.add(fieldIndex, new FloatValue(value));
  }

  public void add(int fieldIndex, double value) {
    this.add(fieldIndex, new DoubleValue(value));
  }

  @Override
  public void add(int i, Group group) {
    this.data[i].add(group);
  }

  public GroupType getType() {
    return this.schema;
  }

  public void writeValue(int field, int index, RecordConsumer recordConsumer) {
    ((Primitive) this.getValue(field, index)).writeValue(recordConsumer);
  }

  /**
   * Add any object of {@link PrimitiveType} or {@link Group} type with a String key.
   * Fix: route ANY {@link Group} implementation (not just {@code ParquetGroup})
   * to {@link #addGroup(String, Group)}; the previous exact-class comparison
   * mis-cast other Group subclasses to {@link Primitive} and threw
   * ClassCastException.
   * @param key field name in this group's schema
   * @param object value to attach; must be a {@link Group} or a {@link Primitive}
   */
  public void add(String key, Object object) {
    int fieldIndex = getIndex(key);
    if (object instanceof Group) {
      this.addGroup(key, (Group) object);
    } else {
      this.add(fieldIndex, (Primitive) object);
    }
  }

  /** Resolves a field name to its index in the schema. */
  private int getIndex(String key) {
    return getType().getFieldIndex(key);
  }

  /**
   * Add a {@link Group} given a String key.
   * @param key field name in this group's schema
   * @param object child group to attach
   */
  private void addGroup(String key, Group object) {
    int fieldIndex = getIndex(key);
    // Result intentionally discarded: asGroupType() validates that the field
    // is group-typed (it throws if the field is primitive).
    this.schema.getType(fieldIndex).asGroupType();
    this.data[fieldIndex].add(object);
  }
}
| 3,509 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin/converter/parquet/JsonElementConversionFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.parquet;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.BinaryValue;
import org.apache.parquet.example.data.simple.BooleanValue;
import org.apache.parquet.example.data.simple.DoubleValue;
import org.apache.parquet.example.data.simple.FloatValue;
import org.apache.parquet.example.data.simple.IntegerValue;
import org.apache.parquet.example.data.simple.LongValue;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import org.apache.gobblin.converter.parquet.JsonSchema.*;
import static org.apache.gobblin.converter.parquet.JsonElementConversionFactory.RecordConverter.RecordType.CHILD;
import static org.apache.gobblin.converter.parquet.JsonSchema.*;
import static org.apache.gobblin.converter.parquet.JsonSchema.InputType.STRING;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT32;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64;
import static org.apache.parquet.schema.Type.Repetition.OPTIONAL;
import static org.apache.parquet.schema.Type.Repetition.REPEATED;
import static org.apache.parquet.schema.Type.Repetition.REQUIRED;
/**
* <p>
* Creates a JsonElement to Parquet converter for all supported data types.
* </p>
*
* @author tilakpatidar
*
*/
public class JsonElementConversionFactory {

  /**
   * Use to create a converter for a single field from a parquetSchema.
   *
   * @param schema JSON schema of the field to convert
   * @param repeated - Is the {@link Type} repeated in the parent {@link Group}
   * @return a converter matching the field's {@link InputType}
   * @throws UnsupportedOperationException for input types with no converter
   */
  public static JsonElementConverter getConverter(JsonSchema schema, boolean repeated) {
    InputType fieldType = schema.getInputType();
    switch (fieldType) {
      case INT:
        return new IntConverter(schema, repeated);
      case LONG:
        return new LongConverter(schema, repeated);
      case FLOAT:
        return new FloatConverter(schema, repeated);
      case DOUBLE:
        return new DoubleConverter(schema, repeated);
      case BOOLEAN:
        return new BooleanConverter(schema, repeated);
      case STRING:
        return new StringConverter(schema, repeated);
      case ARRAY:
        return new ArrayConverter(schema);
      case ENUM:
        return new EnumConverter(schema);
      case RECORD:
        return new RecordConverter(schema);
      case MAP:
        return new MapConverter(schema);
      case DATE:
      case TIMESTAMP:
        // Dates and timestamps are carried through as UTF-8 strings.
        return new StringConverter(schema, repeated);
      default:
        throw new UnsupportedOperationException(fieldType + " is unsupported");
    }
  }

  /**
   * Converts a JsonElement into a supported ParquetType
   * @author tilakpatidar
   */
  public static abstract class JsonElementConverter {
    protected final JsonSchema jsonSchema;

    protected JsonElementConverter(JsonSchema schema) {
      this.jsonSchema = schema;
    }

    /**
     * Convert value to a parquet type and perform null check.
     * @param value JSON value to convert
     * @return Parquet safe type, or {@code null} for a null value on a nullable field
     * @throws RuntimeException if the field is non-nullable and the value is JSON null
     */
    public Object convert(JsonElement value) {
      if (value.isJsonNull()) {
        if (this.jsonSchema.isNullable()) {
          return null;
        }
        throw new RuntimeException(
            "Field: " + this.jsonSchema.getColumnName() + " is not nullable and contains a null value");
      }
      return convertField(value);
    }

    /**
     * Returns a {@link Type} parquet schema
     * @return the parquet type describing this field
     */
    abstract public Type schema();

    /**
     * Convert JsonElement to Parquet type
     * @param value non-null JSON value
     * @return the converted parquet value
     */
    abstract Object convertField(JsonElement value);
  }

  /**
   * Converts a {@link JsonSchema} to a {@link PrimitiveType}
   */
  public static abstract class PrimitiveConverter extends JsonElementConverter {
    protected final boolean repeated;
    private PrimitiveTypeName outputType;
    protected Type schema;

    /**
     * @param jsonSchema JSON schema of the primitive field
     * @param repeated whether the field is repeated in its parent
     * @param outputType target parquet primitive type
     */
    public PrimitiveConverter(JsonSchema jsonSchema, boolean repeated, PrimitiveTypeName outputType) {
      super(jsonSchema);
      this.repeated = repeated;
      this.outputType = outputType;
      this.schema = buildSchema();
    }

    // Repeated fields are REPEATED; otherwise repetition follows nullability.
    protected Type buildSchema() {
      return new PrimitiveType(this.repeated ? REPEATED : optionalOrRequired(this.jsonSchema), this.outputType,
          this.jsonSchema.getColumnName());
    }

    @Override
    public Type schema() {
      return this.schema;
    }
  }

  /**
   * Converts {@link JsonSchema} having collection of elements of {@link InputType} into a {@link GroupType}.
   */
  public static abstract class CollectionConverter extends JsonElementConverter {
    protected InputType elementType;
    protected JsonElementConverter elementConverter;
    protected Type schema;

    public CollectionConverter(JsonSchema collectionSchema, InputType elementType, boolean repeated) {
      super(collectionSchema);
      this.elementType = elementType;
      this.elementConverter = getConverter(getElementSchema(), repeated);
      this.schema = buildSchema();
    }

    @Override
    public Type schema() {
      return this.schema;
    }

    /**
     * Prepare a {@link JsonSchema} for the elements in a collection.
     * @return the element schema
     */
    abstract JsonSchema getElementSchema();

    abstract Type buildSchema();
  }

  /** INT -> INT32. */
  public static class IntConverter extends PrimitiveConverter {

    public IntConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, INT32);
    }

    @Override
    IntegerValue convertField(JsonElement value) {
      return new IntegerValue(value.getAsInt());
    }
  }

  /** LONG -> INT64. */
  public static class LongConverter extends PrimitiveConverter {

    public LongConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, INT64);
    }

    @Override
    LongValue convertField(JsonElement value) {
      return new LongValue(value.getAsLong());
    }
  }

  /** FLOAT -> FLOAT. */
  public static class FloatConverter extends PrimitiveConverter {

    public FloatConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, PrimitiveTypeName.FLOAT);
    }

    @Override
    FloatValue convertField(JsonElement value) {
      return new FloatValue(value.getAsFloat());
    }
  }

  /** DOUBLE -> DOUBLE. */
  public static class DoubleConverter extends PrimitiveConverter {

    public DoubleConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, PrimitiveTypeName.DOUBLE);
    }

    @Override
    DoubleValue convertField(JsonElement value) {
      return new DoubleValue(value.getAsDouble());
    }
  }

  /** BOOLEAN -> BOOLEAN. */
  public static class BooleanConverter extends PrimitiveConverter {

    public BooleanConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, PrimitiveTypeName.BOOLEAN);
    }

    @Override
    BooleanValue convertField(JsonElement value) {
      return new BooleanValue(value.getAsBoolean());
    }
  }

  /** STRING -> BINARY annotated as UTF-8 string. */
  public static class StringConverter extends PrimitiveConverter {

    public StringConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, BINARY);
      this.schema = buildSchema();
    }

    @Override
    BinaryValue convertField(JsonElement value) {
      return new BinaryValue(Binary.fromString(value.getAsString()));
    }

    @Override
    protected Type buildSchema() {
      String columnName = this.jsonSchema.getColumnName();
      if (this.repeated) {
        return Types.repeated(BINARY).as(LogicalTypeAnnotation.StringLogicalTypeAnnotation.stringType()).named(columnName);
      }
      switch (optionalOrRequired(this.jsonSchema)) {
        case OPTIONAL:
          return Types.optional(BINARY).as(LogicalTypeAnnotation.StringLogicalTypeAnnotation.stringType()).named(columnName);
        case REQUIRED:
          return Types.required(BINARY).as(LogicalTypeAnnotation.StringLogicalTypeAnnotation.stringType()).named(columnName);
        default:
          throw new RuntimeException("Unsupported Repetition type");
      }
    }
  }

  /** Maps nullability to parquet repetition: nullable -> OPTIONAL, otherwise REQUIRED. */
  public static Type.Repetition optionalOrRequired(JsonSchema jsonBaseSchema) {
    return jsonBaseSchema.isNullable() ? OPTIONAL : REQUIRED;
  }

  /** Converts a JSON array into a group with a single repeated element field. */
  public static class ArrayConverter extends CollectionConverter {

    public ArrayConverter(JsonSchema arraySchema) {
      super(arraySchema, arraySchema.getElementTypeUsingKey(ARRAY_ITEMS_KEY), true);
    }

    @Override
    Object convertField(JsonElement value) {
      ParquetGroup array = new ParquetGroup((GroupType) schema());
      JsonElementConverter converter = this.elementConverter;
      for (JsonElement elem : (JsonArray) value) {
        array.add(ARRAY_KEY, converter.convert(elem));
      }
      return array;
    }

    @Override
    protected Type buildSchema() {
      List<Type> fields = new ArrayList<>();
      fields.add(0, this.elementConverter.schema());
      return new GroupType(optionalOrRequired(jsonSchema), this.jsonSchema.getColumnName(), fields);
    }

    @Override
    JsonSchema getElementSchema() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(this.elementType, true);
      jsonSchema.setColumnName(ARRAY_KEY);
      return jsonSchema;
    }
  }

  /** Converts an enum field, validating values against the declared symbol set. */
  public static class EnumConverter extends CollectionConverter {
    private final HashSet<String> symbols = new HashSet<>();

    public EnumConverter(JsonSchema enumSchema) {
      super(enumSchema, STRING, false);
      JsonArray symbolsArray = enumSchema.getSymbols();
      symbolsArray.forEach(e -> symbols.add(e.getAsString()));
    }

    @Override
    Object convertField(JsonElement value) {
      if (symbols.contains(value.getAsString()) || (this.jsonSchema.isNullable() && value.isJsonNull())) {
        return this.elementConverter.convert(value);
      }
      throw new RuntimeException("Symbol " + value.getAsString() + " does not belong to set " + symbols.toString());
    }

    @Override
    protected Type buildSchema() {
      // Enums are stored as their underlying string representation.
      return this.elementConverter.schema();
    }

    @Override
    JsonSchema getElementSchema() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(STRING, this.jsonSchema.isNullable());
      jsonSchema.setColumnName(this.jsonSchema.getColumnName());
      return jsonSchema;
    }
  }

  /**
   * Converts a JSON record (object) into a parquet group; the ROOT record
   * becomes a {@link MessageType}, nested records become {@link GroupType}s.
   */
  public static class RecordConverter extends JsonElementConverter {
    private final HashMap<String, JsonElementConverter> converters;
    private final RecordType recordType;
    private final Type schema;

    public enum RecordType {
      ROOT, CHILD
    }

    public RecordConverter(JsonSchema recordSchema) {
      this(recordSchema, CHILD);
    }

    public RecordConverter(JsonSchema recordSchema, RecordType recordType) {
      super(recordSchema);
      this.converters = new HashMap<>();
      this.recordType = recordType;
      this.schema = buildSchema();
    }

    @Override
    Object convertField(JsonElement value) {
      ParquetGroup r1 = new ParquetGroup((GroupType) schema());
      JsonObject inputRecord = value.getAsJsonObject();
      for (Map.Entry<String, JsonElement> entry : inputRecord.entrySet()) {
        String key = entry.getKey();
        JsonElementConverter converter = this.converters.get(key);
        Object convertedValue = converter.convert(entry.getValue());
        boolean valueIsNull = convertedValue == null;
        Type.Repetition repetition = optionalOrRequired(converter.jsonSchema);
        // OPTIONAL fields with a null value are simply omitted from the group.
        if (valueIsNull && repetition.equals(OPTIONAL)) {
          continue;
        }
        r1.add(key, convertedValue);
      }
      return r1;
    }

    // Builds the record's parquet type and caches one converter per column.
    private Type buildSchema() {
      JsonArray inputSchema = this.jsonSchema.getDataTypeValues();
      List<Type> parquetTypes = new ArrayList<>();
      for (JsonElement element : inputSchema) {
        JsonObject map = (JsonObject) element;
        JsonSchema elementSchema = new JsonSchema(map);
        String columnName = elementSchema.getColumnName();
        JsonElementConverter converter = JsonElementConversionFactory.getConverter(elementSchema, false);
        Type schemaType = converter.schema();
        this.converters.put(columnName, converter);
        parquetTypes.add(schemaType);
      }
      String docName = this.jsonSchema.getColumnName();
      switch (recordType) {
        case ROOT:
          return new MessageType(docName, parquetTypes);
        case CHILD:
          return new GroupType(optionalOrRequired(this.jsonSchema), docName, parquetTypes);
        default:
          throw new RuntimeException("Unsupported Record type");
      }
    }

    @Override
    public Type schema() {
      return this.schema;
    }
  }

  /** Converts a JSON object used as a map into a repeated (key, value) group. */
  public static class MapConverter extends CollectionConverter {

    public MapConverter(JsonSchema mapSchema) {
      super(mapSchema, mapSchema.getElementTypeUsingKey(MAP_ITEMS_KEY), false);
    }

    @Override
    Object convertField(JsonElement value) {
      ParquetGroup mapGroup = new ParquetGroup((GroupType) schema());
      JsonElementConverter converter = this.elementConverter;
      JsonObject map = (JsonObject) value;
      for (Map.Entry<String, JsonElement> entry : map.entrySet()) {
        ParquetGroup entrySet = (ParquetGroup) mapGroup.addGroup(MAP_KEY);
        entrySet.add(MAP_KEY_COLUMN_NAME, entry.getKey());
        entrySet.add(MAP_VALUE_COLUMN_NAME, converter.convert(entry.getValue()));
      }
      return mapGroup;
    }

    @Override
    protected Type buildSchema() {
      JsonElementConverter elementConverter = this.elementConverter;
      JsonElementConverter keyConverter = getKeyConverter();
      GroupType mapGroup =
          Types.repeatedGroup().addFields(keyConverter.schema(), elementConverter.schema()).named(MAP_KEY)
              .asGroupType();
      String columnName = this.jsonSchema.getColumnName();
      switch (optionalOrRequired(this.jsonSchema)) {
        case OPTIONAL:
          return Types.optionalGroup().addFields(mapGroup).named(columnName).asGroupType();
        case REQUIRED:
          return Types.requiredGroup().addFields(mapGroup).named(columnName).asGroupType();
        default:
          // Fix: previously returned null here, which would surface later as an
          // unexplained NPE. Unreachable today (optionalOrRequired only yields
          // OPTIONAL or REQUIRED), but fail loudly like StringConverter does.
          throw new RuntimeException("Unsupported Repetition type");
      }
    }

    @Override
    JsonSchema getElementSchema() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(this.elementType, false);
      jsonSchema.setColumnName(MAP_VALUE_COLUMN_NAME);
      return jsonSchema;
    }

    /** Map keys are always non-nullable strings. */
    public JsonElementConverter getKeyConverter() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(STRING, false);
      jsonSchema.setColumnName(MAP_KEY_COLUMN_NAME);
      return getConverter(jsonSchema, false);
    }
  }
}
| 3,510 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin/converter/parquet/JsonIntermediateToParquetGroupConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.parquet;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.schema.MessageType;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.converter.parquet.JsonElementConversionFactory.RecordConverter;
import static org.apache.gobblin.converter.parquet.JsonElementConversionFactory.RecordConverter.RecordType.ROOT;
/**
* A converter to Convert JsonIntermediate to Parquet
* @author tilakpatidar
*/
/**
 * Converts Gobblin JSON-intermediate records into parquet example-API
 * {@link Group} rows, deriving the parquet {@link MessageType} from the
 * JSON-intermediate schema.
 *
 * @author tilakpatidar
 */
public class JsonIntermediateToParquetGroupConverter extends Converter<JsonArray, MessageType, JsonObject, Group> {
  private RecordConverter recordConverter;

  @Override
  public MessageType convertSchema(JsonArray inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    // The extract's table name becomes the name of the root parquet message.
    JsonSchema rootSchema = new JsonSchema(inputSchema);
    rootSchema.setColumnName(workUnit.getExtract().getTable());
    this.recordConverter = new RecordConverter(rootSchema, ROOT);
    return (MessageType) this.recordConverter.schema();
  }

  @Override
  public Iterable<Group> convertRecord(MessageType outputSchema, JsonObject inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    // Each input record converts to exactly one parquet group.
    Group converted = (Group) this.recordConverter.convert(inputRecord);
    return new SingleRecordIterable<>(converted);
  }
}
| 3,511 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-parquet/src/main/java/org/apache/gobblin/writer/ParquetDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.api.WriteSupport;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.proto.ProtoParquetWriter;
import org.apache.parquet.schema.MessageType;
import com.google.protobuf.Message;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.parquet.writer.AbstractParquetDataWriterBuilder;
import org.apache.gobblin.parquet.writer.ParquetWriterConfiguration;
import org.apache.gobblin.parquet.writer.ParquetWriterShim;
@Slf4j
public class ParquetDataWriterBuilder<S,D> extends AbstractParquetDataWriterBuilder<S,D> {

  /**
   * Build a version-specific {@link ParquetWriter} for given {@link ParquetWriterConfiguration},
   * wrapped in a {@link ParquetWriterShim} so callers stay parquet-version agnostic.
   * @param writerConfiguration writer settings (staging file, codec, sizes, format)
   * @return a shim delegating write/close to the underlying writer
   * @throws IOException if the underlying writer cannot be created
   */
  @Override
  public ParquetWriterShim getVersionSpecificWriter(ParquetWriterConfiguration writerConfiguration)
      throws IOException {
    CompressionCodecName codec = CompressionCodecName.fromConf(writerConfiguration.getCodecName());
    ParquetProperties.WriterVersion version =
        ParquetProperties.WriterVersion.fromString(writerConfiguration.getWriterVersion());
    Configuration hadoopConf = new Configuration();

    final ParquetWriter underlying;
    switch (writerConfiguration.getRecordFormat()) {
      case GROUP:
        // The example Group API carries its schema through the Hadoop configuration.
        GroupWriteSupport.setSchema((MessageType) this.schema, hadoopConf);
        underlying = new ParquetWriter<Group>(
            writerConfiguration.getAbsoluteStagingFile(),
            new GroupWriteSupport(),
            codec,
            writerConfiguration.getBlockSize(),
            writerConfiguration.getPageSize(),
            writerConfiguration.getDictPageSize(),
            writerConfiguration.isDictionaryEnabled(),
            writerConfiguration.isValidate(),
            version,
            hadoopConf);
        break;
      case AVRO:
        underlying = new AvroParquetWriter(
            writerConfiguration.getAbsoluteStagingFile(),
            (Schema) this.schema,
            codec,
            writerConfiguration.getBlockSize(),
            writerConfiguration.getPageSize(),
            writerConfiguration.isDictionaryEnabled(),
            hadoopConf);
        break;
      case PROTOBUF:
        underlying = new ProtoParquetWriter(
            writerConfiguration.getAbsoluteStagingFile(),
            (Class<? extends Message>) this.schema,
            codec,
            writerConfiguration.getBlockSize(),
            writerConfiguration.getPageSize(),
            writerConfiguration.isDictionaryEnabled(),
            writerConfiguration.isValidate());
        break;
      default:
        throw new RuntimeException("Record format not supported");
    }

    return new ParquetWriterShim() {
      @Override
      public void write(Object record)
          throws IOException {
        underlying.write(record);
      }

      @Override
      public void close()
          throws IOException {
        underlying.close();
      }
    };
  }
}
| 3,512 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/test/JsonRecordGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.util.Collections;
import org.apache.gobblin.elasticsearch.typemapping.JsonTypeMapper;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
/**
* A generator of {@link JsonElement} records
*/
/**
 * A generator of {@link JsonElement} test records.
 *
 * <p>Each record is a small bean with an {@code id} field and a {@code key} payload whose
 * Java type is selected by {@link PayloadType}, serialized to a JSON tree via Gson.
 */
public class JsonRecordGenerator implements RecordTypeGenerator<JsonElement> {
  private final Gson gson = new Gson();

  @Override
  public String getName() {
    return "json";
  }

  @Override
  public String getTypeMapperClassName() {
    return JsonTypeMapper.class.getCanonicalName();
  }

  /** Simple bean serialized by Gson; field names become the JSON keys {@code id} and {@code key}. */
  static class TestObject<T> {
    private String id;
    private T key;

    TestObject(String id, T payload) {
      this.id = id;
      this.key = payload;
    }
  }

  /**
   * Generate a single JSON record.
   *
   * @param id          value of the record's {@code id} field
   * @param payloadType selects the Java type of the {@code key} payload
   * @return the record as a Gson {@link JsonElement}
   * @throws RuntimeException if the payload type is not recognized
   */
  @Override
  public JsonElement getRecord(String id, PayloadType payloadType) {
    Object testObject;
    switch (payloadType) {
      case STRING:
        // Fixed: use the diamond operator instead of raw TestObject instantiations.
        testObject = new TestObject<>(id, TestUtils.generateRandomAlphaString(20));
        break;
      case LONG:
        testObject = new TestObject<>(id, TestUtils.generateRandomLong());
        break;
      case MAP:
        // Fixed: typed Collections.emptyMap() instead of the raw Collections.EMPTY_MAP constant.
        testObject = new TestObject<>(id, Collections.emptyMap());
        break;
      default:
        throw new RuntimeException("Do not know how to handle this type of payload");
    }
    return gson.toJsonTree(testObject);
  }
}
| 3,513 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/test/PayloadType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
/**
 * An enumeration of payload types used to configure the {@code key} field of generated
 * test records (see {@link RecordTypeGenerator#getRecord}).
 */
public enum PayloadType {
  /** Payload is a random alphabetic string. */
  STRING,
  /** Payload is a random long value. */
  LONG,
  /** Payload is a (possibly empty) map. */
  MAP
}
| 3,514 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/test/RecordTypeGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
/**
 * An interface describing a generator of test records of a particular serialization type
 * (e.g. Avro {@code GenericRecord}, Gson {@code JsonElement}).
 *
 * @param <T> the record type produced by this generator
 */
public interface RecordTypeGenerator<T> {
  /**
   * The short name of this record type (e.g. "avro", "json"), used to label test variants.
   * @return the record type name
   */
  String getName();
  /**
   * A {@link org.apache.gobblin.elasticsearch.typemapping.TypeMapper} that can work with
   * records of this type.
   * @return the canonical class name of the compatible type mapper
   */
  String getTypeMapperClassName();
  /**
   * Generate a record with the provided characteristics.
   * @param identifier value of the record's id field
   * @param payloadType selects the type of the record's payload field
   * @return a record of the type T
   */
  T getRecord(String identifier, PayloadType payloadType);
}
| 3,515 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/test/AvroRecordGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.test;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.Collections;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.elasticsearch.typemapping.AvroGenericRecordTypeMapper;
/**
* A generator of Avro records of type {@link GenericRecord}
*/
/**
 * A generator of Avro records of type {@link GenericRecord}.
 *
 * <p>Each record has an {@code id} string, a {@code key} payload whose Avro type is selected by
 * {@link PayloadType}, and a nested {@code data} record with fixed {@code bytes}/{@code flags}
 * content.
 */
public class AvroRecordGenerator implements RecordTypeGenerator<GenericRecord> {
  @Override
  public String getName() {
    return "avro";
  }

  @Override
  public String getTypeMapperClassName() {
    return AvroGenericRecordTypeMapper.class.getCanonicalName();
  }

  @Override
  public GenericRecord getRecord(String id, PayloadType payloadType) {
    return getTestAvroRecord(id, payloadType);
  }

  /**
   * Build a test record whose schema varies only in the type of the {@code key} field.
   *
   * @param identifier  value of the record's {@code id} field
   * @param payloadType selects the Avro type of the {@code key} field
   * @return a populated {@link GenericRecord}
   * @throws RuntimeException if the payload type is not recognized
   */
  static GenericRecord getTestAvroRecord(String identifier, PayloadType payloadType) {
    Schema dataRecordSchema =
        SchemaBuilder.record("Data").fields().name("data").type().bytesType().noDefault().name("flags").type().intType()
            .noDefault().endRecord();
    Schema schema;
    Object payloadValue;
    switch (payloadType) {
      case STRING: {
        schema = SchemaBuilder.record("TestRecord").fields()
            .name("id").type().stringType().noDefault()
            .name("key").type().stringType().noDefault()
            .name("data").type(dataRecordSchema).noDefault()
            .endRecord();
        payloadValue = TestUtils.generateRandomAlphaString(20);
        break;
      }
      case LONG: {
        schema = SchemaBuilder.record("TestRecord").fields()
            .name("id").type().stringType().noDefault()
            .name("key").type().longType().noDefault()
            .name("data").type(dataRecordSchema).noDefault()
            .endRecord();
        payloadValue = TestUtils.generateRandomLong();
        break;
      }
      case MAP: {
        schema = SchemaBuilder.record("TestRecord").fields()
            .name("id").type().stringType().noDefault()
            .name("key").type().map().values().stringType().noDefault()
            .name("data").type(dataRecordSchema).noDefault()
            .endRecord();
        // Fixed: typed Collections.emptyMap() instead of the raw Collections.EMPTY_MAP constant.
        payloadValue = Collections.emptyMap();
        break;
      }
      default: {
        // Fixed: error message typo ("this time" -> "this type").
        throw new RuntimeException("Do not know how to handle this type");
      }
    }
    GenericData.Record testRecord = new GenericData.Record(schema);
    String testContent = "hello world";
    GenericData.Record dataRecord = new GenericData.Record(dataRecordSchema);
    dataRecord.put("data", ByteBuffer.wrap(testContent.getBytes(Charset.forName("UTF-8"))));
    dataRecord.put("flags", 0);
    testRecord.put("key", payloadValue);
    testRecord.put("data", dataRecord);
    testRecord.put("id", identifier);
    return testRecord;
  }
}
| 3,516 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/ElasticsearchTestServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.gobblin.test.TestUtils;
import org.testng.Assert;
import com.google.common.base.Throwables;
import javax.annotation.concurrent.NotThreadSafe;
import lombok.extern.slf4j.Slf4j;
/**
* A Test ElasticSearch server
*/
@Slf4j
@NotThreadSafe
public class ElasticsearchTestServer {
  private static final String ELASTICSEARCH_VERSION = "5.6.8";
  private static final String TEST_ROOT_DIR = "gobblin-modules/gobblin-elasticsearch/test-elasticsearch/";
  // The clean elasticsearch instance is installed here
  private static final String BASE_ELASTICSEARCH_INSTALL = TEST_ROOT_DIR + "elasticsearch-" + ELASTICSEARCH_VERSION;
  // Per-test elasticsearch instances are installed under a different directory
  private static final String TEST_INSTALL_PREFIX = TEST_ROOT_DIR + "es-test-install-";
  private static final String ELASTICSEARCH_BIN = "/bin/elasticsearch";
  private static final String ELASTICSEARCH_CONFIG_FILE = "/config/elasticsearch.yml";
  private static final String ELASTICSEARCH_JVMOPTS_FILE = "/config/jvm.options";

  // Unique id for this test instance; also used as the ES cluster name.
  private final String _testId;
  private final int _tcpPort;
  private Process elasticProcess;
  private final int _httpPort;
  // JVM runtime name ("pid@host"), used only to tag log lines — not strictly a pid.
  private String _pid = ManagementFactory.getRuntimeMXBean().getName();
  private final String _testInstallDirectory;
  private AtomicBoolean _started = new AtomicBoolean(false);

  public ElasticsearchTestServer(String testId)
      throws IOException {
    this(testId, TestUtils.findFreePort(), TestUtils.findFreePort());
  }

  private ElasticsearchTestServer(String testId, int httpPort, int tcpPort)
      throws IOException {
    _testId = testId;
    _httpPort = httpPort;
    _tcpPort = tcpPort;
    _testInstallDirectory = TEST_INSTALL_PREFIX + _testId;
    try {
      createInstallation();
    } catch (Exception e) {
      throw new IOException("Failed to create a test installation of elasticsearch", e);
    }
    configure();
  }

  public ElasticsearchTestServer()
      throws IOException {
    this(TestUtils.generateRandomAlphaString(25));
  }

  /**
   * Copy the pristine base elasticsearch installation into a per-test directory,
   * replacing any leftover directory from a previous run.
   */
  private void createInstallation()
      throws IOException {
    File srcDir = new File(BASE_ELASTICSEARCH_INSTALL);
    if (!srcDir.exists()) {
      throw new IOException("Could not find base elasticsearch instance installed at " + srcDir.getAbsolutePath() + "\n"
          + "Run ./gradlew :gobblin-modules:gobblin-elasticsearch:installTestDependencies before running this test");
    }
    File destDir = new File(_testInstallDirectory);
    log.debug("About to recreate directory : {}", destDir.getPath());
    if (destDir.exists()) {
      org.apache.commons.io.FileUtils.deleteDirectory(destDir);
    }
    String[] commands = {"cp", "-r", srcDir.getAbsolutePath(), destDir.getAbsolutePath()};
    try {
      log.debug("{}: Will run command: {}", this._pid, Arrays.toString(commands));
      Process copyProcess = new ProcessBuilder().inheritIO().command(commands).start();
      copyProcess.waitFor();
    } catch (Exception e) {
      log.error("Failed to create installation directory at {}", destDir.getPath(), e);
      Throwables.propagate(e);
    }
  }

  /**
   * Write a per-test elasticsearch.yml (cluster name and ports) and shrink the JVM
   * heap options so many test instances can run side by side.
   */
  private void configure() throws IOException {
    File configFile = new File(_testInstallDirectory + ELASTICSEARCH_CONFIG_FILE);
    FileOutputStream configFileStream = new FileOutputStream(configFile);
    try {
      configFileStream.write(("cluster.name: " + _testId + "\n").getBytes("UTF-8"));
      configFileStream.write(("http.port: " + _httpPort + "\n").getBytes("UTF-8"));
      configFileStream.write(("transport.tcp.port: " + _tcpPort + "\n").getBytes("UTF-8"));
    } finally {
      configFileStream.close();
    }
    File jvmConfigFile = new File(_testInstallDirectory + ELASTICSEARCH_JVMOPTS_FILE);
    try (Stream<String> lines = Files.lines(jvmConfigFile.toPath())) {
      // Rewrite -Xms / -Xmx lines to a small 128m heap.
      List<String> newLines = lines.map(line -> line.replaceAll("^\\s*(-Xm[s,x]).*$", "$1128m"))
          .collect(Collectors.toList());
      Files.write(jvmConfigFile.toPath(), newLines);
    }
  }

  /**
   * Launch the elasticsearch process and poll its health endpoint until it reports up,
   * failing the test if it is not up within the given timeout.
   *
   * @param maxStartupTimeSeconds maximum time to wait for the server to come up
   */
  public void start(int maxStartupTimeSeconds) {
    // Fixed: atomic check-then-set instead of separate get() and set(true).
    if (!_started.compareAndSet(false, true)) {
      log.warn("ElasticSearch server has already been attempted to be started... returning without doing anything");
      return;
    }
    // Fixed: these are informational messages, not errors; log at info level.
    log.info("{}: Starting elasticsearch server on port {}", this._pid, this._httpPort);
    String[] commands = {_testInstallDirectory + ELASTICSEARCH_BIN};
    try {
      log.info("{}: Will run command: {}", this._pid, Arrays.toString(commands));
      elasticProcess = new ProcessBuilder().inheritIO().command(commands).start();
      if (elasticProcess != null) {
        // register destroy of process on shutdown in-case of unclean test termination
        Runtime.getRuntime().addShutdownHook(new Thread() {
          public void run() {
            if (elasticProcess != null) {
              elasticProcess.destroy();
            }
          }
        });
      }
    } catch (Exception e) {
      log.error("Failed to start elasticsearch server", e);
      Throwables.propagate(e);
    }
    boolean isUp = false;
    int numTries = maxStartupTimeSeconds * 2;
    while (!isUp && numTries-- > 0) {
      try {
        Thread.sleep(500); // wait 1/2 second between polls
        isUp = isUp();
      } catch (InterruptedException e) {
        // Fixed: restore the interrupt flag instead of silently swallowing it, and stop waiting.
        Thread.currentThread().interrupt();
        break;
      } catch (Exception e) {
        // Server not reachable yet (isUp propagates connection failures); keep polling.
      }
    }
    Assert.assertTrue(isUp, "Server is not up!");
  }

  /**
   * Probe the cluster health endpoint.
   *
   * @return true iff the endpoint answered with HTTP 200 (green status)
   */
  public boolean isUp() {
    try {
      URL url = new URL("http://localhost:" + _httpPort + "/_cluster/health?wait_for_status=green");
      long startTime = System.nanoTime();
      HttpURLConnection httpURLConnection = (HttpURLConnection) url.openConnection();
      int responseCode = httpURLConnection.getResponseCode();
      log.info("Duration: {} seconds, Response code = {}",
          (System.nanoTime() - startTime) / 1000000000.0,
          responseCode);
      // Fixed: direct boolean expression instead of if/else returning constants.
      return responseCode == 200;
    } catch (Exception e) {
      Throwables.propagate(e);
      return false; // unreachable: propagate always throws
    }
  }

  public int getTransportPort() {
    return _tcpPort;
  }

  public int getHttpPort() {
    return _httpPort;
  }

  /** Destroy the elasticsearch process, if one was started. Safe to call multiple times. */
  public void stop() {
    if (elasticProcess != null) {
      try {
        elasticProcess.destroy();
        elasticProcess = null; // set to null to prevent redundant call to destroy on shutdown
      } catch (Exception e) {
        log.warn("Failed to stop the ElasticSearch server", e);
      }
    }
  }
}
| 3,517 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/ElasticsearchTestServerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch;
import java.io.IOException;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
/**
* A Test to test that the {@link ElasticsearchTestServer} class does what it is supposed to do
*/
public class ElasticsearchTestServerTest {
  // Shared server instance managed by the suite-level lifecycle methods below.
  ElasticsearchTestServer _elasticsearchTestServer;
  @BeforeSuite
  public void startServer()
      throws IOException {
    // Start a fresh server and wait up to 60 seconds for it to come up.
    _elasticsearchTestServer = new ElasticsearchTestServer();
    _elasticsearchTestServer.start(60);
  }
  @Test
  public void testServerStart()
      throws InterruptedException, IOException {
    _elasticsearchTestServer.start(60); // second start should be a no-op
  }
  @AfterSuite
  public void stopServer() {
    _elasticsearchTestServer.stop();
  }
}
| 3,518 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/WriterVariant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.IOException;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
import com.typesafe.config.Config;
/**
 * An interface describing a writer variant (e.g. REST vs. transport client) so the same
 * test suite can be run generically against each Elasticsearch writer implementation.
 */
public interface WriterVariant {
  /** @return the short name of this variant, used to label tests */
  String getName();
  /** @return a {@link ConfigBuilder} pre-set with this variant's client type */
  ConfigBuilder getConfigBuilder();
  /** @return the writer under test, constructed from the given config */
  BatchAsyncDataWriter getBatchAsyncDataWriter(Config config)
      throws IOException;
  /** @return a client of the same flavor, for verifying what the writer wrote */
  TestClient getTestClient(Config config)
      throws IOException;
}
| 3,519 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/RestWriterVariant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.IOException;
import java.util.Collections;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.testng.Assert;
import com.typesafe.config.Config;
/**
* A variant that uses the {@link ElasticsearchRestWriter}
*/
/**
 * A {@link WriterVariant} backed by the {@link ElasticsearchRestWriter}.
 */
public class RestWriterVariant implements WriterVariant {
  // Most recently constructed writer under test.
  private ElasticsearchRestWriter _restWriter;

  @Override
  public String getName() {
    return "rest";
  }

  @Override
  public ConfigBuilder getConfigBuilder() {
    ConfigBuilder builder = new ConfigBuilder();
    return builder.setClientType("REST");
  }

  @Override
  public BatchAsyncDataWriter getBatchAsyncDataWriter(Config config) throws IOException {
    this._restWriter = new ElasticsearchRestWriter(config);
    return this._restWriter;
  }

  @Override
  public TestClient getTestClient(Config config) throws IOException {
    // A separate writer instance whose underlying clients are used purely for verification.
    final ElasticsearchRestWriter verificationWriter = new ElasticsearchRestWriter(config);
    final RestHighLevelClient highLevelClient = verificationWriter.getRestHighLevelClient();
    return new TestClient() {
      @Override
      public GetResponse get(GetRequest getRequest) throws IOException {
        return highLevelClient.get(getRequest);
      }

      @Override
      public void recreateIndex(String indexName) throws IOException {
        RestClient lowLevelClient = verificationWriter.getRestLowLevelClient();
        try {
          lowLevelClient.performRequest("DELETE", "/" + indexName);
        } catch (Exception ignored) {
          // ok since index may not exist
        }
        String indexSettings = "{\"settings\" : {\"index\":{\"number_of_shards\":1,\"number_of_replicas\":1}}}";
        HttpEntity entity = new StringEntity(indexSettings, ContentType.APPLICATION_JSON);
        Response putResponse =
            lowLevelClient.performRequest("PUT", "/" + indexName, Collections.emptyMap(), entity);
        Assert.assertEquals(putResponse.getStatusLine().getStatusCode(), 200, "Recreate index succeeded");
      }

      @Override
      public void close() throws IOException {
        verificationWriter.close();
      }
    };
  }
}
| 3,520 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/TestClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.Closeable;
import java.io.IOException;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
/**
 * An interface describing a functional Elasticsearch client to aid in verification
 * of test results. Implementations wrap either the REST or the transport client.
 */
public interface TestClient extends Closeable {
  /** Fetch a document; used to verify what a writer persisted. */
  GetResponse get(GetRequest getRequest)
      throws IOException;
  /** Delete (if present) and re-create the named index so each test starts clean. */
  void recreateIndex(String indexName)
      throws IOException;
}
| 3,521 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/ConfigBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.util.Properties;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Setter;
import lombok.experimental.Accessors;
/**
* A helper class to build Config for Elasticsearch Writers
*/
@Accessors(chain=true)
public class ConfigBuilder {
@Setter
String indexName;
@Setter
String indexType;
@Setter
int httpPort;
@Setter
int transportPort;
@Setter
boolean idMappingEnabled = true;
@Setter
String clientType = "REST";
@Setter
String typeMapperClassName;
@Setter
MalformedDocPolicy malformedDocPolicy;
Config build() {
Properties props = new Properties();
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_CLIENT_TYPE, clientType);
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_NAME, indexName);
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_TYPE, indexType);
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_ID_MAPPING_ENABLED,
"" + idMappingEnabled);
if (this.clientType.equalsIgnoreCase("rest")) {
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_HOSTS, "localhost:" + httpPort);
} else if (this.clientType.equalsIgnoreCase("transport")) {
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_HOSTS, "localhost:" + transportPort);
} else throw new RuntimeException("Client type needs to be one of rest/transport");
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS, typeMapperClassName);
if (malformedDocPolicy != null) {
props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_MALFORMED_DOC_POLICY,
malformedDocPolicy.toString().toUpperCase());
}
return ConfigFactory.parseProperties(props);
}
}
| 3,522 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchTransportClientWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.net.UnknownHostException;
import java.util.Properties;
import org.apache.gobblin.elasticsearch.typemapping.AvroGenericRecordTypeMapper;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class ElasticsearchTransportClientWriterTest {
  // Verifies that enabling SSL without supplying keystore/truststore settings
  // causes writer construction to fail fast.
  @Test
  public void testBadSslConfiguration()
      throws UnknownHostException {
    Properties props = new Properties();
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_NAME, "test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_TYPE, "test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS,
        AvroGenericRecordTypeMapper.class.getCanonicalName());
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_ID_MAPPING_ENABLED, "true");
    // SSL on, but no keystore/truststore configuration is provided on purpose.
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_ENABLED, "true");
    Config config = ConfigFactory.parseProperties(props);
    try {
      new ElasticsearchTransportClientWriter(config);
      // Assert.fail throws AssertionError, which is NOT an Exception, so it is
      // not swallowed by the catch block below.
      Assert.fail("Writer should not be constructed");
    }
    catch (Exception e) {
      // expected: construction must fail with the incomplete SSL configuration
    }
  }
}
| 3,523 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/TransportWriterVariant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.IOException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.index.IndexNotFoundException;
import org.testng.Assert;
import com.typesafe.config.Config;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
/**
* A variant that uses the {@link ElasticsearchTransportClientWriter}
*/
/**
 * A {@link WriterVariant} that exercises the {@link ElasticsearchTransportClientWriter},
 * i.e. the Elasticsearch native transport-protocol client.
 */
public class TransportWriterVariant implements WriterVariant {

  @Override
  public String getName() {
    return "transport";
  }

  @Override
  public ConfigBuilder getConfigBuilder() {
    return new ConfigBuilder().setClientType("transport");
  }

  @Override
  public BatchAsyncDataWriter getBatchAsyncDataWriter(Config config)
      throws IOException {
    return new ElasticsearchTransportClientWriter(config);
  }

  /**
   * Returns a {@link TestClient} backed by the same {@link TransportClient} the writer uses,
   * so tests can verify documents written through this variant.
   */
  @Override
  public TestClient getTestClient(Config config)
      throws IOException {
    final ElasticsearchTransportClientWriter transportClientWriter = new ElasticsearchTransportClientWriter(config);
    final TransportClient transportClient = transportClientWriter.getTransportClient();
    return new TestClient() {
      @Override
      public GetResponse get(GetRequest getRequest)
          throws IOException {
        try {
          // TransportClient.get returns a future; block for the response in this test helper.
          return transportClient.get(getRequest).get();
        } catch (Exception e) {
          throw new IOException(e);
        }
      }

      @Override
      public void recreateIndex(String indexName)
          throws IOException {
        // Drop the index if it exists; a missing index is fine for a "recreate" helper.
        try {
          transportClient.admin().indices().prepareDelete(indexName).execute().actionGet();
        } catch (IndexNotFoundException ie) {
          System.out.println("Index not found... that's ok");
        }
        CreateIndexResponse ciResponse =
            transportClient.admin().indices().prepareCreate(indexName).execute().actionGet();
        Assert.assertTrue(ciResponse.isAcknowledged(), "Create index should be acknowledged");
      }

      @Override
      public void close()
          throws IOException {
        // Closing the writer also releases the underlying transport client.
        transportClientWriter.close();
      }
    };
  }
}
| 3,524 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchWriterBaseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.net.UnknownHostException;
import java.util.Properties;
import org.apache.gobblin.elasticsearch.typemapping.AvroGenericRecordTypeMapper;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class ElasticsearchWriterBaseTest {

  /**
   * Creates a concrete {@link ElasticsearchWriterBase} for testing.
   * The default port is irrelevant for these construction tests, so it returns 0.
   */
  public static ElasticsearchWriterBase getWriterBase(Config config)
      throws UnknownHostException {
    return new ElasticsearchWriterBase(config) {
      @Override
      int getDefaultPort() {
        return 0;
      }
    };
  }

  private void assertFailsToConstruct(Properties props, String testScenario) {
    assertConstructionExpectation(props, testScenario, false);
  }

  private void assertSucceedsToConstruct(Properties props, String testScenario) {
    assertConstructionExpectation(props, testScenario, true);
  }

  /**
   * Attempts to construct a writer from {@code props} and fails the test when the
   * outcome does not match {@code constructionSuccess}.
   *
   * @param props               writer configuration under test
   * @param testScenario        human-readable scenario name used in failure messages
   * @param constructionSuccess true if construction is expected to succeed
   */
  private void assertConstructionExpectation(Properties props,
      String testScenario,
      boolean constructionSuccess) {
    Config config = ConfigFactory.parseProperties(props);
    try {
      getWriterBase(config);
      if (!constructionSuccess) {
        Assert.fail("Test Scenario: " + testScenario + ": Writer should not be constructed");
      }
    } catch (Exception e) {
      if (constructionSuccess) {
        Assert.fail("Test Scenario: " + testScenario + ": Writer should be constructed successfully");
      }
    }
  }

  @Test
  public void testMinimalRequiredConfiguration()
      throws UnknownHostException {
    Properties props = new Properties();
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_NAME, "test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_TYPE, "test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS,
        AvroGenericRecordTypeMapper.class.getCanonicalName());
    assertSucceedsToConstruct(props, "minimal configuration");
  }

  @Test
  public void testBadIndexNameConfiguration()
      throws UnknownHostException {
    Properties props = new Properties();
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_TYPE, "test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS,
        AvroGenericRecordTypeMapper.class.getCanonicalName());
    assertFailsToConstruct(props, "index name missing");
  }

  @Test
  public void testBadIndexNameCasingConfiguration()
      throws UnknownHostException {
    Properties props = new Properties();
    // Provide a valid index type so that the upper-cased index name is the only defect
    // this scenario exercises.
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_NAME, "Test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_TYPE, "test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS,
        AvroGenericRecordTypeMapper.class.getCanonicalName());
    assertFailsToConstruct(props, "bad index name casing");
  }

  @Test
  public void testBadIndexTypeConfiguration()
      throws UnknownHostException {
    Properties props = new Properties();
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_NAME, "test");
    props.setProperty(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS,
        AvroGenericRecordTypeMapper.class.getCanonicalName());
    assertFailsToConstruct(props, "no index type provided");
  }
}
| 3,525 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/test/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchWriterIntegrationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.List;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.elasticsearch.ElasticsearchTestServer;
import org.apache.gobblin.test.AvroRecordGenerator;
import org.apache.gobblin.test.JsonRecordGenerator;
import org.apache.gobblin.test.PayloadType;
import org.apache.gobblin.test.RecordTypeGenerator;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.writer.AsyncWriterManager;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
import org.apache.gobblin.writer.BufferedAsyncDataWriter;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.SequentialBasedBatchAccumulator;
@Slf4j
public class ElasticsearchWriterIntegrationTest {
  // Embedded Elasticsearch server shared by every test in this suite.
  private ElasticsearchTestServer _esTestServer;
  // JVM name (typically pid@host) used only to tag log lines for debugging.
  private String pid = ManagementFactory.getRuntimeMXBean().getName();
  // Writer flavors under test: REST client and native transport client.
  private List<WriterVariant> variants;
  // Record formats under test: Avro GenericRecord and JSON.
  private List<RecordTypeGenerator> recordGenerators;

  ElasticsearchWriterIntegrationTest() {
    variants = ImmutableList.of(new RestWriterVariant(),
        new TransportWriterVariant());
    recordGenerators = ImmutableList.of(new AvroRecordGenerator(), new JsonRecordGenerator());
  }

  /** Starts the embedded Elasticsearch server before any test runs (60s startup timeout). */
  // NOTE(review): log.error is presumably used here only for visibility in test output — confirm.
  @BeforeSuite(alwaysRun=true)
  public void startServers()
      throws IOException {
    log.error("{}: Starting Elasticsearch Server", pid);
    _esTestServer = new ElasticsearchTestServer();
    _esTestServer.start(60);
  }

  /** Stops the embedded Elasticsearch server after the whole suite, if it was started. */
  @AfterSuite(alwaysRun=true)
  public void stopServers() {
    log.error("{}: Stopping Elasticsearch Server", pid);
    if (_esTestServer != null ) {
      _esTestServer.stop();
    }
  }

  /**
   * Writes a single record through each (writer variant x record format) combination
   * and verifies it can be read back by id from the expected index/type.
   */
  @Test
  public void testSingleRecordWrite()
      throws IOException {
    for (WriterVariant writerVariant : variants) {
      for (RecordTypeGenerator recordVariant : recordGenerators) {
        // Index name varies per writer variant so runs do not interfere with each other.
        String indexName = "posts" + writerVariant.getName().toLowerCase();
        String indexType = recordVariant.getName();
        Config config = writerVariant.getConfigBuilder()
            .setIndexName(indexName)
            .setIndexType(indexType)
            .setTypeMapperClassName(recordVariant.getTypeMapperClassName())
            .setHttpPort(_esTestServer.getHttpPort())
            .setTransportPort(_esTestServer.getTransportPort())
            .build();
        TestClient testClient = writerVariant.getTestClient(config);
        // Batch accumulator + buffered async writer mirror the production write pipeline.
        SequentialBasedBatchAccumulator<Object> batchAccumulator = new SequentialBasedBatchAccumulator<>(config);
        BufferedAsyncDataWriter bufferedAsyncDataWriter = new BufferedAsyncDataWriter(batchAccumulator, writerVariant.getBatchAsyncDataWriter(config));
        String id = TestUtils.generateRandomAlphaString(10);
        Object testRecord = recordVariant.getRecord(id, PayloadType.STRING);
        // Zero failure allowance and no retries: any write failure must fail the test.
        DataWriter writer = AsyncWriterManager.builder().failureAllowanceRatio(0.0).retriesEnabled(false).config(config)
            .asyncDataWriter(bufferedAsyncDataWriter).build();
        try {
          testClient.recreateIndex(indexName);
          writer.write(testRecord);
          writer.commit();
        } finally {
          writer.close();
        }
        try {
          GetResponse response = testClient.get(new GetRequest(indexName, indexType, id));
          Assert.assertEquals(response.getId(), id, "Response id matches request");
          Assert.assertEquals(response.isExists(), true, "Document not found");
        } catch (Exception e) {
          Assert.fail("Failed to get a response", e);
        } finally {
          testClient.close();
        }
      }
    }
  }

  /**
   * Runs the malformed-document scenario for every combination of writer variant,
   * record format, and {@link MalformedDocPolicy}.
   */
  @Test
  public void testMalformedDocCombinations()
      throws IOException {
    for (WriterVariant writerVariant : variants) {
      for (RecordTypeGenerator recordVariant : recordGenerators) {
        for (MalformedDocPolicy policy : MalformedDocPolicy.values()) {
          testMalformedDocs(writerVariant, recordVariant, policy);
        }
      }
    }
  }

  /**
   * Sends two docs in a single batch with different field types
   * Triggers Elasticsearch server to send back an exception due to malformed docs
   *
   * Expectations by policy:
   * - FAIL: the commit/close sequence must throw.
   * - IGNORE / WARN: the write must succeed despite the malformed second doc.
   * In all cases exactly one of the two documents ends up indexed.
   * @throws IOException
   */
  public void testMalformedDocs(WriterVariant writerVariant, RecordTypeGenerator recordVariant, MalformedDocPolicy malformedDocPolicy)
      throws IOException {
    String indexName = writerVariant.getName().toLowerCase();
    // Index type encodes the record format and policy so each combination gets its own mapping.
    String indexType = (recordVariant.getName()+malformedDocPolicy.name()).toLowerCase();
    Config config = writerVariant.getConfigBuilder()
        .setIdMappingEnabled(true)
        .setIndexName(indexName)
        .setIndexType(indexType)
        .setHttpPort(_esTestServer.getHttpPort())
        .setTransportPort(_esTestServer.getTransportPort())
        .setTypeMapperClassName(recordVariant.getTypeMapperClassName())
        .setMalformedDocPolicy(malformedDocPolicy)
        .build();
    TestClient testClient = writerVariant.getTestClient(config);
    testClient.recreateIndex(indexName);
    String id1=TestUtils.generateRandomAlphaString(10);
    String id2=TestUtils.generateRandomAlphaString(10);
    // First record establishes a LONG mapping for the payload field; the second record's
    // MAP payload then conflicts with that mapping, making it a "malformed" document.
    Object testRecord1 = recordVariant.getRecord(id1, PayloadType.LONG);
    Object testRecord2 = recordVariant.getRecord(id2, PayloadType.MAP);
    SequentialBasedBatchAccumulator<Object> batchAccumulator = new SequentialBasedBatchAccumulator<>(config);
    BatchAsyncDataWriter elasticsearchWriter = writerVariant.getBatchAsyncDataWriter(config);
    BufferedAsyncDataWriter bufferedAsyncDataWriter = new BufferedAsyncDataWriter(batchAccumulator, elasticsearchWriter);
    DataWriter writer = AsyncWriterManager.builder()
        .failureAllowanceRatio(0.0)
        .retriesEnabled(false)
        .config(config)
        .asyncDataWriter(bufferedAsyncDataWriter)
        .build();
    try {
      writer.write(testRecord1);
      writer.write(testRecord2);
      writer.commit();
      // close() is intentionally inside the try: with FAIL policy an exception is expected
      // somewhere in this sequence and is handled below.
      writer.close();
      if (malformedDocPolicy == MalformedDocPolicy.FAIL) {
        Assert.fail("Should have thrown an exception if malformed doc policy was set to Fail");
      }
    }
    catch (Exception e) {
      switch (malformedDocPolicy) {
        case IGNORE:case WARN:{
          Assert.fail("Should not have failed if malformed doc policy was set to ignore or warn", e);
          break;
        }
        case FAIL: {
          // pass through
          break;
        }
        default: {
          throw new RuntimeException("This test does not handle this policyType : " + malformedDocPolicy.toString());
        }
      }
    }
    // Irrespective of policy, first doc should be inserted and second doc should fail
    int docsIndexed = 0;
    try {
      {
        GetResponse response = testClient.get(new GetRequest(indexName, indexType, id1));
        Assert.assertEquals(response.getId(), id1, "Response id matches request");
        System.out.println(malformedDocPolicy + ":" + response.toString());
        if (response.isExists()) {
          docsIndexed++;
        }
      }
      {
        GetResponse response = testClient.get(new GetRequest(indexName, indexType, id2));
        Assert.assertEquals(response.getId(), id2, "Response id matches request");
        System.out.println(malformedDocPolicy + ":" + response.toString());
        if (response.isExists()) {
          docsIndexed++;
        }
      }
      // only one doc should be found
      Assert.assertEquals(docsIndexed, 1, "Only one document should be indexed");
    }
    catch (Exception e) {
      Assert.fail("Failed to get a response", e);
    }
    finally {
      testClient.close();
    }
  }
}
| 3,526 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchRestWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.util.List;
import java.util.concurrent.Future;
import org.apache.commons.math3.util.Pair;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.Batch;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import org.apache.http.HttpHost;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.impl.nio.reactor.IOReactorConfig;
import org.apache.http.ssl.SSLContextBuilder;
import org.apache.http.ssl.SSLContexts;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import com.google.common.annotations.VisibleForTesting;
import com.typesafe.config.Config;
import javax.annotation.Nullable;
import javax.net.ssl.SSLContext;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class ElasticsearchRestWriter extends ElasticsearchWriterBase implements BatchAsyncDataWriter<Object> {

  private final RestHighLevelClient client;
  private final RestClient lowLevelClient;

  /**
   * Builds a REST-based Elasticsearch writer from the supplied {@code config}.
   * When {@code writer.elasticsearch.ssl.enabled} is true, configures mutual TLS from the
   * configured keystore/truststore; passwords are resolved through the {@link PasswordManager}.
   *
   * @throws IOException if the underlying REST client cannot be instantiated
   */
  ElasticsearchRestWriter(Config config)
      throws IOException {
    super(config);
    int threadCount = ConfigUtils.getInt(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_CLIENT_THREADPOOL_SIZE,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_CLIENT_THREADPOOL_DEFAULT);
    try {
      PasswordManager passwordManager = PasswordManager.getInstance();
      boolean sslEnabled = ConfigUtils.getBoolean(config,
          ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_ENABLED,
          ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_ENABLED_DEFAULT);
      if (sslEnabled) {
        // Client identity (keystore) configuration.
        String keyStoreType = ConfigUtils
            .getString(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_KEYSTORE_TYPE,
                ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_KEYSTORE_TYPE_DEFAULT);
        String keyStoreFilePassword = passwordManager.readPassword(ConfigUtils
            .getString(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_KEYSTORE_PASSWORD, ""));
        String identityFilepath = ConfigUtils
            .getString(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_KEYSTORE_LOCATION, "");
        // Trust (truststore) configuration.
        String trustStoreType = ConfigUtils
            .getString(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_TYPE,
                ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_TYPE_DEFAULT);
        String trustStoreFilePassword = passwordManager.readPassword(ConfigUtils
            .getString(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_PASSWORD, ""));
        String cacertsFilepath = ConfigUtils
            .getString(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_LOCATION, "");
        String truststoreAbsolutePath = Paths.get(cacertsFilepath).toAbsolutePath().normalize().toString();
        log.info("Truststore absolutePath is:" + truststoreAbsolutePath);

        this.lowLevelClient =
            buildRestClient(this.hostAddresses, threadCount, true, keyStoreType, keyStoreFilePassword, identityFilepath,
                trustStoreType, trustStoreFilePassword, cacertsFilepath);
      } else {
        this.lowLevelClient = buildRestClient(this.hostAddresses, threadCount);
      }
      client = new RestHighLevelClient(this.lowLevelClient);
      log.info("Elasticsearch Rest Writer configured successfully with: indexName={}, "
              + "indexType={}, idMappingEnabled={}, typeMapperClassName={}, ssl={}",
          this.indexName, this.indexType, this.idMappingEnabled, this.typeMapper.getClass().getCanonicalName(),
          sslEnabled);
    } catch (Exception e) {
      throw new IOException("Failed to instantiate rest elasticsearch client", e);
    }
  }

  @Override
  int getDefaultPort() {
    return ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_REST_WRITER_DEFAULT_PORT;
  }

  /** Builds a plain-HTTP rest client (no SSL). */
  private static RestClient buildRestClient(List<InetSocketTransportAddress> hosts, int threadCount)
      throws Exception {
    return buildRestClient(hosts, threadCount, false, null, null, null, null, null, null);
  }

  //TODO: Support pass through of configuration (e.g. timeouts etc) of rest client from above
  /**
   * Builds a low-level rest client against {@code hosts}, optionally with mutual TLS.
   * The SSL-related parameters are only read when {@code sslEnabled} is true.
   */
  private static RestClient buildRestClient(List<InetSocketTransportAddress> hosts, int threadCount, boolean sslEnabled,
      String keyStoreType, String keyStoreFilePassword, String identityFilepath, String trustStoreType,
      String trustStoreFilePassword, String cacertsFilepath) throws Exception {

    HttpHost[] httpHosts = new HttpHost[hosts.size()];
    String scheme = sslEnabled ? "https" : "http";
    for (int h = 0; h < httpHosts.length; h++) {
      InetSocketTransportAddress host = hosts.get(h);
      httpHosts[h] = new HttpHost(host.getAddress(), host.getPort(), scheme);
    }

    RestClientBuilder builder = RestClient.builder(httpHosts);

    if (sslEnabled) {
      log.info("ssl configuration: trustStoreType = {}, cacertsFilePath = {}", trustStoreType, cacertsFilepath);
      KeyStore truststore = KeyStore.getInstance(trustStoreType);
      // try-with-resources ensures the keystore streams are closed even if loading fails.
      try (FileInputStream trustInputStream = new FileInputStream(cacertsFilepath)) {
        truststore.load(trustInputStream, trustStoreFilePassword.toCharArray());
      }
      SSLContextBuilder sslBuilder = SSLContexts.custom().loadTrustMaterial(truststore, null);

      log.info("ssl key configuration: keyStoreType = {}, keyFilePath = {}", keyStoreType, identityFilepath);
      KeyStore keystore = KeyStore.getInstance(keyStoreType);
      try (FileInputStream keyInputStream = new FileInputStream(identityFilepath)) {
        keystore.load(keyInputStream, keyStoreFilePassword.toCharArray());
      }
      sslBuilder.loadKeyMaterial(keystore, keyStoreFilePassword.toCharArray());

      final SSLContext sslContext = sslBuilder.build();
      builder = builder.setHttpClientConfigCallback(httpAsyncClientBuilder -> httpAsyncClientBuilder
          // Set ssl context
          .setSSLContext(sslContext).setSSLHostnameVerifier(new NoopHostnameVerifier())
          // Configure number of threads for clients
          .setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(threadCount).build()));
    } else {
      builder = builder.setHttpClientConfigCallback(httpAsyncClientBuilder -> httpAsyncClientBuilder
          // Configure number of threads for clients
          .setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(threadCount).build()));
    }

    // Configure timeouts
    builder.setRequestConfigCallback(requestConfigBuilder -> requestConfigBuilder
        .setConnectionRequestTimeout(0)); // Important, otherwise the client has spurious timeouts

    return builder.build();
  }

  /**
   * Submits the batch asynchronously through the high-level client's bulk API.
   *
   * @return a future that completes with the bulk write response
   */
  @Override
  public Future<WriteResponse> write(final Batch<Object> batch, @Nullable WriteCallback callback) {
    Pair<BulkRequest, FutureCallbackHolder> preparedBatch = this.prepareBatch(batch, callback);
    try {
      client.bulkAsync(preparedBatch.getFirst(), preparedBatch.getSecond().getActionListener());
      return preparedBatch.getSecond().getFuture();
    } catch (Exception e) {
      throw new RuntimeException("Caught unexpected exception while calling bulkAsync API", e);
    }
  }

  @Override
  public void flush() throws IOException {
    // No-op: every batch is submitted directly in write(); there is no internal buffer to flush.
  }

  @Override
  public void close() throws IOException {
    super.close();
    this.lowLevelClient.close();
  }

  @VisibleForTesting
  public RestHighLevelClient getRestHighLevelClient() {
    return this.client;
  }

  @VisibleForTesting
  public RestClient getRestLowLevelClient() {
    return this.lowLevelClient;
  }
}
| 3,527 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/FutureCallbackHolder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.math3.util.Pair;
import org.apache.gobblin.writer.GenericWriteResponse;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import javax.annotation.Nullable;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* A class to hold Futures and Callbacks to support Async writes
*/
@Slf4j
public class FutureCallbackHolder {

  // Listener handed to the Elasticsearch bulk API; routes the outcome into the queue below.
  @Getter
  private final ActionListener<BulkResponse> actionListener;
  // Capacity 1: exactly one terminal outcome (response or failure) is ever produced per batch.
  private final BlockingQueue<Pair<WriteResponse, Throwable>> writeResponseQueue = new ArrayBlockingQueue<>(1);
  // Future exposed to the writer; completes when the listener enqueues an outcome.
  @Getter
  private final Future<WriteResponse> future;
  // NOTE(review): set only inside getWriteResponseorThrow, so isDone() stays false until a
  // caller invokes get() — appears intentional but confirm against callers.
  private final AtomicBoolean done = new AtomicBoolean(false);

  /**
   * Wires together a {@link Future} and an Elasticsearch {@link ActionListener} so that an
   * async bulk write can be awaited, with malformed-document handling driven by
   * {@code malformedDocPolicy}.
   *
   * @param callback          optional writer callback, invoked on success/failure of the batch
   * @param exceptionLogger   optional sink for failure exceptions
   * @param malformedDocPolicy policy deciding whether logical (non-retriable) doc errors
   *                           are ignored, warned about, or fail the batch
   */
  public FutureCallbackHolder(final @Nullable WriteCallback callback,
      ExceptionLogger exceptionLogger,
      final MalformedDocPolicy malformedDocPolicy) {

    this.future = new Future<WriteResponse>() {
      @Override
      public boolean cancel(boolean mayInterruptIfRunning) {
        // Cancellation is not supported for an in-flight bulk request.
        return false;
      }

      @Override
      public boolean isCancelled() {
        return false;
      }

      @Override
      public boolean isDone() {
        return done.get();
      }

      @Override
      public WriteResponse get()
          throws InterruptedException, ExecutionException {
        // Blocks until the action listener enqueues the single outcome.
        Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.take();
        return getWriteResponseorThrow(writeResponseThrowablePair);
      }

      @Override
      public WriteResponse get(long timeout, TimeUnit unit)
          throws InterruptedException, ExecutionException, TimeoutException {
        Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.poll(timeout, unit);
        if (writeResponseThrowablePair == null) {
          throw new TimeoutException("Timeout exceeded while waiting for future to be done");
        } else {
          return getWriteResponseorThrow(writeResponseThrowablePair);
        }
      }
    };

    this.actionListener = new ActionListener<BulkResponse>() {
      @Override
      public void onResponse(BulkResponse bulkItemResponses) {
        if (bulkItemResponses.hasFailures()) {
          // Classify each item failure as logical (malformed doc, non-retriable) or
          // server-side (potentially transient).
          boolean logicalErrors = false;
          boolean serverErrors = false;
          for (BulkItemResponse bulkItemResponse: bulkItemResponses) {
            if (bulkItemResponse.isFailed()) {
              // check if the failure is permanent (logical) or transient (server)
              if (isLogicalError(bulkItemResponse)) {
                // check error policy
                switch (malformedDocPolicy) {
                  case IGNORE: {
                    log.debug("Document id {} was malformed with error {}",
                        bulkItemResponse.getId(),
                        bulkItemResponse.getFailureMessage());
                    break;
                  }
                  case WARN: {
                    log.warn("Document id {} was malformed with error {}",
                        bulkItemResponse.getId(),
                        bulkItemResponse.getFailureMessage());
                    break;
                  }
                  default: {
                    // Pass through
                  }
                }
                logicalErrors = true;
              } else {
                serverErrors = true;
              }
            }
          }
          if (serverErrors) {
            // Server-side failures always fail the batch, regardless of policy.
            onFailure(new RuntimeException("Partial failures in the batch: " + bulkItemResponses.buildFailureMessage()));
          } else if (logicalErrors) {
            // all errors found were logical, throw RuntimeException if policy says to Fail
            switch (malformedDocPolicy) {
              case FAIL: {
                onFailure(new RuntimeException("Partial non-recoverable failures in the batch. To ignore these, set "
                    + ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_MALFORMED_DOC_POLICY + " to "
                    + MalformedDocPolicy.IGNORE.name()));
                break;
              }
              default: {
                // IGNORE/WARN: treat the batch as successful despite malformed docs.
                WriteResponse writeResponse = new GenericWriteResponse<BulkResponse>(bulkItemResponses);
                writeResponseQueue.add(new Pair<WriteResponse, Throwable>(writeResponse, null));
                if (callback != null) {
                  callback.onSuccess(writeResponse);
                }
              }
            }
          }
        } else {
          // Fully successful batch.
          WriteResponse writeResponse = new GenericWriteResponse<BulkResponse>(bulkItemResponses);
          writeResponseQueue.add(new Pair<WriteResponse, Throwable>(writeResponse, null));
          if (callback != null) {
            callback.onSuccess(writeResponse);
          }
        }
      }

      // Heuristic: classifies a bulk item failure as a logical (malformed-document) error
      // by matching known Elasticsearch exception names in the failure message.
      private boolean isLogicalError(BulkItemResponse bulkItemResponse) {
        String failureMessage = bulkItemResponse.getFailureMessage();
        return failureMessage.contains("IllegalArgumentException")
            || failureMessage.contains("illegal_argument_exception")
            || failureMessage.contains("MapperParsingException")
            || failureMessage.contains("mapper_parsing_exception");
      }

      @Override
      public void onFailure(Exception exception) {
        // Completes the future exceptionally and notifies the optional logger/callback.
        writeResponseQueue.add(new Pair<WriteResponse, Throwable>(null, exception));
        if (exceptionLogger != null) {
          exceptionLogger.log(exception);
        }
        if (callback != null) {
          callback.onFailure(exception);
        }
      }
    };
  }

  /**
   * Unwraps the (response, throwable) pair: returns the response if present, otherwise
   * wraps the throwable in an {@link ExecutionException}. Marks the future done either way.
   */
  private WriteResponse getWriteResponseorThrow(Pair<WriteResponse, Throwable> writeResponseThrowablePair)
      throws ExecutionException {
    try {
      if (writeResponseThrowablePair.getFirst() != null) {
        return writeResponseThrowablePair.getFirst();
      } else if (writeResponseThrowablePair.getSecond() != null) {
        throw new ExecutionException(writeResponseThrowablePair.getSecond());
      } else {
        throw new ExecutionException(new RuntimeException("Could not find non-null WriteResponse pair"));
      }
    } finally {
      done.set(true);
    }
  }
}
| 3,528 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchWriterConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import org.apache.gobblin.elasticsearch.typemapping.JsonTypeMapper;
/**
 * Configuration keys (and their defaults) for the Gobblin Elasticsearch writers.
 * All keys live under the {@code writer.elasticsearch} namespace.
 */
public class ElasticsearchWriterConfigurationKeys {

  private ElasticsearchWriterConfigurationKeys() {
    // Constants holder: not meant to be instantiated.
  }

  private static final String ELASTICSEARCH_WRITER_PREFIX = "writer.elasticsearch";

  /** Prefixes {@code value} with the writer configuration namespace. */
  private static String prefix(String value) {
    return ELASTICSEARCH_WRITER_PREFIX + "." + value;
  }

  public static final String ELASTICSEARCH_WRITER_SETTINGS = prefix("settings");
  public static final String ELASTICSEARCH_WRITER_HOSTS = prefix("hosts");
  public static final String ELASTICSEARCH_WRITER_INDEX_NAME = prefix("index.name");
  public static final String ELASTICSEARCH_WRITER_INDEX_TYPE = prefix("index.type");
  public static final String ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS = prefix("typeMapperClass");
  public static final String ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS_DEFAULT = JsonTypeMapper.class.getCanonicalName();
  // When enabled, the document id is taken from the record itself (see ID_FIELD below).
  public static final String ELASTICSEARCH_WRITER_ID_MAPPING_ENABLED = prefix("useIdFromData");
  public static final Boolean ELASTICSEARCH_WRITER_ID_MAPPING_DEFAULT = false;
  public static final String ELASTICSEARCH_WRITER_ID_FIELD = prefix("idFieldName");
  public static final String ELASTICSEARCH_WRITER_ID_FIELD_DEFAULT = "id";
  public static final String ELASTICSEARCH_WRITER_CLIENT_TYPE = prefix("client.type");
  public static final String ELASTICSEARCH_WRITER_CLIENT_TYPE_DEFAULT = "REST";
  public static final String ELASTICSEARCH_WRITER_CLIENT_THREADPOOL_SIZE = prefix("client.threadPoolSize");
  public static final int ELASTICSEARCH_WRITER_CLIENT_THREADPOOL_DEFAULT = 5;

  // SSL / mutual-TLS settings (used by the REST writer when ssl.enabled is true).
  public static final String ELASTICSEARCH_WRITER_SSL_ENABLED = prefix("ssl.enabled");
  public static final boolean ELASTICSEARCH_WRITER_SSL_ENABLED_DEFAULT = false;
  public static final String ELASTICSEARCH_WRITER_SSL_KEYSTORE_TYPE = prefix("ssl.keystoreType");
  public static final String ELASTICSEARCH_WRITER_SSL_KEYSTORE_TYPE_DEFAULT = "pkcs12";
  public static final String ELASTICSEARCH_WRITER_SSL_KEYSTORE_PASSWORD = prefix("ssl.keystorePassword");
  public static final String ELASTICSEARCH_WRITER_SSL_KEYSTORE_LOCATION = prefix("ssl.keystoreLocation");
  public static final String ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_TYPE = prefix("ssl.truststoreType");
  public static final String ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_TYPE_DEFAULT = "jks";
  public static final String ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_LOCATION = prefix("ssl.truststoreLocation");
  public static final String ELASTICSEARCH_WRITER_SSL_TRUSTSTORE_PASSWORD = prefix("ssl.truststorePassword");

  // Policy for documents rejected by Elasticsearch as malformed; see MalformedDocPolicy.
  public static final String ELASTICSEARCH_WRITER_MALFORMED_DOC_POLICY = prefix("malformedDocPolicy");
  public static final String ELASTICSEARCH_WRITER_MALFORMED_DOC_POLICY_DEFAULT = "FAIL";

  //Async Writer Configuration
  public static final String RETRIES_ENABLED = prefix("retriesEnabled");
  public static final boolean RETRIES_ENABLED_DEFAULT = true;
  public static final String MAX_RETRIES = prefix("maxRetries");
  public static final int MAX_RETRIES_DEFAULT = 5;
  static final String FAILURE_ALLOWANCE_PCT_CONFIG = prefix("failureAllowancePercentage");
  static final double FAILURE_ALLOWANCE_PCT_DEFAULT = 0.0;

  /** Supported Elasticsearch client flavors. */
  public enum ClientType {
    TRANSPORT,
    REST
  }

  public static final String ELASTICSEARCH_WRITER_DEFAULT_HOST = "localhost";
  public static final int ELASTICSEARCH_TRANSPORT_WRITER_DEFAULT_PORT = 9300;
  public static final int ELASTICSEARCH_REST_WRITER_DEFAULT_PORT = 9200;
}
| 3,529 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchTransportClientWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.concurrent.Future;
import org.apache.commons.math3.util.Pair;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.Batch;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
/**
 * A {@link BatchAsyncDataWriter} that writes batches to Elasticsearch through the native
 * {@link TransportClient}. This client does not support SSL; use the REST writer for SSL.
 */
@Slf4j
class ElasticsearchTransportClientWriter extends ElasticsearchWriterBase implements BatchAsyncDataWriter<Object> {
  private final TransportClient client;

  ElasticsearchTransportClientWriter(Config config) throws UnknownHostException {
    super(config);
    // Check if ssl is being configured, throw error that transport client does not support ssl
    Preconditions.checkArgument(!ConfigUtils.getBoolean(config,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SSL_ENABLED, false),
        "Transport client does not support ssl, try the Rest client instead");
    this.client = createTransportClient(config);
    // Fix: the placeholder is named typeMapperClassName but was previously handed the
    // TypeMapper instance itself; log the class name instead.
    log.info("ElasticsearchWriter configured successfully with: indexName={}, indexType={}, idMappingEnabled={}, typeMapperClassName={}",
        this.indexName, this.indexType, this.idMappingEnabled, this.typeMapper.getClass().getCanonicalName());
  }

  @Override
  int getDefaultPort() {
    return ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_TRANSPORT_WRITER_DEFAULT_PORT;
  }

  /**
   * Submits the batch as a single bulk request.
   *
   * @param batch the records to index
   * @param callback completion callback, may be null
   * @return a future that completes when the bulk request finishes
   */
  @Override
  public Future<WriteResponse> write(Batch<Object> batch, @Nullable WriteCallback callback) {
    Pair<BulkRequest, FutureCallbackHolder> preparedBatch = this.prepareBatch(batch, callback);
    client.bulk(preparedBatch.getFirst(), preparedBatch.getSecond().getActionListener());
    return preparedBatch.getSecond().getFuture();
  }

  @Override
  public void flush() throws IOException {
    // Elasticsearch client doesn't support a flush method
  }

  @Override
  public void close() throws IOException {
    log.info("Got a close call in ElasticSearchTransportWriter");
    super.close();
    this.client.close();
  }

  @VisibleForTesting
  TransportClient getTransportClient() {
    return this.client;
  }

  /** Builds the transport client from writer settings and registers all configured host addresses. */
  private TransportClient createTransportClient(Config config) throws UnknownHostException {
    TransportClient transportClient;
    // Set TransportClient settings
    Settings.Builder settingsBuilder = Settings.builder();
    if (config.hasPath(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SETTINGS)) {
      settingsBuilder.put(ConfigUtils.configToProperties(config,
          ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_SETTINGS));
    }
    // Tolerate cluster-name mismatches and sniff the cluster for additional nodes.
    settingsBuilder.put("client.transport.ignore_cluster_name", true);
    settingsBuilder.put("client.transport.sniff", true);
    transportClient = new PreBuiltTransportClient(settingsBuilder.build());
    this.hostAddresses.forEach(transportClient::addTransportAddress);
    return transportClient;
  }
}
| 3,530 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/MalformedDocPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
/**
 * A class to represent different policies for handling malformed documents
 * encountered while writing to Elasticsearch. Selected through the
 * "writer.elasticsearch.malformedDocPolicy" configuration key (default: FAIL).
 */
public enum MalformedDocPolicy {
  IGNORE, // Silently skip malformed documents
  WARN, // Log a warning for malformed documents, then continue
  FAIL // Fail the write when a malformed document is encountered
}
| 3,531 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchWriterBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.commons.math3.util.Pair;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.xcontent.XContentType;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.elasticsearch.typemapping.JsonSerializer;
import org.apache.gobblin.elasticsearch.typemapping.TypeMapper;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.Batch;
import org.apache.gobblin.writer.WriteCallback;
/**
 * A base class for different types of Elasticsearch writers.
 *
 * <p>Centralizes configuration shared by all client flavors: index name/type validation,
 * {@link TypeMapper} instantiation, malformed-document policy, and host address resolution.
 * Subclasses supply the concrete client and its default port via {@link #getDefaultPort()}.</p>
 */
@Slf4j
public abstract class ElasticsearchWriterBase implements Closeable {
  protected final String indexName;
  protected final String indexType;
  // Maps records to JSON and extracts the id field when id mapping is enabled.
  protected final TypeMapper typeMapper;
  protected final JsonSerializer serializer;
  protected final boolean idMappingEnabled;
  protected final String idFieldName;
  List<InetSocketTransportAddress> hostAddresses;
  protected final MalformedDocPolicy malformedDocPolicy;

  ElasticsearchWriterBase(Config config)
      throws UnknownHostException {
    this.indexName = config.getString(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_NAME);
    Preconditions.checkNotNull(this.indexName, "Index Name not provided. Please set "
        + ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_NAME);
    // Elasticsearch requires lowercase index names.
    Preconditions.checkArgument(this.indexName.equals(this.indexName.toLowerCase()),
        "Index name must be lowercase, you provided " + this.indexName);
    this.indexType = config.getString(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_TYPE);
    // Bug fix: this null-check previously re-validated indexName instead of indexType.
    Preconditions.checkNotNull(this.indexType, "Index Type not provided. Please set "
        + ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_INDEX_TYPE);
    this.idMappingEnabled = ConfigUtils.getBoolean(config,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_ID_MAPPING_ENABLED,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_ID_MAPPING_DEFAULT);
    this.idFieldName = ConfigUtils.getString(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_ID_FIELD,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_ID_FIELD_DEFAULT);
    // Instantiate the configured TypeMapper reflectively (default is JsonTypeMapper).
    String typeMapperClassName = ConfigUtils.getString(config,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS_DEFAULT);
    if (typeMapperClassName.isEmpty()) {
      throw new IllegalArgumentException(this.getClass().getCanonicalName() + " needs to be configured with "
          + ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_TYPEMAPPER_CLASS + " to enable type mapping");
    }
    try {
      Class<?> typeMapperClass = (Class<?>) Class.forName(typeMapperClassName);
      this.typeMapper = (TypeMapper) ConstructorUtils.invokeConstructor(typeMapperClass);
      this.typeMapper.configure(config);
      this.serializer = this.typeMapper.getSerializer();
    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException e) {
      log.error("Failed to instantiate type-mapper from class " + typeMapperClassName, e);
      throw Throwables.propagate(e);
    }
    this.malformedDocPolicy = MalformedDocPolicy.valueOf(ConfigUtils.getString(config,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_MALFORMED_DOC_POLICY,
        ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_MALFORMED_DOC_POLICY_DEFAULT));
    // If list is empty, connect to the default host and port
    if (!config.hasPath(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_HOSTS)) {
      InetSocketTransportAddress hostAddress = new InetSocketTransportAddress(
          InetAddress.getByName(ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_DEFAULT_HOST),
          getDefaultPort());
      this.hostAddresses = new ArrayList<>(1);
      this.hostAddresses.add(hostAddress);
      log.info("Adding host {} to Elasticsearch writer", hostAddress);
    } else {
      // Get list of hosts
      List<String> hosts = ConfigUtils.getStringList(config, ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_HOSTS);
      // Add host addresses, each entry is either [host] or [host]:[port].
      Splitter hostSplitter = Splitter.on(":").trimResults();
      this.hostAddresses = new ArrayList<>(hosts.size());
      for (String host : hosts) {
        List<String> hostSplit = hostSplitter.splitToList(host);
        Preconditions.checkArgument(hostSplit.size() == 1 || hostSplit.size() == 2,
            "Malformed host name for Elasticsearch writer: " + host + " host names must be of form [host] or [host]:[port]");
        InetAddress hostInetAddress = InetAddress.getByName(hostSplit.get(0));
        InetSocketTransportAddress hostAddress = null;
        if (hostSplit.size() == 1) {
          // No explicit port: fall back to the subclass's default.
          hostAddress = new InetSocketTransportAddress(hostInetAddress, this.getDefaultPort());
        } else if (hostSplit.size() == 2) {
          hostAddress = new InetSocketTransportAddress(hostInetAddress, Integer.parseInt(hostSplit.get(1)));
        }
        this.hostAddresses.add(hostAddress);
        log.info("Adding host {} to Elasticsearch writer", hostAddress);
      }
    }
  }

  /** The port used for hosts configured without an explicit port. */
  abstract int getDefaultPort();

  /**
   * Serializes the records of a {@link Batch} into a single {@link BulkRequest} and pairs it
   * with a {@link FutureCallbackHolder} that completes when the bulk call finishes.
   */
  protected Pair<BulkRequest, FutureCallbackHolder> prepareBatch(Batch<Object> batch, WriteCallback callback) {
    BulkRequest bulkRequest = new BulkRequest();
    // Accumulates the ids added to this batch, for the failure log message below.
    final StringBuilder stringBuilder = new StringBuilder();
    for (Object record : batch.getRecords()) {
      try {
        byte[] serializedBytes = this.serializer.serializeToJson(record);
        log.debug("serialized record: {}", serializedBytes);
        IndexRequest indexRequest = new IndexRequest(this.indexName, this.indexType)
            .source(serializedBytes, 0, serializedBytes.length, XContentType.JSON);
        if (this.idMappingEnabled) {
          String id = this.typeMapper.getValue(this.idFieldName, record);
          indexRequest.id(id);
          stringBuilder.append(";").append(id);
        }
        bulkRequest.add(indexRequest);
      }
      catch (Exception e) {
        // Fix: pass the exception as the throwable argument so the stack trace is logged
        // (it was previously used to fill a "{}" placeholder).
        // NOTE(review): records failing serialization are dropped from the bulk request
        // regardless of malformedDocPolicy — confirm whether the policy should apply here.
        log.error("Encountered exception while preparing batch", e);
      }
    }
    FutureCallbackHolder futureCallbackHolder = new FutureCallbackHolder(callback,
        exception -> log.error("Batch: {} failed on ids; {} with exception {}", batch.getId(),
            stringBuilder.toString(), exception),
        this.malformedDocPolicy);
    // Fix: use the diamond operator instead of a raw Pair construction.
    return new Pair<>(bulkRequest, futureCallbackHolder);
  }

  @Override
  public void close() throws IOException {
    this.serializer.close();
  }
}
| 3,532 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/ElasticsearchDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
import java.io.IOException;
import java.util.Properties;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.AsyncWriterManager;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
import org.apache.gobblin.writer.BufferedAsyncDataWriter;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import org.apache.gobblin.writer.SequentialBasedBatchAccumulator;
import com.google.gson.JsonObject;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.State;
public class ElasticsearchDataWriterBuilder extends DataWriterBuilder {
@Override
public DataWriter build() throws IOException {
State state = this.destination.getProperties();
Properties taskProps = state.getProperties();
Config config = ConfigUtils.propertiesToConfig(taskProps);
SequentialBasedBatchAccumulator<JsonObject> batchAccumulator = new SequentialBasedBatchAccumulator<>(taskProps);
BatchAsyncDataWriter asyncDataWriter;
switch (ElasticsearchWriterConfigurationKeys.ClientType.valueOf(
ConfigUtils.getString(config,
ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_CLIENT_TYPE,
ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_CLIENT_TYPE_DEFAULT).toUpperCase())) {
case REST: {
asyncDataWriter = new ElasticsearchRestWriter(config);
break;
}
case TRANSPORT: {
asyncDataWriter = new ElasticsearchTransportClientWriter(config);
break;
}
default: {
throw new IllegalArgumentException("Need to specify which "
+ ElasticsearchWriterConfigurationKeys.ELASTICSEARCH_WRITER_CLIENT_TYPE
+ " client to use (rest/transport)");
}
}
BufferedAsyncDataWriter bufferedAsyncDataWriter = new BufferedAsyncDataWriter(batchAccumulator, asyncDataWriter);
double failureAllowance = ConfigUtils.getDouble(config, ElasticsearchWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_CONFIG,
ElasticsearchWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_DEFAULT) / 100.0;
boolean retriesEnabled = ConfigUtils.getBoolean(config, ElasticsearchWriterConfigurationKeys.RETRIES_ENABLED,
ElasticsearchWriterConfigurationKeys.RETRIES_ENABLED_DEFAULT);
int maxRetries = ConfigUtils.getInt(config, ElasticsearchWriterConfigurationKeys.MAX_RETRIES,
ElasticsearchWriterConfigurationKeys.MAX_RETRIES_DEFAULT);
return AsyncWriterManager.builder()
.failureAllowanceRatio(failureAllowance)
.retriesEnabled(retriesEnabled)
.numRetries(maxRetries)
.config(config)
.asyncDataWriter(bufferedAsyncDataWriter)
.build();
}
} | 3,533 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/writer/ExceptionLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.writer;
/**
 * An interface to log Exceptions
 */
public interface ExceptionLogger {
  /** Invoked with an exception to record; implementations decide how and where it is logged. */
  void log(Exception exception);
}
| 3,534 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/FieldMappingException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
/**
* An exception for type mapping errors during field-based access
*/
public class FieldMappingException extends Exception {
public FieldMappingException(Exception e) {
super(e);
}
public FieldMappingException(String message, Exception e) {
super(message, e);
}
public FieldMappingException(String message) {
super(message);
}
}
| 3,535 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/AvroGenericRecordSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.io.output.ByteArrayOutputStream;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link JsonSerializer} for {@link GenericRecord} objects.
 *
 * <p>Serialization goes through {@link GenericRecord#toString()}, which emits JSON. The
 * previously-held output-stream and datum-writer fields were never used by any method and
 * have been removed.</p>
 */
@Slf4j
public class AvroGenericRecordSerializer implements JsonSerializer<GenericRecord> {

  @Override
  public void configure(Config config) {
    // No configuration needed.
  }

  /**
   * Serializes the record to UTF-8 JSON bytes.
   *
   * @throws SerializationException if the record cannot be rendered as JSON
   */
  @Override
  public synchronized byte[] serializeToJson(GenericRecord serializable)
      throws SerializationException {
    try {
      // We use the toString method of Avro to flatten the JSON for optional nullable types.
      // Otherwise the JSON has an additional level of nesting to encode the type,
      // e.g. "id": {"string": "id-value"} versus "id": "id-value".
      // See https://issues.apache.org/jira/browse/AVRO-1582 for a good discussion on this.
      String serialized = serializable.toString();
      // StandardCharsets.UTF_8 avoids the charset-name lookup of Charset.forName("UTF-8").
      return serialized.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    } catch (Exception exception) {
      throw new SerializationException("Could not serializeToJson Avro record", exception);
    }
  }

  @Override
  public void close()
      throws IOException {
    // Stateless after removal of the unused stream fields; nothing to close.
  }
}
| 3,536 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/AvroGenericRecordTypeMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
import java.io.IOException;
import org.apache.avro.generic.GenericRecord;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link TypeMapper} implementation for Avro {@link GenericRecord}s.
 */
@Slf4j
public class AvroGenericRecordTypeMapper implements TypeMapper<GenericRecord> {

  private final Closer closer;
  private final JsonSerializer<GenericRecord> serializer;

  public AvroGenericRecordTypeMapper() {
    this.closer = Closer.create();
    // Register the serializer with the closer so close() releases it.
    this.serializer = this.closer.register(new AvroGenericRecordSerializer());
  }

  @Override
  public void configure(Config config) {
    this.serializer.configure(config);
    log.info("AvroGenericRecordTypeMapper successfully configured");
  }

  @Override
  public JsonSerializer<GenericRecord> getSerializer() {
    return this.serializer;
  }

  /**
   * Reads the named field from the record and returns its string form.
   *
   * @throws FieldMappingException if the field is missing or unreadable
   */
  @Override
  public String getValue(String fieldName, GenericRecord record)
      throws FieldMappingException {
    try {
      return record.get(fieldName).toString();
    } catch (Exception e) {
      throw new FieldMappingException("Could not find field " + fieldName, e);
    }
  }

  @Override
  public void close()
      throws IOException {
    this.closer.close();
  }
}
| 3,537 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/JsonTypeMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
import java.io.IOException;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.typesafe.config.Config;
/**
 * A {@link TypeMapper} for Gson {@link JsonElement} records.
 */
public class JsonTypeMapper implements TypeMapper<JsonElement> {
  private final JsonSerializer serializer = new GsonJsonSerializer();

  @Override
  public void configure(Config config) {
    // No configuration needed.
  }

  @Override
  public JsonSerializer<JsonElement> getSerializer() {
    return serializer;
  }

  /**
   * Returns the string value of {@code fieldName} from the given record.
   *
   * @throws FieldMappingException if the record is not a JSON object or the field is absent
   */
  @Override
  public String getValue(String fieldName, JsonElement record)
      throws FieldMappingException {
    // Fix: this was an `assert`, which is a no-op unless -ea is enabled and would let a
    // non-object record fall through to an unchecked IllegalStateException below.
    if (!record.isJsonObject()) {
      throw new FieldMappingException("Expected a JsonObject, got: " + record.getClass().getSimpleName());
    }
    JsonObject jsonObject = record.getAsJsonObject();
    if (jsonObject.has(fieldName)) {
      return jsonObject.get(fieldName).getAsString();
    } else {
      throw new FieldMappingException("Could not find field :" + fieldName);
    }
  }

  @Override
  public void close()
      throws IOException {
  }
}
| 3,538 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/JsonSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
import java.io.Closeable;
import com.typesafe.config.Config;
/**
 * Serializes records of type {@code T} into JSON-encoded bytes for the Elasticsearch writers.
 * Extends {@link Closeable} so implementations can release any resources they hold.
 */
public interface JsonSerializer<T> extends Closeable {
  /** One-time configuration hook, invoked with the job's Typesafe {@link Config} before use. */
  void configure(Config config);
  /** Serializes a single record to JSON bytes; failures are wrapped in {@link SerializationException}. */
  byte[] serializeToJson(T serializable) throws SerializationException;
}
| 3,539 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/SerializationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
/**
* A class to hold exceptions thrown by {@link JsonSerializer}s.
*/
public class SerializationException extends Exception {
public SerializationException(Exception e) {
super(e);
}
public SerializationException(String s, Exception exception) {
super(s, exception);
}
}
| 3,540 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/TypeMapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
import java.io.Closeable;
import com.typesafe.config.Config;
/**
 * An interface that enables the ElasticSearch writer to work with different types of records.
 * Supports serialization and id-getter capabilities
 */
public interface TypeMapper<T> extends Closeable {
  /** One-time configuration hook, invoked with the job's Typesafe {@link Config}. */
  void configure(Config config);
  /** Returns the serializer used to convert records of type {@code T} into JSON bytes. */
  JsonSerializer<T> getSerializer();
  /**
   * Extracts the string value of {@code fieldName} from the record; used by the writers to
   * obtain document ids when id mapping is enabled.
   *
   * @throws FieldMappingException if the field cannot be found or read
   */
  String getValue(String fieldName, T record) throws FieldMappingException;
}
| 3,541 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch | Create_ds/gobblin/gobblin-modules/gobblin-elasticsearch/src/main/java/org/apache/gobblin/elasticsearch/typemapping/GsonJsonSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.elasticsearch.typemapping;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import com.google.gson.Gson;
import com.typesafe.config.Config;
/**
 * A Gson based Json Serializer.
 *
 * <p>Serializes arbitrary objects to UTF-8 encoded JSON bytes using a single shared
 * {@link Gson} instance.
 */
public class GsonJsonSerializer implements JsonSerializer<Object> {
  private final Gson _gson = new Gson();

  /** No configuration is needed for this serializer. */
  @Override
  public void configure(Config config) {
  }

  /**
   * Serializes the given object to JSON and returns its UTF-8 bytes.
   *
   * @param serializable object to serialize
   * @return UTF-8 encoded JSON representation of {@code serializable}
   * @throws SerializationException declared by the interface; this implementation cannot
   *         actually fail on encoding because it uses the constant UTF-8 charset
   */
  @Override
  public byte[] serializeToJson(Object serializable)
      throws SerializationException {
    // StandardCharsets.UTF_8 removes the impossible UnsupportedEncodingException path
    // that the String-named charset overload ("UTF-8") forced us to catch.
    return _gson.toJson(serializable).getBytes(java.nio.charset.StandardCharsets.UTF_8);
  }

  @Override
  public void close()
      throws IOException {
  }
}
| 3,542 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin/couchbase/CouchbaseTestServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase;
import com.couchbase.mock.CouchbaseMock;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import org.json.JSONArray;
import org.json.JSONObject;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
import com.google.common.base.Throwables;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.test.TestUtils;
@Slf4j
public class CouchbaseTestServer {
  private int _port;        // HTTP port the mock is started on (used for REST probes)
  private int _serverPort;  // "direct" (memcached) port, discovered after startup

  public CouchbaseTestServer(int port)
  {
    _port = port;
  }

  /**
   * Starts CouchbaseMock on {@code _port} with one node and a default bucket,
   * waits (up to ~2.5 s, probing every 500 ms) for the HTTP interface to answer,
   * then discovers the direct server port.
   */
  public void start()
  {
    log.info("Starting couchbase server on port " + _port);
    String[] commands = {
        "--port", _port + "",
        "-n", "1",
        "-R", "0",
        "-b", "default:",
        "--host", "127.0.0.1"};
    try {
      System.out.println("Will run command " + Arrays.toString(commands));
      CouchbaseMock.main(commands);
    }
    catch (Exception e)
    {
      log.error("Failed to start couchbase mock server", e);
      Throwables.propagate(e);
    }
    boolean isUp = false;
    int numTries = 5;
    while (!isUp && numTries-- > 0)
    {
      try {
        Thread.sleep(500); // wait .5 secs between probes
        isUp = isUp();
      }
      catch (InterruptedException e)
      {
        // Fix: restore the interrupt status instead of silently swallowing it,
        // and stop waiting so the caller can observe the interruption.
        Thread.currentThread().interrupt();
        break;
      }
      catch (Exception e)
      {
        // best-effort probe: ignore and retry until numTries is exhausted
      }
    }
    Assert.assertTrue(isUp, "Server is not up!");
    fillServerPort();
  }

  /**
   * @return true if the mock's HTTP interface answers on /pools, false otherwise.
   */
  public boolean isUp()
  {
    try {
      URL url = new URL("http://localhost:" + _port + "/pools");
      HttpURLConnection httpURLConnection = (HttpURLConnection) url.openConnection();
      // Any HTTP response (whatever the status code) proves the server is listening.
      httpURLConnection.getResponseCode();
      return true;
    }
    catch (Exception e) {
      // Fix: previously this propagated the exception, which made the subsequent
      // "return false" unreachable and defeated the retry loop in start().
      return false;
    }
  }

  /**
   * Queries /pools/default/buckets and extracts nodes[0].ports.direct of the
   * first bucket into {@code _serverPort}.
   */
  private void fillServerPort()
  {
    try {
      URL url = new URL("http://localhost:" + _port + "/pools/default/buckets");
      HttpURLConnection httpURLConnection = (HttpURLConnection) url.openConnection();
      httpURLConnection.setRequestProperty("Accept", "application/json");
      httpURLConnection.setRequestMethod("GET");
      if (200 <= httpURLConnection.getResponseCode() && httpURLConnection.getResponseCode() <= 299) {
        StringBuilder sb = new StringBuilder();
        // Fix: try-with-resources so the reader (and underlying stream) is always closed.
        try (BufferedReader br = new BufferedReader(new InputStreamReader(httpURLConnection.getInputStream()))) {
          String output;
          while ((output = br.readLine()) != null) {
            sb.append(output);
          }
        }
        JSONArray json = new JSONArray(sb.toString());
        log.debug(json.toString());
        // JSON path: [0].nodes[0].ports.direct -> memcached port of the first node
        int serverPort =
            (Integer) ((JSONObject) ((JSONObject) ((JSONArray) ((JSONObject) json.get(0)).get("nodes")).get(0)).get("ports")).get("direct");
        _serverPort = serverPort;
      }
    }
    catch (Exception e) {
      log.error("Server is not up", e);
      Throwables.propagate(e);
    }
  }

  public int getServerPort() {
    return _serverPort;
  }

  public int getPort() { return _port; }

  /** No-op. */
  public void stop() {}

  /** Smoke test: boot the mock, connect a client, and insert a single document. */
  @Test
  public static void testServer()
      throws InterruptedException, IOException {
    CouchbaseTestServer couchbaseTestServer = new CouchbaseTestServer(TestUtils.findFreePort());
    couchbaseTestServer.start();
    int port = couchbaseTestServer.getPort();
    int serverPort = couchbaseTestServer.getServerPort();
    try {
      CouchbaseEnvironment cbEnv = DefaultCouchbaseEnvironment.builder().bootstrapHttpEnabled(true)
          .bootstrapHttpDirectPort(port)
          .bootstrapCarrierDirectPort(serverPort)
          .connectTimeout(TimeUnit.SECONDS.toMillis(15))
          .bootstrapCarrierEnabled(true).build();
      CouchbaseCluster cbCluster = CouchbaseCluster.create(cbEnv, "localhost");
      Bucket bucket = cbCluster.openBucket("default", "");
      try {
        JsonObject content = JsonObject.empty().put("name", "Michael");
        JsonDocument doc = JsonDocument.create("docId", content);
        JsonDocument inserted = bucket.insert(doc);
      }
      catch (Exception e)
      {
        Assert.fail("Should not throw exception on insert", e);
      }
    }
    finally
    {
      couchbaseTestServer.stop();
    }
  }
}
| 3,543 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin/couchbase/converter/AvroToCouchbaseTupleConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.converter;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.couchbase.common.TupleDocument;
public class AvroToCouchbaseTupleConverterTest {

  /**
   * Builds an Avro record holding a byte payload plus flags, runs it through
   * {@link AvroToCouchbaseTupleConverter}, and verifies that the resulting
   * {@link TupleDocument} carries the same bytes and the same flags value.
   */
  @Test
  public void testBasicConvert() throws Exception {
    // Inner schema: {data: bytes, flags: int}
    Schema payloadSchema = SchemaBuilder.record("Data")
        .fields()
        .name("data").type().bytesType().noDefault()
        .name("flags").type().intType().noDefault()
        .endRecord();
    // Outer schema: {key: string, data: <payload>}
    Schema recordSchema = SchemaBuilder.record("TestRecord")
        .fields()
        .name("key").type().stringType().noDefault()
        .name("data").type(payloadSchema).noDefault()
        .endRecord();

    String expectedText = "hello world";
    GenericData.Record payload = new GenericData.Record(payloadSchema);
    payload.put("data", ByteBuffer.wrap(expectedText.getBytes(Charset.forName("UTF-8"))));
    payload.put("flags", 0);

    GenericData.Record avroRecord = new GenericData.Record(recordSchema);
    avroRecord.put("key", "hello");
    avroRecord.put("data", payload);

    Converter<Schema, String, GenericRecord, TupleDocument> converter = new AvroToCouchbaseTupleConverter();
    TupleDocument document = converter.convertRecord("", avroRecord, null).iterator().next();

    // Drain the document's byte buffer and compare against the original payload.
    byte[] actualBytes = new byte[document.content().value1().readableBytes()];
    document.content().value1().readBytes(actualBytes);
    Assert.assertEquals(actualBytes, expectedText.getBytes(Charset.forName("UTF-8")));
    int actualFlags = document.content().value2();
    Assert.assertEquals(actualFlags, 0);
  }
}
| 3,544 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin/couchbase/converter/AnyToCouchbaseJsonConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.converter;
import java.util.HashMap;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.couchbase.client.java.document.RawJsonDocument;
import com.google.gson.Gson;
import lombok.AllArgsConstructor;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AnyToCouchbaseJsonConverterTest {

  private static final Gson GSON = new Gson();

  /** Conversion using the converter's default key field ("key"). */
  @Test
  public void testBasicConvertDefaultConfig()
      throws Exception {
    // default config
    testBasicConvert("key", false);
  }

  /** Conversion using a custom key field supplied via configuration. */
  @Test
  public void testBasicConvertWithConfig()
      throws Exception {
    // with config
    testBasicConvert("foobar", true);
  }

  /**
   * Converts a two-entry map and verifies the resulting document id and JSON body.
   *
   * @param keyField  name of the field whose value becomes the document id
   * @param setConfig whether to configure the converter with {@code keyField}
   */
  private void testBasicConvert(String keyField, boolean setConfig)
      throws Exception {
    String key = "hello";
    String testContent = "hello world";
    Map<String, String> content = new HashMap<>();
    content.put(keyField, key);
    content.put("value", testContent);
    AnyToCouchbaseJsonConverter recordConverter = new AnyToCouchbaseJsonConverter();
    WorkUnitState workUnitState = mock(WorkUnitState.class);
    if (setConfig) {
      when(workUnitState.getProp(AnyToCouchbaseJsonConverter.KEY_FIELD_CONFIG)).thenReturn(keyField);
      when(workUnitState.contains(AnyToCouchbaseJsonConverter.KEY_FIELD_CONFIG)).thenReturn(true);
    }
    // Fix: init was duplicated in both branches of an if/else; call it once.
    recordConverter.init(workUnitState);
    RawJsonDocument returnDoc = recordConverter.convertRecord("", content, null).iterator().next();
    System.out.println(returnDoc.toString());
    Assert.assertEquals(key.getBytes(), returnDoc.id().getBytes(), "key should be equal");
    Map<String, String> convertedMap = GSON.fromJson(returnDoc.content(), Map.class);
    Assert.assertEquals(key, convertedMap.get(keyField), "key in content should be equal");
    Assert.assertEquals(testContent, convertedMap.get("value"), "value in content should be equal");
    Assert.assertEquals(2, convertedMap.keySet().size(), "should have 2 fields");
  }

  /** Simple POJO whose int field serves as the document key. */
  @AllArgsConstructor
  class Record {
    int key;
    String value;
  }

  /** Verifies that an integer key is stringified into the document id. */
  @Test
  public void testBasicConvertIntKey()
      throws Exception {
    int key = 5;
    String testContent = "hello world";
    Record record = new Record(key, testContent);
    Converter<String, String, Object, RawJsonDocument> recordConverter = new AnyToCouchbaseJsonConverter();
    RawJsonDocument returnDoc = recordConverter.convertRecord("", record, null).iterator().next();
    Assert.assertEquals(key + "", returnDoc.id(), "key should be equal");
    Record convertedRecord = GSON.fromJson(returnDoc.content(), Record.class);
    Assert.assertEquals(convertedRecord.key, key);
    Assert.assertEquals(convertedRecord.value, testContent, "value in content should be equal");
  }

  /** Asserts that converting {@code obj} fails with exactly a DataConversionException. */
  private void testFailure(AnyToCouchbaseJsonConverter recordConverter, Object obj) {
    try {
      recordConverter.convertRecord("", obj, null);
      Assert.fail("Expecting to throw an exception");
    } catch (DataConversionException dce) {
      // expected
    } catch (Exception e) {
      Assert.fail("Expecting to throw only a DataConversionException", e);
    }
  }

  /** Bare primitives, bare strings and maps missing the key field must all be rejected. */
  @Test
  public void testExpectedFailures()
      throws Exception {
    AnyToCouchbaseJsonConverter recordConverter = new AnyToCouchbaseJsonConverter();
    // Fix: Integer.valueOf / a plain literal instead of the deprecated boxing constructors
    // new Integer(5) and new String("hello").
    testFailure(recordConverter, Integer.valueOf(5));
    testFailure(recordConverter, "hello");
    Map<String, Object> missingKey = new HashMap<>();
    missingKey.put("value", "value");
    testFailure(recordConverter, missingKey);
  }
}
| 3,545 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/test/java/org/apache/gobblin/couchbase/writer/CouchbaseWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.writer;
import com.google.gson.JsonObject;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.math3.util.Pair;
import org.apache.gobblin.writer.GenericWriteResponse;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.message.BasicNameValuePair;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.couchbase.client.deps.io.netty.buffer.ByteBuf;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.document.AbstractDocument;
import com.couchbase.client.java.document.RawJsonDocument;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
import com.google.gson.Gson;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.couchbase.CouchbaseTestServer;
import org.apache.gobblin.couchbase.common.TupleDocument;
import org.apache.gobblin.couchbase.converter.AnyToCouchbaseJsonConverter;
import org.apache.gobblin.couchbase.converter.AvroToCouchbaseTupleConverter;
import org.apache.gobblin.metrics.RootMetricContext;
import org.apache.gobblin.metrics.reporter.OutputStreamReporter;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.writer.AsyncWriterManager;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
@Slf4j
public class CouchbaseWriterTest {
private CouchbaseTestServer _couchbaseTestServer;
private CouchbaseEnvironment _couchbaseEnvironment;
  @BeforeSuite
  public void startServers() {
    // Spin up the in-process Couchbase mock on a free port, then build a client
    // environment pointed at it. Carrier (binary protocol) bootstrap is disabled
    // here; only HTTP bootstrap is used against the mock.
    _couchbaseTestServer = new CouchbaseTestServer(TestUtils.findFreePort());
    _couchbaseTestServer.start();
    _couchbaseEnvironment = DefaultCouchbaseEnvironment.builder().bootstrapHttpEnabled(true)
        .bootstrapHttpDirectPort(_couchbaseTestServer.getPort())
        .bootstrapCarrierDirectPort(_couchbaseTestServer.getServerPort()).bootstrapCarrierEnabled(false)
        .kvTimeout(10000).build();
  }
/**
* Implement the equivalent of:
* curl -XPOST -u Administrator:password localhost:httpPort/pools/default/buckets \ -d bucketType=couchbase \
* -d name={@param bucketName} -d authType=sasl -d ramQuotaMB=200
**/
private boolean createBucket(String bucketName) {
CloseableHttpClient httpClient = HttpClientBuilder.create().build();
try {
HttpPost httpPost = new HttpPost("http://localhost:" + _couchbaseTestServer.getPort() + "/pools/default/buckets");
List<NameValuePair> params = new ArrayList<>(2);
params.add(new BasicNameValuePair("bucketType", "couchbase"));
params.add(new BasicNameValuePair("name", bucketName));
params.add(new BasicNameValuePair("authType", "sasl"));
params.add(new BasicNameValuePair("ramQuotaMB", "200"));
httpPost.setEntity(new UrlEncodedFormEntity(params, "UTF-8"));
//Execute and get the response.
HttpResponse response = httpClient.execute(httpPost);
log.info(String.valueOf(response.getStatusLine().getStatusCode()));
return true;
}
catch (Exception e) {
log.error("Failed to create bucket {}", bucketName, e);
return false;
}
}
  /** Tears down the mock Couchbase server once the whole suite has finished. */
  @AfterSuite
  public void stopServers() {
    _couchbaseTestServer.stop();
  }
  /**
   * Test that a single tuple document (converted from an Avro record) can be written
   * successfully and read back with identical payload bytes and flags.
   * @throws IOException
   * @throws DataConversionException
   * @throws ExecutionException
   * @throws InterruptedException
   */
  @Test
  public void testTupleDocumentWrite()
      throws IOException, DataConversionException, ExecutionException, InterruptedException {
    // Writer against the "default" bucket with no TTL configured.
    Config config = getConfig("default", Optional.empty(), Optional.empty(), Optional.empty());
    CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, config);
    try {
      // Avro schema: outer {key: string, data: {data: bytes, flags: int}}
      Schema dataRecordSchema =
          SchemaBuilder.record("Data").fields().name("data").type().bytesType().noDefault().name("flags").type().intType()
              .noDefault().endRecord();
      Schema schema = SchemaBuilder.record("TestRecord").fields().name("key").type().stringType().noDefault().name("data")
          .type(dataRecordSchema).noDefault().endRecord();
      GenericData.Record testRecord = new GenericData.Record(schema);
      String testContent = "hello world";
      GenericData.Record dataRecord = new GenericData.Record(dataRecordSchema);
      dataRecord.put("data", ByteBuffer.wrap(testContent.getBytes(Charset.forName("UTF-8"))));
      dataRecord.put("flags", 0);
      testRecord.put("key", "hello");
      testRecord.put("data", dataRecord);
      Converter<Schema, String, GenericRecord, TupleDocument> recordConverter = new AvroToCouchbaseTupleConverter();
      TupleDocument doc = recordConverter.convertRecord("", testRecord, null).iterator().next();
      // Synchronous write (block on the future), then read back by key.
      writer.write(doc, null).get();
      TupleDocument returnDoc = writer.getBucket().get("hello", TupleDocument.class);
      byte[] returnedBytes = new byte[returnDoc.content().value1().readableBytes()];
      returnDoc.content().value1().readBytes(returnedBytes);
      Assert.assertEquals(returnedBytes, testContent.getBytes(Charset.forName("UTF-8")));
      int returnedFlags = returnDoc.content().value2();
      Assert.assertEquals(returnedFlags, 0);
    } finally {
      writer.close();
    }
  }
private Config getConfig(String bucket, Optional<Integer> ttl, Optional<TimeUnit> ttlTimeUnit, Optional<String> ttlOriginField) {
Properties props = new Properties();
props.setProperty(CouchbaseWriterConfigurationKeys.BUCKET, bucket);
ttl.ifPresent(x -> props.setProperty(CouchbaseWriterConfigurationKeys.DOCUMENT_TTL, "" + x));
ttlTimeUnit.ifPresent(x -> props.setProperty(CouchbaseWriterConfigurationKeys.DOCUMENT_TTL_UNIT, "" + x));
ttlOriginField.ifPresent(x -> props.setProperty(CouchbaseWriterConfigurationKeys.DOCUMENT_TTL_ORIGIN_FIELD, "" + x));
return ConfigFactory.parseProperties(props);
}
  /**
   * Test that a single tuple document can be written successfully with a TTL
   * configured, and that the stored document's expiry matches now + ttl.
   * @throws IOException
   * @throws DataConversionException
   * @throws ExecutionException
   * @throws InterruptedException
   */
  @Test
  public void testTupleDocumentWriteWithTtl()
      throws IOException, DataConversionException, ExecutionException, InterruptedException {
    int ttl = 10000;
    // Expected expiry in epoch-seconds: now + ttl (default TTL unit).
    long expiry = Math.toIntExact(System.currentTimeMillis() / 1000) + ttl;
    Config config = getConfig("default", Optional.of(ttl), Optional.empty(), Optional.empty());
    CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, config);
    try {
      // Avro schema: outer {key: string, data: {data: bytes, flags: int}}
      Schema dataRecordSchema =
          SchemaBuilder.record("Data").fields().name("data").type().bytesType().noDefault().name("flags").type().intType()
              .noDefault().endRecord();
      Schema schema = SchemaBuilder.record("TestRecord").fields().name("key").type().stringType().noDefault().name("data")
          .type(dataRecordSchema).noDefault().endRecord();
      GenericData.Record testRecord = new GenericData.Record(schema);
      String testContent = "hello world";
      GenericData.Record dataRecord = new GenericData.Record(dataRecordSchema);
      dataRecord.put("data", ByteBuffer.wrap(testContent.getBytes(Charset.forName("UTF-8"))));
      dataRecord.put("flags", 0);
      testRecord.put("key", "hello");
      testRecord.put("data", dataRecord);
      Converter<Schema, String, GenericRecord, TupleDocument> recordConverter = new AvroToCouchbaseTupleConverter();
      TupleDocument doc = recordConverter.convertRecord("", testRecord, null).iterator().next();
      AbstractDocument storedDoc = ((GenericWriteResponse<AbstractDocument>) writer.write(doc, null).get()).getRawResponse();
      TupleDocument returnDoc = writer.getBucket().get("hello", TupleDocument.class);
      byte[] returnedBytes = new byte[returnDoc.content().value1().readableBytes()];
      returnDoc.content().value1().readBytes(returnedBytes);
      Assert.assertEquals(returnedBytes, testContent.getBytes(Charset.forName("UTF-8")));
      int returnedFlags = returnDoc.content().value2();
      Assert.assertEquals(returnedFlags, 0);
      // Since get operations do not set the expiry meta, we need to rely
      // on the expiry recorded on the document returned from the write response
      // (50-second tolerance absorbs clock drift between the two timestamps).
      Assert.assertEquals(storedDoc.expiry() - expiry , 0, 50 );
    } finally {
      writer.close();
    }
  }
  /**
   * Test that a single Json document can be written successfully (no TTL) and
   * its content round-trips through a read-back.
   * @throws IOException
   * @throws DataConversionException
   * @throws ExecutionException
   * @throws InterruptedException
   */
  @Test(groups={"timeout"})
  public void testJsonDocumentWrite()
      throws IOException, DataConversionException, ExecutionException, InterruptedException {
    // Empty config: writer falls back to its defaults for bucket/TTL.
    CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, ConfigFactory.empty());
    try {
      String key = "hello";
      String testContent = "hello world";
      HashMap<String, String> contentMap = new HashMap<>();
      contentMap.put("value", testContent);
      Gson gson = new Gson();
      String jsonString = gson.toJson(contentMap);
      RawJsonDocument jsonDocument = RawJsonDocument.create(key, jsonString);
      // Synchronous write, then read back by the same key.
      writer.write(jsonDocument, null).get();
      RawJsonDocument returnDoc = writer.getBucket().get(key, RawJsonDocument.class);
      Map<String, String> returnedMap = gson.fromJson(returnDoc.content(), Map.class);
      Assert.assertEquals(testContent, returnedMap.get("value"));
    } finally {
      writer.close();
    }
  }
  /**
   * Test that a single Json document can be written successfully with TTL,
   * and that the stored document's expiry equals now + ttl (in seconds).
   * @throws IOException
   * @throws DataConversionException
   * @throws ExecutionException
   * @throws InterruptedException
   */
  @Test(groups={"timeout"})
  public void testJsonDocumentWriteTTL()
      throws IOException, DataConversionException, ExecutionException, InterruptedException {
    int ttl = 1000;
    // Expected expiry in epoch-seconds: now + ttl (default TTL unit).
    int expiry = Math.toIntExact(System.currentTimeMillis() / 1000) + ttl;
    Config config = getConfig("default", Optional.of(ttl), Optional.empty(), Optional.empty());
    CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, config);
    try {
      String key = "hello";
      String testContent = "hello world";
      HashMap<String, String> contentMap = new HashMap<>();
      contentMap.put("value", testContent);
      Gson gson = new Gson();
      String jsonString = gson.toJson(contentMap);
      RawJsonDocument jsonDocument = RawJsonDocument.create(key, jsonString);
      // The write response exposes the stored document, which carries the expiry meta.
      AbstractDocument storedDoc = ((GenericWriteResponse<AbstractDocument>) writer.write(jsonDocument, null).get()).getRawResponse();
      RawJsonDocument returnDoc = writer.getBucket().get(key, RawJsonDocument.class);
      Map<String, String> returnedMap = gson.fromJson(returnDoc.content(), Map.class);
      Assert.assertEquals(testContent, returnedMap.get("value"));
      // 50-second tolerance absorbs drift between the two clock reads.
      Assert.assertEquals(storedDoc.expiry(), expiry, 50);
    } finally {
      writer.close();
    }
  }
  /**
   * Test that a single Json document can be written successfully with TTL and an
   * explicit time unit (DAYS), verifying the expiry is converted to seconds.
   * @throws IOException
   * @throws DataConversionException
   * @throws ExecutionException
   * @throws InterruptedException
   */
  @Test(groups={"timeout"})
  public void testJsonDocumentWriteTTLWithTimeUnits()
      throws IOException, DataConversionException, ExecutionException, InterruptedException {
    int ttl = 1;
    TimeUnit timeUnit = TimeUnit.DAYS;
    // Expected expiry in epoch-seconds: now + (1 day expressed in seconds).
    int expiry = Math.toIntExact(System.currentTimeMillis() / 1000) + (int) TimeUnit.SECONDS.convert(ttl, timeUnit);
    Config config = getConfig("default", Optional.of(ttl), Optional.of(timeUnit), Optional.empty());
    CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, config);
    try {
      String key = "hello";
      String testContent = "hello world";
      HashMap<String, String> contentMap = new HashMap<>();
      contentMap.put("value", testContent);
      Gson gson = new Gson();
      String jsonString = gson.toJson(contentMap);
      RawJsonDocument jsonDocument = RawJsonDocument.create(key, jsonString);
      // The write response exposes the stored document, which carries the expiry meta.
      AbstractDocument storedDoc = ((GenericWriteResponse<AbstractDocument>) writer.write(jsonDocument, null).get()).getRawResponse();
      RawJsonDocument returnDoc = writer.getBucket().get(key, RawJsonDocument.class);
      Map<String, String> returnedMap = gson.fromJson(returnDoc.content(), Map.class);
      Assert.assertEquals(testContent, returnedMap.get("value"));
      // 50-second tolerance absorbs drift between the two clock reads.
      Assert.assertEquals(storedDoc.expiry() - expiry, 0, 50);
    } finally {
      writer.close();
    }
  }
  /**
   * Test that the document TTL is computed relative to a timestamp taken from a
   * record field (the "TTL origin field") rather than from the write time:
   * expected expiry = origin + ttl, where origin is 5 days in the past.
   * @throws ExecutionException
   * @throws InterruptedException
   */
  @Test(groups={"timeout"})
  public void testJsonDocumentWriteTtlWithField()
      throws ExecutionException, InterruptedException {
    int ttl = 30;
    int originDiffFromNow = 5;
    TimeUnit timeUnit = TimeUnit.DAYS;
    String ttlOriginField = "time";
    long now = System.currentTimeMillis();
    // Origin timestamp (millis) placed 5 days before now.
    long originDelta = TimeUnit.MILLISECONDS.convert(originDiffFromNow, TimeUnit.DAYS);
    long origin = now - originDelta;
    // Expected expiry (epoch-seconds): now + 30 days - 5 days = origin + 30 days.
    long expiry = TimeUnit.SECONDS.convert(now, TimeUnit.MILLISECONDS) + TimeUnit.SECONDS.convert(ttl, timeUnit) - TimeUnit.SECONDS.convert(originDiffFromNow, timeUnit) ;
    Config config = getConfig("default", Optional.of(ttl), Optional.of(timeUnit), Optional.of(ttlOriginField));
    CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, config);
    try {
      String key = "hello";
      String testContent = "hello world";
      HashMap<String, String> contentMap = new HashMap<>();
      contentMap.put("value", testContent);
      // The origin timestamp is embedded in the record under the configured field name.
      contentMap.put(ttlOriginField, "" + origin);
      Gson gson = new Gson();
      String jsonString = gson.toJson(contentMap);
      RawJsonDocument jsonDocument = RawJsonDocument.create(key, jsonString);
      AbstractDocument storedDoc = ((GenericWriteResponse<AbstractDocument>) writer.write(jsonDocument, null).get()).getRawResponse();
      RawJsonDocument returnDoc = writer.getBucket().get(key, RawJsonDocument.class);
      Map<String, String> returnedMap = gson.fromJson(returnDoc.content(), Map.class);
      Assert.assertEquals(testContent, returnedMap.get("value"));
      // 50-second tolerance absorbs drift between the two clock reads.
      Assert.assertEquals(storedDoc.expiry() , expiry, 50);
    } finally {
      writer.close();
    }
  }
  /**
   * Like {@link #testJsonDocumentWriteTtlWithField()} but with a dotted TTL-origin
   * field name ("a.b.time").
   *
   * NOTE(review): the origin is added as a single flat property literally named
   * "a.b.time", not as nested objects a -> b -> time — presumably the writer looks
   * the field up by its literal name; confirm against CouchbaseWriter's field
   * resolution before relying on nested-path semantics.
   */
  @Test(groups={"timeout"})
  public void testJsonDocumentWriteTtlWithNestedField()
      throws ExecutionException, InterruptedException {
    int ttl = 30;
    int originDiffFromNow = 5;
    TimeUnit timeUnit = TimeUnit.DAYS;
    String ttlOriginField = "a.b.time";
    long now = System.currentTimeMillis();
    // Origin timestamp (millis) placed 5 days before now.
    long originDelta = TimeUnit.MILLISECONDS.convert(originDiffFromNow, timeUnit);
    long origin = now - originDelta;
    // Expected expiry (epoch-seconds): now + 30 days - 5 days = origin + 30 days.
    long expiry = TimeUnit.SECONDS.convert(now, TimeUnit.MILLISECONDS) + TimeUnit.SECONDS.convert(ttl, timeUnit) - TimeUnit.SECONDS.convert(originDiffFromNow, timeUnit) ;
    Config config = getConfig("default", Optional.of(ttl), Optional.of(timeUnit), Optional.of(ttlOriginField));
    CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, config);
    try {
      JsonObject jsonRoot = new JsonObject();
      String key = "keyValue";
      String testContent = "hello world";
      String valueKey = "value";
      jsonRoot.addProperty(valueKey, testContent);
      jsonRoot.addProperty(ttlOriginField, origin);
      RawJsonDocument jsonDocument = RawJsonDocument.create(key, jsonRoot.toString());
      AbstractDocument storedDoc = ((GenericWriteResponse<AbstractDocument>) writer.write(jsonDocument, null).get()).getRawResponse();
      RawJsonDocument returnDoc = writer.getBucket().get(key, RawJsonDocument.class);
      Map<String, String> returnedMap = new Gson().fromJson(returnDoc.content(), Map.class);
      Assert.assertEquals(returnedMap.get(valueKey), testContent);
      // 50-second tolerance absorbs drift between the two clock reads.
      Assert.assertEquals(storedDoc.expiry() , expiry, 50);
    } finally {
      writer.close();
    }
  }
  /**
   * Polls completed writes off {@code queue} until its remaining capacity reaches
   * {@code threshold}, waiting up to {@code sleepTime}/{@code sleepUnit} on each
   * future and recording any that fail or time out into {@code failedFutures}.
   *
   * NOTE(review): when {@code sleepTime <= 0} the loop body does nothing, so a
   * queue below the capacity threshold would spin forever — presumably callers
   * always pass a positive sleepTime; confirm before reuse.
   */
  private void drainQueue(BlockingQueue<Pair<AbstractDocument, Future>> queue, int threshold, long sleepTime,
      TimeUnit sleepUnit, List<Pair<AbstractDocument, Future>> failedFutures) {
    while (queue.remainingCapacity() < threshold) {
      if (sleepTime > 0) {
        Pair<AbstractDocument, Future> topElement = queue.peek();
        if (topElement != null) {
          try {
            // Bounded wait for the oldest outstanding write to complete.
            topElement.getSecond().get(sleepTime, sleepUnit);
          } catch (Exception te) {
            // Timed out or failed: remember the document/future pair for the caller.
            failedFutures.add(topElement);
          }
          queue.poll();
        }
      }
    }
  }
  /**
   * An iterator that applies the {@link AnyToCouchbaseJsonConverter} converter to Objects
   */
  class JsonDocumentIterator implements Iterator<AbstractDocument> {
    private final int _maxRecords;  // cap on records to emit; negative means unbounded
    private int _currRecord;        // number of records emitted so far
    private Iterator<Object> _objectIterator;
    private final Converter<String, String, Object, RawJsonDocument> _recordConverter =
        new AnyToCouchbaseJsonConverter();
    JsonDocumentIterator(Iterator<Object> genericRecordIterator) {
      this(genericRecordIterator, -1); // -1: no record cap
    }
    JsonDocumentIterator(Iterator<Object> genericRecordIterator, int maxRecords) {
      _objectIterator = genericRecordIterator;
      _maxRecords = maxRecords;
      _currRecord = 0;
    }
    // True while the source has records and (if capped) the cap is not yet reached.
    @Override
    public boolean hasNext() {
      if (_maxRecords < 0) {
        return _objectIterator.hasNext();
      } else {
        return _objectIterator.hasNext() && (_currRecord < _maxRecords);
      }
    }
    // Converts the next object to a JSON document; conversion failures are
    // rethrown as unchecked RuntimeExceptions.
    @Override
    public AbstractDocument next() {
      _currRecord++;
      Object record = _objectIterator.next();
      try {
        return _recordConverter.convertRecord("", record, null).iterator().next();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
    // Intentionally a no-op: this test iterator does not support removal.
    @Override
    public void remove() {
    }
  }
/**
 * An {@link Iterator} that lazily converts each incoming {@link GenericRecord}
 * into a {@link TupleDocument} by applying an {@link AvroToCouchbaseTupleConverter},
 * optionally capping the number of records returned.
 */
class TupleDocumentIterator implements Iterator<AbstractDocument> {
  // Maximum number of records to emit; a negative value means unbounded.
  private final int _maxRecords;
  // Number of records emitted so far.
  private int _currRecord;
  private final Iterator<GenericRecord> _genericRecordIterator;
  private final Converter<Schema, String, GenericRecord, TupleDocument> _recordConverter =
      new AvroToCouchbaseTupleConverter();

  /** Unbounded variant: emits every record the underlying iterator produces. */
  TupleDocumentIterator(Iterator<GenericRecord> genericRecordIterator) {
    this(genericRecordIterator, -1);
  }

  TupleDocumentIterator(Iterator<GenericRecord> genericRecordIterator, int maxRecords) {
    _genericRecordIterator = genericRecordIterator;
    _maxRecords = maxRecords;
    _currRecord = 0;
  }

  @Override
  public boolean hasNext() {
    if (_maxRecords < 0) {
      return _genericRecordIterator.hasNext();
    }
    return _genericRecordIterator.hasNext() && (_currRecord < _maxRecords);
  }

  @Override
  public TupleDocument next() {
    if (!hasNext()) {
      // Enforce the Iterator contract; previously next() silently ignored the
      // _maxRecords cap and kept emitting records.
      throw new java.util.NoSuchElementException();
    }
    _currRecord++;
    GenericRecord record = _genericRecordIterator.next();
    try {
      return _recordConverter.convertRecord("", record, null).iterator().next();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public void remove() {
    // Read-only iterator; a silent no-op (the previous behavior) would mask
    // programming errors in callers that expect removal to take effect.
    throw new UnsupportedOperationException("remove() is not supported");
  }
}
/**
 * Records the exact bytes written for each document id and later verifies that
 * reading the same ids back from the bucket returns identical content.
 * Supports {@link TupleDocument} and {@link RawJsonDocument} payloads only.
 */
class Verifier {
  // Maps document id -> the exact bytes that were written for it.
  private final Map<String, byte[]> verificationCache = new HashMap<>(1000);
  // Concrete document class observed on write; used to read the same type back.
  private Class recordClass;

  /**
   * Captures the outgoing bytes of {@code doc} so they can be compared after the
   * round trip. Throws UnsupportedOperationException for unsupported doc types.
   */
  void onWrite(AbstractDocument doc)
      throws UnsupportedEncodingException {
    recordClass = doc.getClass();
    if (doc instanceof TupleDocument) {
      ByteBuf outgoingBuf = (((TupleDocument) doc).content()).value1();
      byte[] outgoingBytes = new byte[outgoingBuf.readableBytes()];
      outgoingBuf.getBytes(0, outgoingBytes);
      verificationCache.put(doc.id(), outgoingBytes);
    } else if (doc instanceof RawJsonDocument) {
      // Use the Charset overload: getBytes(String) forces callers to handle
      // UnsupportedEncodingException for a charset that is always present.
      verificationCache.put(doc.id(), ((RawJsonDocument) doc).content().getBytes(Charset.forName("UTF-8")));
    } else {
      throw new UnsupportedOperationException("Can only support TupleDocument or RawJsonDocument at this time");
    }
  }

  /**
   * Reads every cached id back from {@code bucket} and asserts the returned
   * content is byte-identical to what was written.
   */
  void verify(Bucket bucket)
      throws UnsupportedEncodingException {
    // verify
    System.out.println("Starting verification procedure");
    for (Map.Entry<String, byte[]> cacheEntry : verificationCache.entrySet()) {
      Object doc = bucket.get(cacheEntry.getKey(), recordClass);
      if (doc instanceof TupleDocument) {
        ByteBuf returnedBuf = (((TupleDocument) doc).content()).value1();
        byte[] returnedBytes = new byte[returnedBuf.readableBytes()];
        returnedBuf.getBytes(0, returnedBytes);
        Assert.assertEquals(returnedBytes, cacheEntry.getValue(), "Returned content for TupleDoc should be equal");
      } else if (doc instanceof RawJsonDocument) {
        byte[] returnedBytes = ((RawJsonDocument) doc).content().getBytes(Charset.forName("UTF-8"));
        Assert.assertEquals(returnedBytes, cacheEntry.getValue(), "Returned content for JsonDoc should be equal");
      } else {
        Assert.fail("Returned type was neither TupleDocument nor RawJsonDocument");
      }
    }
    System.out.println("Verification success!");
  }
}
/**
 * Pushes every document from {@code recordIterator} through a {@link CouchbaseWriter}
 * wrapped in an {@link AsyncWriterManager}, then verifies that each written
 * key/value can be read back from the bucket unchanged.
 *
 * @param recordIterator source of documents to write
 * @throws IOException on write or commit failure
 */
private void writeRecordsWithAsyncWriter(Iterator<AbstractDocument> recordIterator)
    throws IOException {
  final boolean verbose = false;
  Config writerConfig = getConfig("default", Optional.empty(), Optional.empty(), Optional.empty());
  CouchbaseWriter couchbaseWriter = new CouchbaseWriter(_couchbaseEnvironment, writerConfig);
  try {
    AsyncWriterManager writerManager = AsyncWriterManager.builder()
        .asyncDataWriter(couchbaseWriter)
        .maxOutstandingWrites(100000)
        .retriesEnabled(true)
        .numRetries(5)
        .build();
    if (verbose) {
      // Create a reporter for metrics. This reporter will write metrics to STDOUT.
      OutputStreamReporter.Factory.newBuilder().build(new Properties());
      // Start all metric reporters.
      RootMetricContext.get().startReporting();
    }
    Verifier verifier = new Verifier();
    while (recordIterator.hasNext()) {
      AbstractDocument document = recordIterator.next();
      verifier.onWrite(document);
      writerManager.write(document);
    }
    writerManager.commit();
    verifier.verify(couchbaseWriter.getBucket());
  } finally {
    couchbaseWriter.close();
  }
}
/**
 * Writes every document from {@code recordIterator} asynchronously through {@code writer},
 * keeping at most {@code outstandingRequests} writes in flight, then verifies all written
 * content against the bucket. Prints timing and success/failure counts to stdout.
 *
 * @param recordIterator      source of documents to write
 * @param writer              the couchbase writer under test
 * @param outstandingRequests capacity of the in-flight queue (max concurrent writes)
 * @param kvTimeout           per-future wait used when draining the queue
 * @param kvTimeoutUnit       unit for {@code kvTimeout}
 * @return the (document, future) pairs whose futures failed or timed out
 */
private List<Pair<AbstractDocument, Future>> writeRecords(Iterator<AbstractDocument> recordIterator,
    CouchbaseWriter writer, int outstandingRequests, long kvTimeout, TimeUnit kvTimeoutUnit)
    throws DataConversionException, UnsupportedEncodingException {
  // Bounded queue of in-flight (document, future) pairs; its capacity caps concurrency.
  final BlockingQueue<Pair<AbstractDocument, Future>> outstandingCallQueue =
      new LinkedBlockingDeque<>(outstandingRequests);
  final List<Pair<AbstractDocument, Future>> failedFutures = new ArrayList<>(outstandingRequests);
  int index = 0;
  long runTime = 0;
  // Counters updated from the async write callbacks (possibly on other threads).
  final AtomicInteger callbackSuccesses = new AtomicInteger(0);
  final AtomicInteger callbackFailures = new AtomicInteger(0);
  final ConcurrentLinkedDeque<Throwable> callbackExceptions = new ConcurrentLinkedDeque<>();
  Verifier verifier = new Verifier();
  while (recordIterator.hasNext()) {
    AbstractDocument doc = recordIterator.next();
    index++;
    // Remember the outgoing bytes so the round trip can be verified at the end.
    verifier.onWrite(doc);
    final long startTime = System.nanoTime();
    Future callFuture = writer.write(doc, new WriteCallback<TupleDocument>() {
      @Override
      public void onSuccess(WriteResponse<TupleDocument> writeResponse) {
        callbackSuccesses.incrementAndGet();
      }
      @Override
      public void onFailure(Throwable throwable) {
        callbackFailures.incrementAndGet();
        callbackExceptions.add(throwable);
      }
    });
    // Free at least one slot (threshold 1) before enqueueing this call's future,
    // so the add below never blocks or fails on a full queue.
    drainQueue(outstandingCallQueue, 1, kvTimeout, kvTimeoutUnit, failedFutures);
    outstandingCallQueue.add(new Pair<>(doc, callFuture));
    runTime += System.nanoTime() - startTime;
  }
  int failedWrites = 0;
  long responseStartTime = System.nanoTime();
  // Threshold == capacity: wait for every remaining in-flight write to complete.
  drainQueue(outstandingCallQueue, outstandingRequests, kvTimeout, kvTimeoutUnit, failedFutures);
  runTime += System.nanoTime() - responseStartTime;
  for (Throwable failure : callbackExceptions) {
    System.out.println(failure.getClass() + " : " + failure.getMessage());
  }
  failedWrites += failedFutures.size();
  System.out.println(
      "Total time to send " + index + " records = " + runTime / 1000000.0 + "ms, " + "Failed writes = " + failedWrites
          + " Callback Successes = " + callbackSuccesses.get() + "Callback Failures = " + callbackFailures.get());
  // Read every written key back and assert byte-identical content.
  verifier.verify(writer.getBucket());
  return failedFutures;
}
@Test
public void testMultiTupleDocumentWrite()
    throws IOException, DataConversionException, ExecutionException, InterruptedException {
  CouchbaseWriter writer = new CouchbaseWriter(_couchbaseEnvironment, ConfigFactory.empty());
  try {
    // Inner payload schema: raw bytes plus a couchbase flags word.
    final Schema dataRecordSchema =
        SchemaBuilder.record("Data").fields()
            .name("data").type().bytesType().noDefault()
            .name("flags").type().intType().noDefault()
            .endRecord();
    // Outer schema: a string key plus the payload record above.
    final Schema schema =
        SchemaBuilder.record("TestRecord").fields()
            .name("key").type().stringType().noDefault()
            .name("data").type(dataRecordSchema).noDefault()
            .endRecord();
    final int numRecords = 1000;
    final int outstandingRequests = 99;
    // Lazily produces numRecords records keyed "hello<i>" with payload "hello world<i>".
    Iterator<GenericRecord> avroRecords = new Iterator<GenericRecord>() {
      private int produced;

      @Override
      public boolean hasNext() {
        return produced < numRecords;
      }

      @Override
      public GenericRecord next() {
        String payload = "hello world" + produced;
        GenericData.Record inner = new GenericData.Record(dataRecordSchema);
        inner.put("data", ByteBuffer.wrap(payload.getBytes(Charset.forName("UTF-8"))));
        inner.put("flags", 0);
        GenericData.Record outer = new GenericData.Record(schema);
        outer.put("key", "hello" + produced);
        outer.put("data", inner);
        produced++;
        return outer;
      }

      @Override
      public void remove() {
      }
    };
    writeRecords(new TupleDocumentIterator(avroRecords), writer, outstandingRequests, 10000, TimeUnit.MILLISECONDS);
  } finally {
    writer.close();
  }
}
@Test
public void testMultiJsonDocumentWriteWithAsyncWriter()
    throws IOException, DataConversionException, ExecutionException, InterruptedException {
  final int numRecords = 1000;
  // Produces numRecords maps of the form {key: "hello<i>", value: "hello world<i>"}.
  Iterator<Object> mapRecords = new Iterator<Object>() {
    private int produced;

    @Override
    public boolean hasNext() {
      return produced < numRecords;
    }

    @Override
    public Object next() {
      HashMap<String, String> record = new HashMap<>();
      record.put("key", "hello" + produced);
      record.put("value", "hello world" + produced);
      produced++;
      return record;
    }

    @Override
    public void remove() {
    }
  };
  writeRecordsWithAsyncWriter(new JsonDocumentIterator(mapRecords));
}
@Test
public void testMultiTupleDocumentWriteWithAsyncWriter()
    throws IOException, DataConversionException, ExecutionException, InterruptedException {
  // Inner payload schema: raw bytes plus a couchbase flags word.
  final Schema dataRecordSchema =
      SchemaBuilder.record("Data").fields()
          .name("data").type().bytesType().noDefault()
          .name("flags").type().intType().noDefault()
          .endRecord();
  // Outer schema: a string key plus the payload record above.
  final Schema schema =
      SchemaBuilder.record("TestRecord").fields()
          .name("key").type().stringType().noDefault()
          .name("data").type(dataRecordSchema).noDefault()
          .endRecord();
  final int numRecords = 1000;
  // Lazily produces numRecords records keyed "hello<i>" with payload "hello world<i>".
  Iterator<GenericRecord> avroRecords = new Iterator<GenericRecord>() {
    private int produced;

    @Override
    public boolean hasNext() {
      return produced < numRecords;
    }

    @Override
    public GenericRecord next() {
      String payload = "hello world" + produced;
      GenericData.Record inner = new GenericData.Record(dataRecordSchema);
      inner.put("data", ByteBuffer.wrap(payload.getBytes(Charset.forName("UTF-8"))));
      inner.put("flags", 0);
      GenericData.Record outer = new GenericData.Record(schema);
      outer.put("key", "hello" + produced);
      outer.put("data", inner);
      produced++;
      return outer;
    }

    @Override
    public void remove() {
    }
  };
  writeRecordsWithAsyncWriter(new TupleDocumentIterator(avroRecords));
}
}
| 3,546 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase/converter/AvroToCouchbaseTupleConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.converter;
import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import com.couchbase.client.core.lang.Tuple;
import com.couchbase.client.deps.io.netty.buffer.ByteBuf;
import com.couchbase.client.deps.io.netty.buffer.Unpooled;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.couchbase.common.TupleDocument;
/**
 * Converts an Avro {@link GenericRecord} of the form
 * {@code {key: string, data: {data: bytes, flags: int}}} into a {@link TupleDocument}
 * whose content pairs the raw byte payload with its couchbase flags.
 */
public class AvroToCouchbaseTupleConverter extends Converter<Schema, String, GenericRecord, TupleDocument> {
  // Field names are fixed for now; see the TODO in convertSchema about deriving
  // them from the schema/config instead. Made constants: they were mutable
  // instance fields although nothing ever reassigns them.
  private static final String KEY_FIELD = "key";
  private static final String DATA_RECORD_FIELD = "data";
  private static final String VALUE_FIELD = "data";
  private static final String FLAGS_FIELD = "flags";

  /**
   * Always returns the empty string; the output "schema" carries no information
   * for this converter.
   */
  @Override
  public String convertSchema(Schema inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    //TODO: Use the schema and config to determine which fields to pull out
    return "";
  }

  /**
   * Builds a {@link TupleDocument} keyed by the record's "key" field, with the
   * nested record's byte payload and flags as content.
   *
   * @throws DataConversionException if a required field is missing from the record
   */
  @Override
  public Iterable<TupleDocument> convertRecord(String outputSchema, GenericRecord inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    Object keyValue = inputRecord.get(KEY_FIELD);
    if (keyValue == null) {
      // Fail with a descriptive error instead of a bare NullPointerException.
      throw new DataConversionException("Missing required field '" + KEY_FIELD + "' in record: " + inputRecord);
    }
    String key = keyValue.toString();
    GenericRecord data = (GenericRecord) inputRecord.get(DATA_RECORD_FIELD);
    if (data == null) {
      throw new DataConversionException("Missing required field '" + DATA_RECORD_FIELD + "' in record: " + inputRecord);
    }
    ByteBuffer dataBytes = (ByteBuffer) data.get(VALUE_FIELD);
    Integer flags = (Integer) data.get(FLAGS_FIELD);
    // Copy the payload into an unpooled netty buffer owned by the document.
    ByteBuf buffer = Unpooled.copiedBuffer(dataBytes);
    return new SingleRecordIterable<>(new TupleDocument(key, Tuple.create(buffer, flags)));
  }
}
| 3,547 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase/converter/AnyToCouchbaseJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.converter;
import com.couchbase.client.java.document.RawJsonDocument;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.ForkOperatorUtils;
/**
 * Converts an arbitrary Object into a Couchbase-writable
 * {@link com.couchbase.client.java.document.RawJsonDocument}.
 * The object is serialized to JSON via Gson; the configured key field must be
 * present in the resulting JSON object and convertible to a string.
 */
@Slf4j
public class AnyToCouchbaseJsonConverter extends Converter<String, String, Object, RawJsonDocument> {

  public static final String KEY_FIELD_CONFIG = "converter.any2couchbase.key.field";

  private static final Gson GSON = new Gson();

  // Name of the field whose value becomes the document id; configurable via
  // KEY_FIELD_CONFIG, defaults to "key".
  private String keyField = "key";

  @Override
  public Converter<String, String, Object, RawJsonDocument> init(WorkUnitState workUnit) {
    String keyFieldPath = ForkOperatorUtils.getPropertyNameForBranch(workUnit, KEY_FIELD_CONFIG);
    if (workUnit.contains(keyFieldPath)) {
      this.keyField = workUnit.getProp(keyFieldPath);
      log.info("Using the field {} from config for writing converter", this.keyField);
    } else {
      log.warn("No configuration for which field to use as the key. Using the default {}", this.keyField);
    }
    return this;
  }

  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    // The output schema carries no information for this converter.
    return "";
  }

  @Override
  public Iterable<RawJsonDocument> convertRecord(String outputSchema, Object inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    JsonElement recordJson = GSON.toJsonTree(inputRecord);
    if (!recordJson.isJsonObject()) {
      throw new DataConversionException("Expecting json element " + recordJson.toString()
          + " to be of type JsonObject.");
    }
    JsonObject recordObject = recordJson.getAsJsonObject();
    if (!recordObject.has(keyField)) {
      throw new DataConversionException("Could not find key field " + keyField
          + " in json object " + recordObject.toString());
    }
    JsonElement keyElement = recordObject.get(keyField);
    String key;
    try {
      key = keyElement.getAsString();
    } catch (Exception e) {
      throw new DataConversionException("Could not get the key " + keyElement.toString() + " as a string", e);
    }
    // The whole record (key field included) becomes the document body.
    return new SingleRecordIterable<>(RawJsonDocument.create(key, GSON.toJson(recordJson)));
  }
}
| 3,548 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase/common/TupleDocument.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.common;
import com.couchbase.client.core.lang.Tuple2;
import com.couchbase.client.core.message.kv.MutationToken;
import com.couchbase.client.deps.io.netty.buffer.ByteBuf;
import com.couchbase.client.java.document.AbstractDocument;
/**
 * A Couchbase document whose content is a {@link Tuple2} pairing a raw byte
 * buffer ({@link ByteBuf}) with an integer flags word; used to store binary data.
 */
public class TupleDocument extends AbstractDocument<Tuple2<ByteBuf, Integer>> {

  /** Creates a document with no expiry and a zero CAS value. */
  public TupleDocument(String id, Tuple2<ByteBuf, Integer> content) {
    this(id, 0, content, 0);
  }

  /** Creates a document with an explicit expiry and CAS value. */
  public TupleDocument(String id, int expiry, Tuple2<ByteBuf, Integer> content, long cas) {
    super(id, expiry, content, cas);
  }

  /** Creates a document that additionally carries a {@link MutationToken}. */
  public TupleDocument(String id, int expiry, Tuple2<ByteBuf, Integer> content, long cas, MutationToken mutationToken) {
    super(id, expiry, content, cas, mutationToken);
  }
}
| 3,549 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase/writer/CouchbaseWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.writer;
import com.couchbase.client.core.lang.Tuple;
import com.couchbase.client.core.lang.Tuple2;
import com.couchbase.client.core.message.ResponseStatus;
import com.couchbase.client.core.message.kv.MutationToken;
import com.couchbase.client.deps.io.netty.buffer.ByteBuf;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.auth.CertAuthenticator;
import com.couchbase.client.java.document.AbstractDocument;
import com.couchbase.client.java.document.RawJsonDocument;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.transcoder.Transcoder;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.typesafe.config.Config;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.math3.util.Pair;
import org.apache.gobblin.couchbase.common.TupleDocument;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.AsyncDataWriter;
import org.apache.gobblin.writer.GenericWriteResponse;
import org.apache.gobblin.writer.GenericWriteResponseWrapper;
import org.apache.gobblin.writer.SyncDataWriter;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import org.apache.gobblin.writer.WriteResponseFuture;
import org.apache.gobblin.writer.WriteResponseMapper;
import rx.Observable;
import rx.Subscriber;
/**
 * A single bucket Couchbase writer.
 *
 * Supports both synchronous ({@link SyncDataWriter}) and asynchronous
 * ({@link AsyncDataWriter}) upserts of {@link TupleDocument} and
 * {@link RawJsonDocument} records, with an optional per-document TTL derived
 * either from the current time or from a timestamp field inside the record.
 */
@Slf4j
public class CouchbaseWriter<D extends AbstractDocument> implements AsyncDataWriter<D>, SyncDataWriter<D> {
  private final Cluster _cluster;
  private final Bucket _bucket;
  // Timeout applied to each async upsert, in _operationTimeunit units.
  private final long _operationTimeout;
  // TTL span; 0 disables expiry entirely (see setDocumentTTL).
  private final int _documentTTL;
  private final TimeUnit _documentTTLTimeUnits;
  // Optional JSON field inside the record holding the TTL origin timestamp;
  // null/empty means "use the current time as origin".
  private final String _documentTTLOriginField;
  private final TimeUnit _documentTTLOriginUnits;
  private final TimeUnit _operationTimeunit;
  private final WriteResponseMapper<D> _defaultWriteResponseMapper;
  // A basic transcoder that just passes through the embedded binary content.
  private final Transcoder<TupleDocument, Tuple2<ByteBuf, Integer>> _tupleDocumentTranscoder =
      new Transcoder<TupleDocument, Tuple2<ByteBuf, Integer>>() {
        @Override
        public TupleDocument decode(String id, ByteBuf content, long cas, int expiry, int flags,
            ResponseStatus status) {
          return newDocument(id, expiry, Tuple.create(content, flags), cas);
        }
        @Override
        public Tuple2<ByteBuf, Integer> encode(TupleDocument document) {
          return document.content();
        }
        @Override
        public TupleDocument newDocument(String id, int expiry, Tuple2<ByteBuf, Integer> content, long cas) {
          return new TupleDocument(id, expiry, content, cas);
        }
        @Override
        public TupleDocument newDocument(String id, int expiry, Tuple2<ByteBuf, Integer> content, long cas,
            MutationToken mutationToken) {
          // NOTE(review): the mutationToken argument is dropped here even though
          // TupleDocument has a token-accepting constructor — confirm intentional.
          return new TupleDocument(id, expiry, content, cas);
        }
        @Override
        public Class<TupleDocument> documentType() {
          return TupleDocument.class;
        }
      };

  /**
   * Connects to the cluster and opens the configured bucket.
   * Authentication: certificate auth when CERT_AUTH_ENABLED is set, password
   * auth when a non-empty PASSWORD is configured, otherwise unauthenticated.
   */
  public CouchbaseWriter(CouchbaseEnvironment couchbaseEnvironment, Config config) {
    List<String> hosts = ConfigUtils.getStringList(config, CouchbaseWriterConfigurationKeys.BOOTSTRAP_SERVERS);
    boolean usesCertAuth = ConfigUtils.getBoolean(config, CouchbaseWriterConfigurationKeys.CERT_AUTH_ENABLED, false);
    String password = ConfigUtils.getString(config, CouchbaseWriterConfigurationKeys.PASSWORD, "");
    log.info("Using hosts hosts: {}", hosts.stream().collect(Collectors.joining(",")));
    _documentTTL = ConfigUtils.getInt(config, CouchbaseWriterConfigurationKeys.DOCUMENT_TTL, 0);
    _documentTTLTimeUnits =
        ConfigUtils.getTimeUnit(config, CouchbaseWriterConfigurationKeys.DOCUMENT_TTL_UNIT, CouchbaseWriterConfigurationKeys.DOCUMENT_TTL_UNIT_DEFAULT);
    _documentTTLOriginField =
        ConfigUtils.getString(config, CouchbaseWriterConfigurationKeys.DOCUMENT_TTL_ORIGIN_FIELD, null);
    _documentTTLOriginUnits =
        ConfigUtils.getTimeUnit(config, CouchbaseWriterConfigurationKeys.DOCUMENT_TTL_ORIGIN_FIELD_UNITS,
            CouchbaseWriterConfigurationKeys.DOCUMENT_TTL_ORIGIN_FIELD_UNITS_DEFAULT);
    String bucketName = ConfigUtils.getString(config, CouchbaseWriterConfigurationKeys.BUCKET,
        CouchbaseWriterConfigurationKeys.BUCKET_DEFAULT);
    _cluster = CouchbaseCluster.create(couchbaseEnvironment, hosts);
    if (usesCertAuth) {
      _cluster.authenticate(CertAuthenticator.INSTANCE);
      _bucket = _cluster.openBucket(bucketName, Collections.singletonList(_tupleDocumentTranscoder));
    } else if (password.isEmpty()) {
      _bucket = _cluster.openBucket(bucketName, Collections.singletonList(_tupleDocumentTranscoder));
    } else {
      _bucket = _cluster.openBucket(bucketName, password, Collections.singletonList(_tupleDocumentTranscoder));
    }
    _operationTimeout = ConfigUtils.getLong(config, CouchbaseWriterConfigurationKeys.OPERATION_TIMEOUT_MILLIS,
        CouchbaseWriterConfigurationKeys.OPERATION_TIMEOUT_DEFAULT);
    _operationTimeunit = TimeUnit.MILLISECONDS;
    _defaultWriteResponseMapper = new GenericWriteResponseWrapper<>();
    log.info("Couchbase writer configured with: hosts: {}, bucketName: {}, operationTimeoutInMillis: {}", hosts,
        bucketName, _operationTimeout);
  }

  /** Exposes the open bucket for tests. */
  @VisibleForTesting
  Bucket getBucket() {
    return _bucket;
  }

  /** Rejects any record type other than TupleDocument or RawJsonDocument. */
  private void assertRecordWritable(D record) {
    boolean recordIsTupleDocument = (record instanceof TupleDocument);
    boolean recordIsJsonDocument = (record instanceof RawJsonDocument);
    Preconditions.checkArgument(recordIsTupleDocument || recordIsJsonDocument,
        "This writer only supports TupleDocument or RawJsonDocument. Found " + record.getClass().getName());
  }

  /**
   * Asynchronously upserts {@code record}.
   *
   * If {@code callback} is null, returns a future backed directly by the rx
   * observable. Otherwise, returns a hand-rolled future fed by a single-slot
   * queue that the rx subscriber fills on success or error; the callback is
   * invoked from the subscriber.
   *
   * @throws RuntimeException (wrapping DataRecordException) if the TTL origin
   *         field cannot be read from the record
   */
  @Override
  public Future<WriteResponse> write(final D record, final WriteCallback callback) {
    assertRecordWritable(record);
    if (record instanceof TupleDocument) {
      // Retain the payload buffer so it survives until the subscriber's
      // onNext releases it.
      // NOTE(review): in the callback == null path nothing visible here performs
      // the matching release() — confirm the buffer is released downstream.
      ((TupleDocument) record).content().value1().retain();
    }
    Observable<D> observable;
    try {
      observable = _bucket.async().upsert(setDocumentTTL(record));
    } catch (DataRecordException e) {
      throw new RuntimeException("Caught exception trying to set TTL of the document", e);
    }
    if (callback == null) {
      return new WriteResponseFuture<>(
          observable.timeout(_operationTimeout, _operationTimeunit).toBlocking().toFuture(),
          _defaultWriteResponseMapper);
    } else {
      final AtomicBoolean callbackFired = new AtomicBoolean(false);
      // Single-slot handoff between the rx subscriber and the returned future;
      // holds either (response, null) on success or (null, throwable) on error.
      final BlockingQueue<Pair<WriteResponse, Throwable>> writeResponseQueue = new ArrayBlockingQueue<>(1);
      final Future<WriteResponse> writeResponseFuture = new Future<WriteResponse>() {
        @Override
        public boolean cancel(boolean mayInterruptIfRunning) {
          // Cancellation is not supported: the upsert is already in flight.
          return false;
        }
        @Override
        public boolean isCancelled() {
          return false;
        }
        @Override
        public boolean isDone() {
          return callbackFired.get();
        }
        @Override
        public WriteResponse get() throws InterruptedException, ExecutionException {
          // Blocks until the subscriber posts a result.
          Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.take();
          return getWriteResponseOrThrow(writeResponseThrowablePair);
        }
        @Override
        public WriteResponse get(long timeout, TimeUnit unit)
            throws InterruptedException, ExecutionException, TimeoutException {
          Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.poll(timeout, unit);
          if (writeResponseThrowablePair == null) {
            throw new TimeoutException("Timeout exceeded while waiting for future to be done");
          } else {
            return getWriteResponseOrThrow(writeResponseThrowablePair);
          }
        }
      };
      observable.timeout(_operationTimeout, _operationTimeunit).subscribe(new Subscriber<D>() {
        @Override
        public void onCompleted() {
        }
        @Override
        public void onError(Throwable e) {
          callbackFired.set(true);
          writeResponseQueue.add(new Pair<WriteResponse, Throwable>(null, e));
          callback.onFailure(e);
        }
        @Override
        public void onNext(D doc) {
          try {
            callbackFired.set(true);
            WriteResponse writeResponse = new GenericWriteResponse<D>(doc);
            writeResponseQueue.add(new Pair<WriteResponse, Throwable>(writeResponse, null));
            callback.onSuccess(writeResponse);
          } finally {
            if (doc instanceof TupleDocument) {
              // Balance the retain() performed before the upsert.
              ((TupleDocument) doc).content().value1().release();
            }
          }
        }
      });
      return writeResponseFuture;
    }
  }

  /** No-op: each write is flushed to Couchbase as it is issued. */
  @Override
  public void flush() throws IOException {
  }

  /**
   * Unpacks a (response, throwable) pair: returns the response if present,
   * otherwise rethrows the throwable wrapped in an ExecutionException.
   */
  private WriteResponse getWriteResponseOrThrow(Pair<WriteResponse, Throwable> writeResponseThrowablePair)
      throws ExecutionException {
    if (writeResponseThrowablePair.getFirst() != null) {
      return writeResponseThrowablePair.getFirst();
    } else if (writeResponseThrowablePair.getSecond() != null) {
      throw new ExecutionException(writeResponseThrowablePair.getSecond());
    } else {
      throw new ExecutionException(new RuntimeException("Could not find non-null WriteResponse pair"));
    }
  }

  /** No-op: this writer keeps no temporary state to clean up. */
  @Override
  public void cleanup() throws IOException {
  }

  /**
   * Returns a new document with 32 bit (int) timestamp expiration date for the document. Note this is a current limitation in couchbase.
   * This approach should work for documents that do not expire until 2038. This should be enough headroom for couchbase
   * to reimplement the design.
   * Source: https://forums.couchbase.com/t/document-expiry-in-seconds-or-a-timestamp/6519/6
   *
   * The expiry is origin + TTL span (both in seconds), where origin is either the
   * value of {@code _documentTTLOriginField} parsed from the record's JSON body or,
   * if no origin field is configured, the current time. A TTL of 0 returns the
   * record unchanged (no expiry).
   *
   * @param record the document to decorate with an expiry
   * @return a copy of {@code record} carrying the computed expiry
   * @throws DataRecordException if the record body is not a JSON object when an
   *         origin field is configured
   */
  private D setDocumentTTL(D record) throws DataRecordException {
    boolean recordIsTupleDocument = record instanceof TupleDocument;
    boolean recordIsJsonDocument = record instanceof RawJsonDocument;
    long ttlSpanSec = TimeUnit.SECONDS.convert(_documentTTL, _documentTTLTimeUnits);
    long eventOriginSec = 0;
    String dataJson = null;
    if (_documentTTL == 0) {
      // TTL disabled: leave the document without an expiry.
      return record;
    } else if (_documentTTLOriginField != null && !_documentTTLOriginField.isEmpty()) {
      if (recordIsTupleDocument) {
        ByteBuf dataByteBuffer = ((Tuple2<ByteBuf, Integer>) record.content()).value1();
        // NOTE(review): ByteBuf.array() exposes the whole backing array and
        // throws for non-heap buffers; readable bytes/offsets are ignored here —
        // confirm all TupleDocument payloads are heap buffers starting at index 0.
        dataJson = new String(dataByteBuffer.array(), StandardCharsets.UTF_8);
      } else {
        dataJson = (String) record.content();
      }
      JsonElement jsonDataRootElement = new JsonParser().parse(dataJson);
      if (!jsonDataRootElement.isJsonObject()) {
        throw new DataRecordException(
            String.format("Document TTL Field is set but the record's value is not a valid json object.: '%s'",
                jsonDataRootElement.toString()));
      }
      JsonObject jsonDataRoot = jsonDataRootElement.getAsJsonObject();
      long documentTTLOrigin = jsonDataRoot.get(_documentTTLOriginField).getAsLong();
      eventOriginSec = TimeUnit.SECONDS.convert(documentTTLOrigin, _documentTTLOriginUnits);
    } else {
      // No origin field configured: count the TTL from "now".
      eventOriginSec = System.currentTimeMillis() / 1000;
    }
    try {
      // Couchbase expiry is a 32-bit epoch-seconds value; toIntExact surfaces
      // overflow (expiry past 2038) as ArithmeticException.
      int expiration = Math.toIntExact(ttlSpanSec + eventOriginSec);
      if (recordIsTupleDocument) {
        return (D) _tupleDocumentTranscoder.newDocument(record.id(), expiration,
            (Tuple2<ByteBuf, Integer>) record.content(), record.cas(), record.mutationToken());
      } else if (recordIsJsonDocument) {
        return (D) RawJsonDocument.create(record.id(), expiration, (String) record.content(), record.cas(),
            record.mutationToken());
      } else {
        throw new RuntimeException(" Only TupleDocument and RawJsonDocument documents are supported");
      }
    } catch (ArithmeticException e) {
      throw new RuntimeException(
          "There was an overflow calculating the expiry timestamp. couchbase currently only supports expiry until January 19, 2038 03:14:07 GMT",
          e);
    }
  }

  /**
   * Synchronously upserts {@code record} and returns the stored document wrapped
   * in a {@link GenericWriteResponse}.
   *
   * @throws IOException wrapping any failure from the cluster or TTL computation
   */
  @Override
  public WriteResponse write(D record) throws IOException {
    try {
      D doc = _bucket.upsert(setDocumentTTL(record));
      Preconditions.checkNotNull(doc);
      return new GenericWriteResponse(doc);
    } catch (Exception e) {
      throw new IOException("Failed to write to Couchbase cluster", e);
    }
  }

  /** Closes the bucket (if open) and disconnects from the cluster, best-effort. */
  @Override
  public void close() {
    if (!_bucket.isClosed()) {
      try {
        _bucket.close();
      } catch (Exception e) {
        log.warn("Failed to close bucket", e);
      }
    }
    try {
      _cluster.disconnect();
    } catch (Exception e) {
      log.warn("Failed to disconnect from cluster", e);
    }
  }
}
| 3,550 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase/writer/CouchbaseEnvironmentFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.writer;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
import com.typesafe.config.Config;
import org.apache.gobblin.util.ConfigUtils;
/**
 * A factory to hand out {@link com.couchbase.client.java.env.CouchbaseEnvironment} instances.
 *
 * <p>A single environment instance per JVM is recommended by the Couchbase SDK, so this
 * factory hands out a process-wide singleton: the configuration supplied by the
 * <em>first</em> caller wins, and later calls return the existing instance with their
 * config ignored.
 */
public class CouchbaseEnvironmentFactory {

  /** Process-wide singleton; guarded by the class lock via the synchronized accessor. */
  private static CouchbaseEnvironment couchbaseEnvironment = null;

  /**
   * Currently hands out a singleton DefaultCouchbaseEnvironment.
   * This is done because it is recommended to use a single couchbase environment instance per JVM.
   * TODO: Determine if we need to use the config to tweak certain parameters
   *
   * @param config writer configuration (see {@link CouchbaseWriterConfigurationKeys});
   *               only consulted on the first call
   * @return the shared {@link CouchbaseEnvironment}
   */
  public static synchronized CouchbaseEnvironment getInstance(Config config)
  {
    // Only read the config and build the environment the first time through; previously the
    // builder was constructed (and discarded) on every call even when the singleton existed.
    if (couchbaseEnvironment == null)
    {
      boolean sslEnabled = ConfigUtils.getBoolean(config, CouchbaseWriterConfigurationKeys.SSL_ENABLED, false);
      String sslKeystoreFile = ConfigUtils.getString(config, CouchbaseWriterConfigurationKeys.SSL_KEYSTORE_FILE, "");
      String sslKeystorePassword = ConfigUtils.getString(config, CouchbaseWriterConfigurationKeys.SSL_KEYSTORE_PASSWORD, "");
      String sslTruststoreFile = ConfigUtils.getString(config, CouchbaseWriterConfigurationKeys.SSL_TRUSTSTORE_FILE, "");
      String sslTruststorePassword = ConfigUtils.getString(config, CouchbaseWriterConfigurationKeys.SSL_TRUSTSTORE_PASSWORD, "");
      boolean certAuthEnabled = ConfigUtils.getBoolean(config, CouchbaseWriterConfigurationKeys.CERT_AUTH_ENABLED, false);
      boolean dnsSrvEnabled = ConfigUtils.getBoolean(config, CouchbaseWriterConfigurationKeys.DNS_SRV_ENABLED, false);
      int socketConnectTimeout = ConfigUtils.getInt(config, CouchbaseWriterConfigurationKeys.SOCKET_CONNECT_TIMEOUT,
          DefaultCouchbaseEnvironment.SOCKET_CONNECT_TIMEOUT);
      couchbaseEnvironment = DefaultCouchbaseEnvironment.builder()
          .sslEnabled(sslEnabled)
          .sslKeystoreFile(sslKeystoreFile)
          .sslKeystorePassword(sslKeystorePassword)
          .sslTruststoreFile(sslTruststoreFile)
          .sslTruststorePassword(sslTruststorePassword)
          .certAuthEnabled(certAuthEnabled)
          .dnsSrvEnabled(dnsSrvEnabled)
          .socketConnectTimeout(socketConnectTimeout)
          .build();
    }
    return couchbaseEnvironment;
  }
}
| 3,551 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase/writer/CouchbaseWriterConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.writer;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
 * Configuration keys for the Couchbase writer. All keys share the
 * {@code writer.couchbase.} prefix.
 */
public class CouchbaseWriterConfigurationKeys {

  public static final String COUCHBASE_WRITER_PREFIX = "writer.couchbase.";

  /** Prepends the writer prefix to a bare key name. */
  private static String prefix(String value) {
    return COUCHBASE_WRITER_PREFIX + value;
  }

  /** Couchbase bootstrap servers to connect to. */
  public static final String BOOTSTRAP_SERVERS = prefix("bootstrapServers");
  public static final List<String> BOOTSTRAP_SERVERS_DEFAULT = Collections.singletonList("localhost");

  /** Bucket documents are written into. */
  public static final String BUCKET = prefix("bucket");
  public static final String BUCKET_DEFAULT = "default";

  public static final String PASSWORD = prefix("password");

  // SSL / authentication settings.
  public static final String SSL_ENABLED = prefix("sslEnabled");
  public static final String SSL_KEYSTORE_FILE = prefix("sslKeystoreFile");
  public static final String SSL_KEYSTORE_PASSWORD = prefix("sslKeystorePassword");
  public static final String SSL_TRUSTSTORE_FILE = prefix("sslTruststoreFile");
  public static final String SSL_TRUSTSTORE_PASSWORD = prefix("sslTruststorePassword");
  public static final String CERT_AUTH_ENABLED = prefix("certAuthEnabled");

  // Connection settings.
  public static final String DNS_SRV_ENABLED = prefix("dnsSrvEnabled");
  public static final String SOCKET_CONNECT_TIMEOUT = prefix("socketConnectTimeout");

  // Document TTL settings.
  public static final String DOCUMENT_TTL = prefix("documentTTL");
  public static final String DOCUMENT_TTL_UNIT = prefix("documentTTLUnits");
  public static final TimeUnit DOCUMENT_TTL_UNIT_DEFAULT = TimeUnit.SECONDS;
  public static final String DOCUMENT_TTL_ORIGIN_FIELD = prefix("documentTTLOriginField");
  public static final String DOCUMENT_TTL_ORIGIN_FIELD_UNITS = prefix("documentTTLOriginUnits");
  public static final TimeUnit DOCUMENT_TTL_ORIGIN_FIELD_UNITS_DEFAULT = TimeUnit.MILLISECONDS;

  // Operation timeout / retry settings.
  public static final String OPERATION_TIMEOUT_MILLIS = prefix("operationTimeoutMillis");
  public static final long OPERATION_TIMEOUT_DEFAULT = 10000; // 10 second default timeout
  public static final String RETRIES_ENABLED = prefix("retriesEnabled");
  public static final boolean RETRIES_ENABLED_DEFAULT = false;
  public static final String MAX_RETRIES = prefix("maxRetries");
  public static final int MAX_RETRIES_DEFAULT = 5;

  static final String FAILURE_ALLOWANCE_PCT_CONFIG = prefix("failureAllowancePercentage");
  static final double FAILURE_ALLOWANCE_PCT_DEFAULT = 0.0;

  private CouchbaseWriterConfigurationKeys() {
    // Constants holder; not instantiable.
  }
}
| 3,552 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase | Create_ds/gobblin/gobblin-modules/gobblin-couchbase/src/main/java/org/apache/gobblin/couchbase/writer/CouchbaseWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.couchbase.writer;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import java.io.IOException;
import java.util.Properties;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.AsyncDataWriter;
import org.apache.gobblin.writer.AsyncWriterManager;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import org.apache.log4j.Logger;
/**
 * A {@link DataWriterBuilder} that builds an asynchronous Couchbase writer wrapped in an
 * {@link AsyncWriterManager}, which supplies retry and failure-allowance handling.
 */
public class CouchbaseWriterBuilder extends DataWriterBuilder {
  private static final Logger LOG = Logger.getLogger(CouchbaseWriterBuilder.class);

  /**
   * Builds a {@link DataWriter} from the given typesafe {@link Config}.
   *
   * @param config writer configuration (see {@link CouchbaseWriterConfigurationKeys})
   * @return an {@link AsyncWriterManager} wrapping a {@link CouchbaseWriter}
   * @throws IOException if the writer cannot be constructed
   */
  public DataWriter build(Config config) throws IOException {
    Preconditions.checkArgument(config != null, "Config cannot be null");
    // Log each config entry; previously the formatted string was built and silently discarded.
    config.entrySet().stream().forEach(
        x -> LOG.debug(String.format("Config passed to factory builder '%s':'%s'", x.getKey(), x.getValue().toString())));
    CouchbaseEnvironment couchbaseEnvironment = CouchbaseEnvironmentFactory.getInstance(config);
    //TODO: Read config to decide whether to build a blocking writer or an async writer

    // Failure allowance is configured as a percentage but consumed as a ratio in [0, 1].
    double failureAllowance =
        ConfigUtils.getDouble(config, CouchbaseWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_CONFIG,
            CouchbaseWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_DEFAULT) / 100.0;
    boolean retriesEnabled = ConfigUtils.getBoolean(config, CouchbaseWriterConfigurationKeys.RETRIES_ENABLED,
        CouchbaseWriterConfigurationKeys.RETRIES_ENABLED_DEFAULT);
    int maxRetries = ConfigUtils.getInt(config, CouchbaseWriterConfigurationKeys.MAX_RETRIES,
        CouchbaseWriterConfigurationKeys.MAX_RETRIES_DEFAULT);
    // build an async couchbase writer
    AsyncDataWriter couchbaseWriter = new CouchbaseWriter(couchbaseEnvironment, config);
    return AsyncWriterManager.builder()
        .asyncDataWriter(couchbaseWriter)
        .failureAllowanceRatio(failureAllowance)
        .retriesEnabled(retriesEnabled)
        .numRetries(maxRetries)
        .config(config)
        .build();
  }

  /** Builds a writer from the destination {@link State} converted to a {@link Config}. */
  @Override
  public DataWriter build() throws IOException {
    State state = this.destination.getProperties();
    Properties taskProps = state.getProperties();
    Config config = ConfigUtils.propertiesToConfig(taskProps);
    return build(config);
  }
}
| 3,553 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/test/java/org/apache/gobblin/metrics/hadoop/NewAPIHadoopCounterReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.hadoop;
import java.util.SortedMap;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.mockito.Mockito;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.codahale.metrics.ExponentiallyDecayingReservoir;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.collect.ImmutableSortedMap;
import org.apache.gobblin.metrics.Measurements;
import org.apache.gobblin.metrics.MetricContext;
import static org.apache.gobblin.metrics.test.TestConstants.*;
/**
 * Unit tests for {@link NewAPIHadoopCounterReporter}.
 *
 * <p>The new-API Hadoop {@link TaskInputOutputContext} and its {@link Counter}s are mocked so
 * the test can verify exactly which increments the reporter applies on each report cycle.
 *
 * @author Yinan Li
 */
@Test(groups = {"gobblin.metrics.hadoop"})
public class NewAPIHadoopCounterReporterTest {
  private NewAPIHadoopCounterReporter<Object, Object, Object, Object> hadoopCounterReporter;
  // Mocked Hadoop counters, one per metric name the reporter is expected to touch.
  private Counter recordsProcessedCount;
  private Counter recordProcessRateCount;
  private Counter recordSizeDistributionCount;
  private Counter totalDurationCount;
  private Counter queueSize;
  // Randomized context name so concurrently-running test classes cannot collide.
  private String name = CONTEXT_NAME + "_" + UUID.randomUUID().toString();
  @BeforeClass
  @SuppressWarnings("unchecked")
  public void setUp() {
    // Wire a mocked counter for each (context name, metric name) pair the reporter will request.
    TaskInputOutputContext<Object, Object, Object, Object> mockContext = Mockito.mock(TaskInputOutputContext.class);
    this.recordsProcessedCount = Mockito.mock(Counter.class);
    Mockito.when(mockContext.getCounter(
        this.name, MetricRegistry.name(RECORDS_PROCESSED, Measurements.COUNT.getName())))
        .thenReturn(this.recordsProcessedCount);
    this.recordProcessRateCount = Mockito.mock(Counter.class);
    Mockito.when(mockContext.getCounter(
        this.name, MetricRegistry.name(RECORD_PROCESS_RATE, Measurements.COUNT.getName())))
        .thenReturn(this.recordProcessRateCount);
    this.recordSizeDistributionCount = Mockito.mock(Counter.class);
    Mockito.when(mockContext.getCounter(
        this.name, MetricRegistry.name(RECORD_SIZE_DISTRIBUTION, Measurements.COUNT.getName())))
        .thenReturn(this.recordSizeDistributionCount);
    this.totalDurationCount = Mockito.mock(Counter.class);
    Mockito.when(mockContext.getCounter(
        this.name, MetricRegistry.name(TOTAL_DURATION, Measurements.COUNT.getName())))
        .thenReturn(this.totalDurationCount);
    this.queueSize = Mockito.mock(Counter.class);
    Mockito.when(mockContext.getCounter(this.name, QUEUE_SIZE)).thenReturn(this.queueSize);
    this.hadoopCounterReporter = NewAPIHadoopCounterReporter.builder(mockContext)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.SECONDS)
        .filter(MetricFilter.ALL)
        .build(MetricContext.builder(this.name).build());
  }
  @Test
  public void testReportMetrics() {
    // Build one live metric of each type and populate with known values.
    Gauge<Integer> queueSizeGauge = new Gauge<Integer>() {
      @Override
      public Integer getValue() {
        return 1000;
      }
    };
    com.codahale.metrics.Counter recordsProcessedCounter = new com.codahale.metrics.Counter();
    recordsProcessedCounter.inc(10l);
    Histogram recordSizeDistributionHistogram = new Histogram(new ExponentiallyDecayingReservoir());
    recordSizeDistributionHistogram.update(1);
    recordSizeDistributionHistogram.update(2);
    recordSizeDistributionHistogram.update(3);
    Meter recordProcessRateMeter = new Meter();
    recordProcessRateMeter.mark(1l);
    recordProcessRateMeter.mark(2l);
    recordProcessRateMeter.mark(3l);
    Timer totalDurationTimer = new Timer();
    totalDurationTimer.update(1, TimeUnit.SECONDS);
    totalDurationTimer.update(2, TimeUnit.SECONDS);
    totalDurationTimer.update(3, TimeUnit.SECONDS);
    SortedMap<String, com.codahale.metrics.Counter> counters =
        ImmutableSortedMap.<String, com.codahale.metrics.Counter>naturalOrder()
            .put(RECORDS_PROCESSED, recordsProcessedCounter).build();
    SortedMap<String, Gauge> gauges = ImmutableSortedMap.<String, Gauge>naturalOrder()
        .put(QUEUE_SIZE, queueSizeGauge).build();
    SortedMap<String, Histogram> histograms = ImmutableSortedMap.<String, Histogram>naturalOrder()
        .put(RECORD_SIZE_DISTRIBUTION, recordSizeDistributionHistogram).build();
    SortedMap<String, Meter> meters = ImmutableSortedMap.<String, Meter>naturalOrder()
        .put(RECORD_PROCESS_RATE, recordProcessRateMeter).build();
    SortedMap<String, Timer> timers = ImmutableSortedMap.<String, Timer>naturalOrder()
        .put(TOTAL_DURATION, totalDurationTimer).build();
    // First report: full counts are pushed as the initial increments.
    this.hadoopCounterReporter.report(gauges, counters, histograms, meters, timers);
    Mockito.verify(this.recordsProcessedCount).increment(10l);
    Mockito.verify(this.recordProcessRateCount).increment(6l);
    Mockito.verify(this.recordSizeDistributionCount).increment(3l);
    Mockito.verify(this.totalDurationCount).increment(3l);
    Mockito.verify(this.queueSize).setValue(1000);
    // Second report: only the deltas since the first report should be pushed.
    recordsProcessedCounter.inc(5l);
    recordSizeDistributionHistogram.update(4);
    recordProcessRateMeter.mark(4l);
    totalDurationTimer.update(4, TimeUnit.SECONDS);
    this.hadoopCounterReporter.report(gauges, counters, histograms, meters, timers);
    Mockito.verify(this.recordsProcessedCount).increment(5l);
    Mockito.verify(this.recordProcessRateCount).increment(4l);
    Mockito.verify(this.recordSizeDistributionCount).increment(1l);
    Mockito.verify(this.totalDurationCount).increment(1l);
  }
  @AfterClass
  public void tearDown() {
    if (this.hadoopCounterReporter != null) {
      this.hadoopCounterReporter.close();
    }
  }
}
| 3,554 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/test/java/org/apache/gobblin/metrics/hadoop/HadoopCounterReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.hadoop;
import java.util.SortedMap;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Reporter;
import org.mockito.Mockito;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.codahale.metrics.Counter;
import com.codahale.metrics.ExponentiallyDecayingReservoir;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.collect.ImmutableSortedMap;
import org.apache.gobblin.metrics.Measurements;
import org.apache.gobblin.metrics.MetricContext;
import static org.apache.gobblin.metrics.test.TestConstants.*;
/**
 * Unit tests for {@link HadoopCounterReporter}.
 *
 * <p>The old-API Hadoop {@link Reporter} and its {@link Counters.Counter}s are mocked so the
 * test can verify exactly which increments the reporter applies on each report cycle.
 *
 * @author Yinan Li
 */
@Test(groups = {"gobblin.metrics.hadoop"})
public class HadoopCounterReporterTest {
  private HadoopCounterReporter hadoopCounterReporter;
  // Mocked Hadoop counters, one per metric name the reporter is expected to touch.
  private Counters.Counter recordsProcessedCount;
  private Counters.Counter recordProcessRateCount;
  private Counters.Counter recordSizeDistributionCount;
  private Counters.Counter totalDurationCount;
  private Counters.Counter queueSize;
  @BeforeClass
  public void setUp() throws Exception {
    // Randomized context name so concurrently-running test classes cannot collide.
    String contextName = CONTEXT_NAME + "_" + UUID.randomUUID().toString();
    // Wire a mocked counter for each (context name, metric name) pair the reporter will request.
    Reporter mockedReporter = Mockito.mock(Reporter.class);
    this.recordsProcessedCount = Mockito.mock(Counters.Counter.class);
    Mockito.when(mockedReporter.getCounter(
        contextName, MetricRegistry.name(RECORDS_PROCESSED, Measurements.COUNT.getName())))
        .thenReturn(this.recordsProcessedCount);
    this.recordProcessRateCount = Mockito.mock(Counters.Counter.class);
    Mockito.when(mockedReporter.getCounter(
        contextName, MetricRegistry.name(RECORD_PROCESS_RATE, Measurements.COUNT.getName())))
        .thenReturn(this.recordProcessRateCount);
    this.recordSizeDistributionCount = Mockito.mock(Counters.Counter.class);
    Mockito.when(mockedReporter.getCounter(
        contextName, MetricRegistry.name(RECORD_SIZE_DISTRIBUTION, Measurements.COUNT.getName())))
        .thenReturn(this.recordSizeDistributionCount);
    this.totalDurationCount = Mockito.mock(Counters.Counter.class);
    Mockito.when(mockedReporter.getCounter(
        contextName, MetricRegistry.name(TOTAL_DURATION, Measurements.COUNT.getName())))
        .thenReturn(this.totalDurationCount);
    this.queueSize = Mockito.mock(Counters.Counter.class);
    Mockito.when(mockedReporter.getCounter(contextName, QUEUE_SIZE)).thenReturn(this.queueSize);
    this.hadoopCounterReporter = HadoopCounterReporter.builder(mockedReporter)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.SECONDS)
        .filter(MetricFilter.ALL)
        .build(MetricContext.builder(contextName).buildStrict());
  }
  @Test
  public void testReportMetrics() {
    // Build one live metric of each type and populate with known values.
    Gauge<Integer> queueSizeGauge = new Gauge<Integer>() {
      @Override
      public Integer getValue() {
        return 1000;
      }
    };
    Counter recordsProcessedCounter = new Counter();
    recordsProcessedCounter.inc(10l);
    Histogram recordSizeDistributionHistogram = new Histogram(new ExponentiallyDecayingReservoir());
    recordSizeDistributionHistogram.update(1);
    recordSizeDistributionHistogram.update(2);
    recordSizeDistributionHistogram.update(3);
    Meter recordProcessRateMeter = new Meter();
    recordProcessRateMeter.mark(1l);
    recordProcessRateMeter.mark(2l);
    recordProcessRateMeter.mark(3l);
    Timer totalDurationTimer = new Timer();
    totalDurationTimer.update(1, TimeUnit.SECONDS);
    totalDurationTimer.update(2, TimeUnit.SECONDS);
    totalDurationTimer.update(3, TimeUnit.SECONDS);
    SortedMap<String, Counter> counters = ImmutableSortedMap.<String, Counter>naturalOrder()
        .put(RECORDS_PROCESSED, recordsProcessedCounter).build();
    SortedMap<String, Gauge> gauges = ImmutableSortedMap.<String, Gauge>naturalOrder()
        .put(QUEUE_SIZE, queueSizeGauge).build();
    SortedMap<String, Histogram> histograms = ImmutableSortedMap.<String, Histogram>naturalOrder()
        .put(RECORD_SIZE_DISTRIBUTION, recordSizeDistributionHistogram).build();
    SortedMap<String, Meter> meters = ImmutableSortedMap.<String, Meter>naturalOrder()
        .put(RECORD_PROCESS_RATE, recordProcessRateMeter).build();
    SortedMap<String, Timer> timers = ImmutableSortedMap.<String, Timer>naturalOrder()
        .put(TOTAL_DURATION, totalDurationTimer).build();
    // First report: full counts are pushed as the initial increments.
    this.hadoopCounterReporter.report(gauges, counters, histograms, meters, timers);
    Mockito.verify(this.recordsProcessedCount).increment(10l);
    Mockito.verify(this.recordProcessRateCount).increment(6l);
    Mockito.verify(this.recordSizeDistributionCount).increment(3l);
    Mockito.verify(this.totalDurationCount).increment(3l);
    Mockito.verify(this.queueSize).setValue(1000);
    // Second report: only the deltas since the first report should be pushed.
    recordsProcessedCounter.inc(5l);
    recordSizeDistributionHistogram.update(4);
    recordProcessRateMeter.mark(4l);
    totalDurationTimer.update(4, TimeUnit.SECONDS);
    this.hadoopCounterReporter.report(gauges, counters, histograms, meters, timers);
    Mockito.verify(this.recordsProcessedCount).increment(5l);
    Mockito.verify(this.recordProcessRateCount).increment(4l);
    Mockito.verify(this.recordSizeDistributionCount).increment(1l);
    Mockito.verify(this.totalDurationCount).increment(1l);
  }
  @AfterClass
  public void tearDown() {
    if (this.hadoopCounterReporter != null) {
      this.hadoopCounterReporter.close();
    }
  }
}
| 3,555 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/main/java/org/apache/gobblin/metrics/hadoop/NewAPIHadoopCounterReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.hadoop;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import com.codahale.metrics.MetricFilter;
import org.apache.gobblin.metrics.reporter.ContextAwareScheduledReporter;
import org.apache.gobblin.metrics.MetricContext;
/**
 * A Hadoop-counter-backed {@link org.apache.gobblin.metrics.reporter.ContextAwareScheduledReporter}
 * for the "new" MapReduce API: applicable metrics are published as Hadoop counters through a
 * {@link org.apache.hadoop.mapreduce.TaskInputOutputContext}.
 *
 * @param <KI> the input key type of the Hadoop context
 * @param <VI> the input value type of the Hadoop context
 * @param <KO> the output key type of the Hadoop context
 * @param <VO> the output value type of the Hadoop context
 *
 * @author Yinan Li
 */
public class NewAPIHadoopCounterReporter<KI, VI, KO, VO> extends AbstractHadoopCounterReporter {

  private final TaskInputOutputContext<KI, VI, KO, VO> hadoopContext;

  protected NewAPIHadoopCounterReporter(MetricContext context, String name, MetricFilter filter,
      TimeUnit rateUnit, TimeUnit durationUnit, TaskInputOutputContext<KI, VI, KO, VO> hadoopContext) {
    super(context, name, filter, rateUnit, durationUnit);
    this.hadoopContext = hadoopContext;
  }

  /** Bumps the Hadoop counter keyed by (context name, metric name) by {@code incremental}. */
  @Override
  protected void reportIncremental(MetricContext context, String name, long incremental) {
    this.hadoopContext.getCounter(context.getName(), name).increment(incremental);
  }

  /** Overwrites the Hadoop counter keyed by (context name, metric name) with {@code value}. */
  @Override
  protected void reportValue(MetricContext context, String name, long value) {
    this.hadoopContext.getCounter(context.getName(), name).setValue(value);
  }

  /**
   * Creates a {@link Builder} whose reporter name is the fully-qualified name of this class.
   *
   * @param hadoopContext the {@link org.apache.hadoop.mapreduce.TaskInputOutputContext}
   *                      used to access Hadoop counters
   * @return a new {@link Builder}
   */
  public static <KI, VI, KO, VO> Builder<KI, VI, KO, VO> builder(
      TaskInputOutputContext<KI, VI, KO, VO> hadoopContext) {
    return builder(NewAPIHadoopCounterReporter.class.getName(), hadoopContext);
  }

  /**
   * Creates a {@link Builder} with an explicit reporter name.
   *
   * @param name the reporter name to use
   * @param hadoopContext the {@link org.apache.hadoop.mapreduce.TaskInputOutputContext}
   *                      used to access Hadoop counters
   * @return a new {@link Builder}
   */
  public static <KI, VI, KO, VO> Builder<KI, VI, KO, VO> builder(String name,
      TaskInputOutputContext<KI, VI, KO, VO> hadoopContext) {
    return new Builder<>(name, hadoopContext);
  }

  /**
   * Builder for {@link NewAPIHadoopCounterReporter} instances.
   */
  public static class Builder<KI, VI, KO, VO> extends ContextAwareScheduledReporter.Builder<
      NewAPIHadoopCounterReporter<KI, VI, KO, VO>, Builder<KI, VI, KO, VO>> {

    private final TaskInputOutputContext<KI, VI, KO, VO> hadoopContext;

    public Builder(String name, TaskInputOutputContext<KI, VI, KO, VO> hadoopContext) {
      super(name);
      this.hadoopContext = hadoopContext;
    }

    @Override
    public NewAPIHadoopCounterReporter<KI, VI, KO, VO> build(MetricContext context) {
      return new NewAPIHadoopCounterReporter<>(
          context, this.name, this.filter, this.rateUnit, this.durationUnit, this.hadoopContext);
    }
  }
}
| 3,556 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/main/java/org/apache/gobblin/metrics/hadoop/AbstractHadoopCounterReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.hadoop;
import java.util.Map;
import java.util.SortedMap;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.collect.Maps;
import org.apache.gobblin.metrics.reporter.ContextAwareScheduledReporter;
import org.apache.gobblin.metrics.Measurements;
import org.apache.gobblin.metrics.MetricContext;
/**
 * An extension to {@link ContextAwareScheduledReporter} that serves as the basis
 * for implementations that report applicable metrics through Hadoop counters.
 *
 * <p>Counting metrics (counters, meters, histograms, timers) are reported incrementally:
 * only the delta since the previous report cycle is pushed. Integral-valued gauges are
 * reported as absolute values.
 *
 * @author Yinan Li
 */
public abstract class AbstractHadoopCounterReporter extends ContextAwareScheduledReporter {
  // Last reported count per metric name, used to compute per-report deltas.
  // NOTE(review): keyed by bare metric name only — a counter and a meter sharing the same
  // name would clobber each other's entry; confirm names are unique across metric types.
  private final Map<String, Long> previousCounts = Maps.newHashMap();
  protected AbstractHadoopCounterReporter(MetricContext context, String name, MetricFilter filter,
      TimeUnit rateUnit, TimeUnit durationUnit) {
    super(context, name, filter, rateUnit, durationUnit);
  }
  @Override
  protected void reportInContext(MetricContext context,
      SortedMap<String, Gauge> gauges,
      SortedMap<String, Counter> counters,
      SortedMap<String, Histogram> histograms,
      SortedMap<String, Meter> meters,
      SortedMap<String, Timer> timers) {
    for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
      Gauge gauge = entry.getValue();
      // Only gauges with integral values map cleanly onto Hadoop counters; others are skipped.
      if (gauge.getValue() instanceof Long ||
          gauge.getValue() instanceof Integer ||
          gauge.getValue() instanceof Short ||
          gauge.getValue() instanceof Byte)
        reportValue(context, entry.getKey(), ((Number) gauge.getValue()).longValue());
    }
    // All counting metric types report via their monotonically increasing getCount().
    for (Map.Entry<String, Counter> entry : counters.entrySet()) {
      reportCount(context, entry.getKey(), entry.getValue().getCount());
    }
    for (Map.Entry<String, Meter> entry : meters.entrySet()) {
      reportCount(context, entry.getKey(), entry.getValue().getCount());
    }
    for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
      reportCount(context, entry.getKey(), entry.getValue().getCount());
    }
    for (Map.Entry<String, Timer> entry : timers.entrySet()) {
      reportCount(context, entry.getKey(), entry.getValue().getCount());
    }
  }
  /**
   * Report a given incremental value of a metric.
   *
   * @param context the {@link MetricContext} this is associated to
   * @param name metric name
   * @param incremental the given incremental value
   */
  protected abstract void reportIncremental(MetricContext context, String name, long incremental);
  /**
   * Report a given value of a metric.
   *
   * @param context the {@link MetricContext} this is associated to
   * @param name metric name
   * @param value the given value
   */
  protected abstract void reportValue(MetricContext context, String name, long value);
  // Pushes the delta since the last report under "<name>.count", then records the new total.
  private void reportCount(MetricContext context, String name, long currentCount) {
    reportIncremental(context, MetricRegistry.name(name, Measurements.COUNT.getName()),
        calculateIncremental(name, currentCount));
    // Remember the current count
    this.previousCounts.put(name, currentCount);
  }
  // First report for a name pushes the full count; subsequent reports push the difference.
  private long calculateIncremental(String name, long currentCount) {
    if (this.previousCounts.containsKey(name)) {
      return currentCount - this.previousCounts.get(name);
    }
    return currentCount;
  }
}
| 3,557 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-hadoop/src/main/java/org/apache/gobblin/metrics/hadoop/HadoopCounterReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.hadoop;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.mapred.Reporter;
import com.codahale.metrics.MetricFilter;
import org.apache.gobblin.metrics.reporter.ContextAwareScheduledReporter;
import org.apache.gobblin.metrics.MetricContext;
/**
* An implementation of {@link org.apache.gobblin.metrics.reporter.ContextAwareScheduledReporter} that reports
* applicable metrics as Hadoop counters using a {@link org.apache.hadoop.mapred.Reporter}.
*
* @author Yinan Li
*/
public class HadoopCounterReporter extends AbstractHadoopCounterReporter {

  // Old mapred-API handle through which Hadoop counters are created and updated.
  private final Reporter hadoopReporter;

  protected HadoopCounterReporter(MetricContext context, String name, MetricFilter filter,
      TimeUnit rateUnit, TimeUnit durationUnit, Reporter reporter) {
    super(context, name, filter, rateUnit, durationUnit);
    this.hadoopReporter = reporter;
  }

  /** Adds {@code incremental} to the counter keyed by context name and metric name. */
  @Override
  protected void reportIncremental(MetricContext context, String name, long incremental) {
    this.hadoopReporter.getCounter(context.getName(), name).increment(incremental);
  }

  /** Overwrites the counter keyed by context name and metric name with {@code value}. */
  @Override
  protected void reportValue(MetricContext context, String name, long value) {
    this.hadoopReporter.getCounter(context.getName(), name).setValue(value);
  }

  /**
   * Create a new {@link Builder} whose reporter name is the class name of
   * {@link HadoopCounterReporter}.
   *
   * @param reporter a {@link org.apache.hadoop.mapred.Reporter} used to access Hadoop counters
   * @return a new {@link Builder}
   */
  public static Builder builder(Reporter reporter) {
    return builder(HadoopCounterReporter.class.getName(), reporter);
  }

  /**
   * Create a new {@link Builder} with the given reporter name.
   *
   * @param name the reporter name to use
   * @param reporter a {@link org.apache.hadoop.mapred.Reporter} used to access Hadoop counters
   * @return a new {@link Builder}
   */
  public static Builder builder(String name, Reporter reporter) {
    return new Builder(name, reporter);
  }

  /**
   * Builds {@link HadoopCounterReporter} instances bound to a {@link MetricContext}.
   */
  public static class Builder extends ContextAwareScheduledReporter.Builder<HadoopCounterReporter, Builder> {

    private final Reporter reporter;

    public Builder(String name, Reporter reporter) {
      super(name);
      this.reporter = reporter;
    }

    @Override
    public HadoopCounterReporter build(MetricContext context) {
      return new HadoopCounterReporter(
          context, this.name, this.filter, this.rateUnit, this.durationUnit, this.reporter);
    }
  }
}
| 3,558 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommand;
import org.apache.gobblin.source.extractor.schema.Schema;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import org.apache.gobblin.source.workunit.WorkUnit;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
@Alpha
@Slf4j
public class ZuoraExtractor extends QueryBasedExtractor<JsonArray, JsonElement> {
  private static final Gson GSON = new Gson();
  // Formats used to render watermark values into ZOQL predicate literals.
  private static final String TIMESTAMP_FORMAT = "yyyy-MM-dd'T'HH:mm:ss";
  private static final String DATE_FORMAT = "yyyy-MM-dd";
  private static final String HOUR_FORMAT = "HH";
  // REST client used to submit the batch-query job and discover its result files.
  private final ZuoraClient _client;
  // Streams records out of the job's result files; recreated if a previous job failed.
  private ZuoraClientFilesStreamer _fileStreamer;
  // File ids of the batches produced by the submitted job; null until the job is posted.
  private List<String> _fileIds;
  // Output column names, populated by extractMetadata() (or extractHeader()).
  private List<String> _header;
  public ZuoraExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
    _client = new ZuoraClientImpl(workUnitState);
  }
  /**
   * Submits the Zuora batch-query job on the first call (bounded by {@code predicateList})
   * and then streams records out of the job's result files.  Returns {@code null} once the
   * streamer reports the job finished.
   * NOTE(review): callers presumably treat a null iterator as end-of-data — confirm against
   * the QueryBasedExtractor contract.
   */
  @Override
  public Iterator<JsonElement> getRecordSet(String schema, String entity, WorkUnit workUnit,
      List<Predicate> predicateList)
      throws DataRecordException, IOException {
    // Recreate the streamer on first use or after a failed job so streaming can restart.
    if (_fileStreamer == null || _fileStreamer.isJobFailed()) {
      _fileStreamer = new ZuoraClientFilesStreamer(workUnitState, _client);
    }
    // Post the batch-query job exactly once and remember its result-file ids.
    if (_fileIds == null) {
      List<Command> cmds = _client.buildPostCommand(predicateList);
      CommandOutput<RestApiCommand, String> postResponse = _client.executePostRequest(cmds.get(0));
      String jobId = ZuoraClientImpl.getJobId(postResponse);
      _fileIds = _client.getFileIds(jobId);
    }
    if (!_fileStreamer.isJobFinished()) {
      return _fileStreamer.streamFiles(_fileIds, _header).iterator();
    }
    return null;
  }
  // The pull is (re)initial if no job has been posted yet, or the previous one failed.
  @Override
  protected boolean isInitialPull() {
    return _fileIds == null || _fileStreamer.isJobFailed();
  }
  /**
   * Builds the output schema from the configured source.schema JSON: marks watermark and
   * primary-key columns non-nullable, and records the column order in {@code _header}.
   *
   * @throws SchemaException if the configured schema cannot be parsed or processed
   */
  @Override
  public void extractMetadata(String schema, String entity, WorkUnit workUnit)
      throws SchemaException, IOException {
    String deltaFields = workUnit.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY);
    String primaryKeyColumn = workUnit.getProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY);
    JsonArray columnArray = new JsonArray();
    _header = new ArrayList<>();
    try {
      JsonArray array =
          GSON.fromJson(workUnit.getProp(ConfigurationKeys.SOURCE_SCHEMA), JsonArray.class).getAsJsonArray();
      for (JsonElement columnElement : array) {
        Schema obj = GSON.fromJson(columnElement, Schema.class);
        String columnName = obj.getColumnName();
        _header.add(columnName);
        boolean isWaterMarkColumn = isWatermarkColumn(deltaFields, columnName);
        if (isWaterMarkColumn) {
          obj.setWaterMark(true);
          obj.setNullable(false);
        }
        // getPrimarykeyIndex() > 0 means the column is part of the primary key.
        int primarykeyIndex = getPrimarykeyIndex(primaryKeyColumn, columnName);
        obj.setPrimaryKey(primarykeyIndex);
        boolean isPrimaryKeyColumn = primarykeyIndex > 0;
        if (isPrimaryKeyColumn) {
          obj.setNullable(false);
        }
        // Round-trip through JSON to convert the Schema POJO into a JsonObject.
        String jsonStr = GSON.toJson(obj);
        JsonObject jsonObject = GSON.fromJson(jsonStr, JsonObject.class).getAsJsonObject();
        columnArray.add(jsonObject);
      }
      log.info("Update Schema is:" + columnArray);
      setOutputSchema(columnArray);
    } catch (Exception e) {
      throw new SchemaException("Failed to get schema using rest api; error - " + e.getMessage(), e);
    }
  }
  /**
   * Not supported for Zuora; callers must set source.querybased.skip.high.watermark.calc.
   */
  @Override
  public long getMaxWatermark(String schema, String entity, String watermarkColumn,
      List<Predicate> snapshotPredicateList, String watermarkSourceFormat)
      throws HighWatermarkException {
    throw new HighWatermarkException(
        "GetMaxWatermark with query is not supported! Please set source.querybased.skip.high.watermark.calc to true.");
  }
  @Override
  public long getSourceCount(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws RecordCountException {
    // Set source.querybased.skip.count.calc to true will set SourceCount to -1. However, ...
    // This ExpectedRecordCount will determine tablesWithNoUpdatesOnPreviousRun in QueryBasedSource.
    // We need to return a positive number to bypass this check and move Low watermark forward.
    return 1;
  }
  // Maps a watermark type to the date/time pattern used when formatting predicate values.
  @Override
  public String getWatermarkSourceFormat(WatermarkType watermarkType) {
    switch (watermarkType) {
      case TIMESTAMP:
        return TIMESTAMP_FORMAT;
      case DATE:
        return DATE_FORMAT;
      case HOUR:
        return HOUR_FORMAT;
      default:
        throw new RuntimeException("Watermark type " + watermarkType.toString() + " is not supported");
    }
  }
  // Builds an hour-granularity predicate, e.g. "UpdatedDate >= '13'".
  @Override
  public String getHourPredicateCondition(String column, long value, String valueFormat, String operator) {
    String hourPredicate = String
        .format("%s %s '%s'", column, operator, Utils.toDateTimeFormat(Long.toString(value), valueFormat, HOUR_FORMAT));
    log.info("Hour predicate is: " + hourPredicate);
    return hourPredicate;
  }
  // Builds a date-granularity predicate, e.g. "UpdatedDate >= '2017-01-01'".
  @Override
  public String getDatePredicateCondition(String column, long value, String valueFormat, String operator) {
    String datePredicate = String
        .format("%s %s '%s'", column, operator, Utils.toDateTimeFormat(Long.toString(value), valueFormat, DATE_FORMAT));
    log.info("Date predicate is: " + datePredicate);
    return datePredicate;
  }
  // Builds a timestamp-granularity predicate, e.g. "UpdatedDate >= '2017-01-01T00:00:00'".
  @Override
  public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator) {
    String timeStampPredicate = String.format("%s %s '%s'", column, operator,
        Utils.toDateTimeFormat(Long.toString(value), valueFormat, TIMESTAMP_FORMAT));
    log.info("Timestamp predicate is: " + timeStampPredicate);
    return timeStampPredicate;
  }
  // Source-type -> target-type mapping used when converting column data types.
  @Override
  public Map<String, String> getDataTypeMap() {
    Map<String, String> dataTypeMap =
        ImmutableMap.<String, String>builder().put("date", "date").put("datetime", "timestamp").put("time", "time")
            .put("string", "string").put("int", "int").put("long", "long").put("float", "float").put("double", "double")
            .put("decimal", "double").put("varchar", "string").put("boolean", "boolean").build();
    return dataTypeMap;
  }
  /**
   * Derives column names from the first CSV line of a result file.  When no schema was
   * configured, also resolves and sets the output schema from those names.
   */
  List<String> extractHeader(ArrayList<String> firstLine) {
    List<String> header = ZuoraUtil.getHeader(firstLine);
    if (StringUtils.isBlank(workUnitState.getProp(ConfigurationKeys.SOURCE_SCHEMA))) {
      List<String> timeStampColumns = Lists.newArrayList();
      String timeStampColumnString = workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_TIMESTAMP_COLUMNS);
      if (StringUtils.isNotBlank(timeStampColumnString)) {
        timeStampColumns = Arrays.asList(timeStampColumnString.toLowerCase().replaceAll(" ", "").split(","));
      }
      setSchema(header, timeStampColumns);
    }
    log.info("Record header: " + header);
    return header;
  }
  // Builds a schema purely from column names when none was configured; columns listed in
  // timestampColumns (and watermark columns) are typed as timestamps.
  private void setSchema(List<String> cols, List<String> timestampColumns) {
    JsonArray columnArray = new JsonArray();
    for (String columnName : cols) {
      Schema obj = new Schema();
      obj.setColumnName(columnName);
      obj.setComment("resolved");
      obj.setWaterMark(isWatermarkColumn(workUnit.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY), columnName));
      if (isWatermarkColumn(workUnit.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY), columnName)) {
        obj.setNullable(false);
        obj.setDataType(convertDataType(columnName, "timestamp", null, null));
      } else if (getPrimarykeyIndex(workUnit.getProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY), columnName)
          == 0) {
        // set all columns as nullable except primary key and watermark columns
        obj.setNullable(true);
      }
      if (timestampColumns != null && timestampColumns.contains(columnName.toLowerCase())) {
        obj.setDataType(convertDataType(columnName, "timestamp", null, null));
      }
      obj.setPrimaryKey(
          getPrimarykeyIndex(workUnit.getProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY), columnName));
      String jsonStr = GSON.toJson(obj);
      JsonObject jsonObject = GSON.fromJson(jsonStr, JsonObject.class).getAsJsonObject();
      columnArray.add(jsonObject);
    }
    log.info("Resolved Schema: " + columnArray);
    this.setOutputSchema(columnArray);
  }
  // No persistent connection is held; the REST client opens one per request.
  @Override
  public void closeConnection()
      throws Exception {
  }
  // Record retrieval happens through file streaming, not a direct source-API query.
  @Override
  public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit,
      List<Predicate> predicateList)
      throws IOException {
    throw new RuntimeException("Not supported");
  }
  @Override
  public void setTimeOut(int timeOut) {
    // Ignore for now
  }
}
| 3,559 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraQuery.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.Serializable;
import org.apache.gobblin.annotation.Alpha;
import com.google.common.base.Strings;
@Alpha
public class ZuoraQuery implements Serializable {
  private static final long serialVersionUID = 1L;

  // Serialized as the Zuora batch-query payload, hence the public fields.
  public String name;
  public String query;
  public String type = "zoqlexport";
  // Stateful-mode deleted-record tracking; see:
  // https://knowledgecenter.zuora.com/DC_Developers/T_Aggregate_Query_API/BA_Stateless_and_Stateful_Modes
  public ZuoraDeletedColumn deleted = null;

  ZuoraQuery(String name, String query, String deleteColumn) {
    this.name = name;
    this.query = query;
    // Only request deleted-record tracking when a delete column is configured.
    if (!Strings.isNullOrEmpty(deleteColumn)) {
      this.deleted = new ZuoraDeletedColumn(deleteColumn);
    }
  }

  public String getName() {
    return this.name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getQuery() {
    return this.query;
  }

  public void setQuery(String query) {
    this.query = query;
  }

  public String getType() {
    return this.type;
  }

  public void setType(String type) {
    this.type = type;
  }
}
| 3,560 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.IOException;
import java.util.List;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommand;
import org.apache.gobblin.source.extractor.watermark.Predicate;
@Alpha
public interface ZuoraClient {
  /**
   * Builds the REST command(s) that submit a Zuora batch-query job, applying the
   * given watermark predicates to the configured query.
   */
  List<Command> buildPostCommand(List<Predicate> predicateList);
  /**
   * Executes the job-submission POST request (with retries) and returns the raw response.
   *
   * @throws DataRecordException if the request ultimately fails
   */
  CommandOutput<RestApiCommand, String> executePostRequest(final Command command)
      throws DataRecordException;
  /**
   * Polls the job identified by {@code jobId} until it completes and returns the ids of
   * its result files.
   *
   * @throws DataRecordException if the job status cannot be obtained
   */
  List<String> getFileIds(final String jobId)
      throws DataRecordException, IOException;
  /**
   * Executes a single GET request against the Zuora REST API and returns the raw response.
   */
  CommandOutput<RestApiCommand, String> executeGetRequest(final Command cmd)
      throws Exception;
  /**
   * Resolves {@code relativeUrl} against the configured Zuora host name.
   */
  String getEndPoint(String relativeUrl);
}
| 3,561 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import org.apache.gobblin.annotation.Alpha;
@Alpha
public class ZuoraConfigurationKeys {
  // Utility holder for configuration property names; never instantiated.
  private ZuoraConfigurationKeys() {
  }
  public static final String ZUORA_OUTPUT_FORMAT = "zuora.output.format";
  public static final String ZUORA_API_NAME = "zuora.api.name";
  public static final String ZUORA_PARTNER = "zuora.partner";
  public static final String ZUORA_PROJECT = "zuora.project";
  /**
   * If you add a deleted column, for example, zuora.deleted.column=IsDeleted
   * the schema needs to be changed accordingly.
   * For example, the column below needs to be included as the first column in your schema definition
   * { "columnName":"IsDeleted", "isNullable":"FALSE", "dataType":{"type":"boolean"}, "comment":"" }
   *
   * Check the documentation at
   * https://knowledgecenter.zuora.com/DC_Developers/T_Aggregate_Query_API/BA_Stateless_and_Stateful_Modes
   */
  public static final String ZUORA_DELETED_COLUMN = "zuora.deleted.column";
  /**
   * @deprecated misspelled alias kept for backward compatibility;
   *             use {@link #ZUORA_DELETED_COLUMN} instead.
   */
  @Deprecated
  public static final String ZUORA_DELTED_COLUMN = ZUORA_DELETED_COLUMN;
  public static final String ZUORA_TIMESTAMP_COLUMNS = "zuora.timestamp.columns";
  public static final String ZUORA_ROW_LIMIT = "zuora.row.limit";
  // Retry counts / wait times (ms) for the three phases of a batch-query job.
  public static final String ZUORA_API_RETRY_POST_COUNT = "zuora.api.retry.post.count";
  public static final String ZUORA_API_RETRY_POST_WAIT_TIME = "zuora.api.retry.post.wait_time";
  public static final String ZUORA_API_RETRY_GET_FILES_COUNT = "zuora.api.retry.get_files.count";
  public static final String ZUORA_API_RETRY_GET_FILES_WAIT_TIME = "zuora.api.retry.get_files.wait_time";
  public static final String ZUORA_API_RETRY_STREAM_FILES_COUNT = "zuora.api.retry.stream_files.count";
  public static final String ZUORA_API_RETRY_STREAM_FILES_WAIT_TIME = "zuora.api.retry.stream_files.wait_time";
}
| 3,562 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import javax.net.ssl.HttpsURLConnection;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.password.PasswordManager;
import com.google.common.collect.Lists;
@Alpha
@Slf4j
public class ZuoraUtil {
private ZuoraUtil() {
}
public static HttpsURLConnection getConnection(String urlPath, WorkUnitState workUnitState)
throws IOException {
log.info("URL: " + urlPath);
URL url = new URL(urlPath);
HttpsURLConnection connection;
String proxyUrl = workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL);
if (StringUtils.isNotBlank(proxyUrl)) {
int proxyPort = workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT);
Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyUrl, proxyPort));
connection = (HttpsURLConnection) url.openConnection(proxy);
} else {
connection = (HttpsURLConnection) url.openConnection();
}
connection.setRequestProperty("Content-Type", "application/json");
String userName = workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
if (StringUtils.isNotBlank(userName)) {
String password =
PasswordManager.getInstance(workUnitState).readPassword(workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
String userpass = userName + ":" + password;
String basicAuth = "Basic " + new String(new Base64().encode(userpass.getBytes("UTF-8")), "UTF-8");
connection.setRequestProperty("Authorization", basicAuth);
}
connection.setConnectTimeout(workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_TIMEOUT, 30000));
return connection;
}
public static String getStringFromInputStream(InputStream is) {
BufferedReader br = null;
StringBuilder sb = new StringBuilder();
String line;
try {
br = new BufferedReader(new InputStreamReader(is, "UTF-8"));
while ((line = br.readLine()) != null) {
sb.append(line);
}
} catch (IOException e) {
e.printStackTrace();
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
return sb.toString();
}
public static List<String> getHeader(ArrayList<String> cols) {
List<String> columns = Lists.newArrayList();
for (String col : cols) {
String[] colRefs = col.split(":");
String columnName;
if (colRefs.length >= 2) {
columnName = colRefs[1];
} else {
columnName = colRefs[0];
}
columns.add(columnName.replaceAll(" ", "").trim());
}
return columns;
}
}
| 3,563 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraClientImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import javax.net.ssl.HttpsURLConnection;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommand;
import org.apache.gobblin.source.extractor.extract.restapi.RestApiCommandOutput;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.jdbc.SqlQueryUtils;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
@Alpha
@Slf4j
class ZuoraClientImpl implements ZuoraClient {
  private static final Gson GSON = new Gson();
  private final WorkUnitState _workUnitState;
  private final String _hostName;
  // Retries POSTs (job submission) and GETs (job-status polling) on IOException.
  private final Retryer<CommandOutput<RestApiCommand, String>> _postRetryer;
  private final Retryer<List<String>> _getRetryer;
  ZuoraClientImpl(WorkUnitState workUnitState) {
    _workUnitState = workUnitState;
    _hostName = _workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    _postRetryer =
        RetryerBuilder.<CommandOutput<RestApiCommand, String>>newBuilder().retryIfExceptionOfType(IOException.class)
            .withStopStrategy(StopStrategies
                .stopAfterAttempt(workUnitState.getPropAsInt(ZuoraConfigurationKeys.ZUORA_API_RETRY_POST_COUNT, 20)))
            .withWaitStrategy(WaitStrategies
                .fixedWait(workUnitState.getPropAsInt(ZuoraConfigurationKeys.ZUORA_API_RETRY_POST_WAIT_TIME, 60000),
                    TimeUnit.MILLISECONDS)).build();
    _getRetryer = RetryerBuilder.<List<String>>newBuilder().retryIfExceptionOfType(IOException.class).withStopStrategy(
        StopStrategies
            .stopAfterAttempt(workUnitState.getPropAsInt(ZuoraConfigurationKeys.ZUORA_API_RETRY_GET_FILES_COUNT, 30)))
        .withWaitStrategy(WaitStrategies
            .fixedWait(workUnitState.getPropAsInt(ZuoraConfigurationKeys.ZUORA_API_RETRY_GET_FILES_WAIT_TIME, 30000),
                TimeUnit.MILLISECONDS)).build();
  }
  /**
   * Builds the POST command that submits the batch-query job: the configured query
   * plus watermark predicates and an optional row limit, wrapped in a ZuoraParams payload.
   */
  @Override
  public List<Command> buildPostCommand(List<Predicate> predicateList) {
    String host = getEndPoint("batch-query/");
    List<String> params = Lists.newLinkedList();
    params.add(host);
    String query = _workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_QUERY,
        "SELECT * FROM " + _workUnitState.getProp(ConfigurationKeys.SOURCE_ENTITY));
    if (predicateList != null) {
      for (Predicate predicate : predicateList) {
        query = SqlQueryUtils.addPredicate(query, predicate.getCondition());
      }
    }
    String rowLimit = _workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_ROW_LIMIT);
    if (StringUtils.isNotBlank(rowLimit)) {
      query += " LIMIT " + rowLimit;
    }
    List<ZuoraQuery> queries = Lists.newArrayList();
    queries.add(new ZuoraQuery(_workUnitState.getProp(ConfigurationKeys.JOB_NAME_KEY), query,
        _workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_DELTED_COLUMN, "")));
    ZuoraParams filterPayload = new ZuoraParams(_workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_PARTNER, "sample"),
        _workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_PROJECT, "sample"), queries,
        _workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_API_NAME, "sample"),
        _workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_OUTPUT_FORMAT, "csv"),
        _workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_VERSION, "1.1"));
    params.add(GSON.toJson(filterPayload));
    return Collections.singletonList(new RestApiCommand().build(params, RestApiCommand.RestApiCommandType.POST));
  }
  /**
   * Executes the job-submission POST through the retryer.
   *
   * @throws DataRecordException once all retry attempts are exhausted
   */
  @Override
  public CommandOutput<RestApiCommand, String> executePostRequest(final Command command)
      throws DataRecordException {
    try {
      return _postRetryer.call(new Callable<CommandOutput<RestApiCommand, String>>() {
        @Override
        public CommandOutput<RestApiCommand, String> call()
            throws Exception {
          return executePostRequestInternal(command);
        }
      });
    } catch (Exception e) {
      throw new DataRecordException("Post request failed for command: " + command.toString(), e);
    }
  }
  /**
   * Extracts the batch-query job id from the POST response body.
   *
   * @throws DataRecordException if the response carries no output
   */
  public static String getJobId(CommandOutput<?, ?> postResponse)
      throws DataRecordException {
    Iterator<String> itr = (Iterator<String>) postResponse.getResults().values().iterator();
    if (!itr.hasNext()) {
      // Fixed copy-paste error: this connector talks to Zuora, not RightNowCloud.
      throw new DataRecordException("Failed to get data from Zuora; REST postResponse has no output");
    }
    String stringResponse = itr.next();
    log.info("Zuora post response: " + stringResponse);
    JsonObject jsonObject = GSON.fromJson(stringResponse, JsonObject.class).getAsJsonObject();
    return jsonObject.get("id").getAsString();
  }
  /**
   * Polls the job status (through the retryer) until it completes and returns the ids of
   * all result files ("batches") produced by the job.
   */
  @Override
  public List<String> getFileIds(final String jobId)
      throws DataRecordException, IOException {
    log.info("Getting files for job " + jobId);
    String url = getEndPoint("batch-query/jobs/" + jobId);
    final Command cmd = new RestApiCommand().build(Collections.singleton(url), RestApiCommand.RestApiCommandType.GET);
    try {
      return _getRetryer.call(new Callable<List<String>>() {
        @Override
        public List<String> call()
            throws Exception {
          return executeGetRequestInternal(cmd, jobId);
        }
      });
    } catch (Exception e) {
      throw new DataRecordException("Get request failed for command: " + cmd.toString(), e);
    }
  }
  // One poll of the job status; throws IOException while the job is not yet "completed"
  // because _getRetryer is configured to retry on IOException.
  private List<String> executeGetRequestInternal(Command cmd, String jobId)
      throws IOException, DataRecordException {
    CommandOutput<RestApiCommand, String> response = executeGetRequest(cmd);
    Iterator<String> itr = response.getResults().values().iterator();
    if (!itr.hasNext()) {
      throw new DataRecordException("Failed to get file Ids based on job id " + jobId);
    }
    String output = itr.next();
    JsonObject jsonResp = GSON.fromJson(output, JsonObject.class).getAsJsonObject();
    String status = jsonResp.get("status").getAsString();
    log.info(String.format("Job %s %s: %s", jobId, status, output));
    if (!status.equals("completed")) {
      throw new IOException("Retrying... This exception will be handled by retryer.");
    }
    List<String> fileIds = Lists.newArrayList();
    for (JsonElement jsonObj : jsonResp.get("batches").getAsJsonArray()) {
      fileIds.add(jsonObj.getAsJsonObject().get("fileId").getAsString());
    }
    log.info("Get Files Response - FileIds: " + fileIds);
    return fileIds;
  }
  /**
   * Executes a single GET request and wraps the response body in a command output.
   * The connection is always disconnected before returning.
   */
  @Override
  public CommandOutput<RestApiCommand, String> executeGetRequest(final Command cmd)
      throws IOException {
    HttpsURLConnection connection = null;
    try {
      String urlPath = cmd.getParams().get(0);
      connection = ZuoraUtil.getConnection(urlPath, _workUnitState);
      connection.setRequestProperty("Accept", "application/json");
      String result = ZuoraUtil.getStringFromInputStream(connection.getInputStream());
      CommandOutput<RestApiCommand, String> output = new RestApiCommandOutput();
      output.put((RestApiCommand) cmd, result);
      return output;
    } finally {
      if (connection != null) {
        connection.disconnect();
      }
    }
  }
  // Sends the JSON payload via POST and reads the whole response body.
  private CommandOutput<RestApiCommand, String> executePostRequestInternal(Command command)
      throws IOException {
    List<String> params = command.getParams();
    String payLoad = params.get(1);
    log.info("Executing post request with payLoad:" + payLoad);
    HttpsURLConnection connection = null;
    try {
      connection = ZuoraUtil.getConnection(params.get(0), _workUnitState);
      connection.setDoOutput(true);
      connection.setRequestMethod("POST");
      // try-with-resources: the request output stream was previously never closed (leak).
      try (OutputStream os = connection.getOutputStream()) {
        os.write(payLoad.getBytes("UTF-8"));
        os.flush();
      }
      StringBuilder result = new StringBuilder();
      // try-with-resources also replaces the manual close of the response reader.
      try (BufferedReader br = new BufferedReader(
          new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
        String line;
        while ((line = br.readLine()) != null) {
          result.append(line);
        }
      }
      CommandOutput<RestApiCommand, String> output = new RestApiCommandOutput();
      output.put((RestApiCommand) command, result.toString());
      return output;
    } finally {
      if (connection != null) {
        connection.disconnect();
      }
    }
  }
  /** Resolves {@code relativeUrl} against the configured Zuora host name. */
  @Override
  public String getEndPoint(String relativeUrl) {
    return _hostName + relativeUrl;
  }
}
| 3,564 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.IOException;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.exception.ExtractPrepareException;
import org.apache.gobblin.source.extractor.extract.QueryBasedSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
/**
* An implementation of Zuora source to get work units
*/
@Alpha
public class ZuoraSource extends QueryBasedSource<JsonArray, JsonElement> {
private static final Logger LOG = LoggerFactory.getLogger(QueryBasedSource.class);
public Extractor<JsonArray, JsonElement> getExtractor(WorkUnitState state) throws IOException {
Extractor<JsonArray, JsonElement> extractor = null;
try {
extractor = new ZuoraExtractor(state).build();
} catch (ExtractPrepareException e) {
LOG.error("Failed to prepare extractor: error - " + e.getMessage());
throw new IOException(e);
}
return extractor;
}
}
| 3,565 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraParams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.Serializable;
import java.util.List;
import org.apache.gobblin.annotation.Alpha;
/**
 * Serializable request descriptor for a Zuora bulk-export job: identifies the partner,
 * project and job name, the queries to run, and the output format/version options.
 *
 * <p>Plain mutable bean; fields are package-private by design, with public accessors.
 * Flag-like options ({@code encrypted}, {@code useQueryLabels}, {@code dateTimeUtc}) are
 * kept as strings because they are serialized verbatim into the request payload.</p>
 */
@Alpha
public class ZuoraParams implements Serializable {
  private static final long serialVersionUID = 1L;

  String name;
  String partner;
  String project;
  List<ZuoraQuery> queries;
  String format;
  String version;
  // Defaults mirror the Zuora API defaults for optional request attributes.
  String encrypted = "none";
  String useQueryLabels = "false";
  String dateTimeUtc = "true";

  ZuoraParams(String partner, String project, List<ZuoraQuery> queries, String name, String format, String version) {
    this.partner = partner;
    this.project = project;
    this.queries = queries;
    this.name = name;
    this.format = format;
    this.version = version;
  }

  public String getName() {
    return this.name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getPartner() {
    return this.partner;
  }

  public void setPartner(String partner) {
    this.partner = partner;
  }

  public String getProject() {
    return this.project;
  }

  public void setProject(String project) {
    this.project = project;
  }

  public List<ZuoraQuery> getQueries() {
    return this.queries;
  }

  public void setQueries(List<ZuoraQuery> queries) {
    this.queries = queries;
  }

  public String getFormat() {
    return this.format;
  }

  public void setFormat(String format) {
    this.format = format;
  }

  public String getVersion() {
    return this.version;
  }

  public void setVersion(String version) {
    this.version = version;
  }

  public String getEncrypted() {
    return this.encrypted;
  }

  public void setEncrypted(String encrypted) {
    this.encrypted = encrypted;
  }

  public String getUseQueryLabels() {
    return this.useQueryLabels;
  }

  public void setUseQueryLabels(String useQueryLabels) {
    this.useQueryLabels = useQueryLabels;
  }

  public String getDateTimeUtc() {
    return this.dateTimeUtc;
  }

  public void setDateTimeUtc(String dateTimeUtc) {
    this.dateTimeUtc = dateTimeUtc;
  }
}
| 3,566 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraDeletedColumn.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.Serializable;
import org.apache.gobblin.annotation.Alpha;
/**
 * Serializable holder for the name of a single deleted-record column used when building
 * a Zuora export request.
 */
@Alpha
public class ZuoraDeletedColumn implements Serializable {
  private static final long serialVersionUID = 1L;

  // Package-private by design, mirroring the other Zuora request POJOs.
  String column;

  ZuoraDeletedColumn(String columnName) {
    this.column = columnName;
  }

  public String getColumn() {
    return this.column;
  }

  public void setColumn(String column) {
    this.column = column;
  }
}
| 3,567 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-zuora/src/main/java/org/apache/gobblin/zuora/ZuoraClientFilesStreamer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.zuora;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
import javax.net.ssl.HttpsURLConnection;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.resultset.RecordSet;
import org.apache.gobblin.source.extractor.resultset.RecordSetList;
import org.apache.gobblin.source.extractor.utils.InputStreamCSVReader;
import org.apache.gobblin.source.extractor.utils.Utils;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.gson.JsonElement;
@Alpha
@Slf4j
public class ZuoraClientFilesStreamer {
private final String outputFormat;
private final WorkUnitState _workUnitState;
private final ZuoraClient _client;
private final int batchSize;
private final Retryer<Void> _getRetryer;
private boolean _jobFinished = false;
private boolean _jobFailed = false;
private long _totalRecords = 0;
private BufferedReader _currentReader;
private int _currentFileIndex = -1;
private int _skipHeaderIndex = 0; //Indicate whether the header has been skipped for a file.
private HttpsURLConnection _currentConnection;
public ZuoraClientFilesStreamer(WorkUnitState workUnitState, ZuoraClient client) {
_workUnitState = workUnitState;
_client = client;
batchSize = workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_FETCH_SIZE, 2000);
outputFormat = _workUnitState.getProp(ZuoraConfigurationKeys.ZUORA_OUTPUT_FORMAT);
_getRetryer = RetryerBuilder.<Void>newBuilder().retryIfExceptionOfType(IOException.class).withStopStrategy(
StopStrategies
.stopAfterAttempt(workUnitState.getPropAsInt(ZuoraConfigurationKeys.ZUORA_API_RETRY_STREAM_FILES_COUNT, 3)))
.withWaitStrategy(WaitStrategies
.fixedWait(workUnitState.getPropAsInt(ZuoraConfigurationKeys.ZUORA_API_RETRY_STREAM_FILES_WAIT_TIME, 10000),
TimeUnit.MILLISECONDS)).build();
}
public RecordSet<JsonElement> streamFiles(List<String> fileList, List<String> header)
throws DataRecordException {
try {
if (currentReaderDone()) {
++_currentFileIndex;
closeCurrentSession();
if (_currentFileIndex >= fileList.size()) {
log.info("Finished streaming all files.");
_jobFinished = true;
return new RecordSetList<>();
}
initializeForNewFile(fileList);
}
log.info(String
.format("Streaming file at index %s with id %s ...", _currentFileIndex, fileList.get(_currentFileIndex)));
InputStreamCSVReader reader = new InputStreamCSVReader(_currentReader);
if (_skipHeaderIndex == _currentFileIndex) {
reader.nextRecord(); //skip header
++_skipHeaderIndex;
}
RecordSetList<JsonElement> rs = new RecordSetList<>();
List<String> csvRecord;
int count = 0;
while ((csvRecord = reader.nextRecord()) != null) {
rs.add(Utils.csvToJsonObject(header, csvRecord, header.size()));
++_totalRecords;
if (++count >= batchSize) {
break;
}
}
log.info("Total number of records downloaded: " + _totalRecords);
return rs;
} catch (IOException e) {
try {
closeCurrentSession();
} catch (IOException e1) {
log.error(e1.getMessage());
}
_jobFailed = true;
throw new DataRecordException("Failed to get records from Zuora: " + e.getMessage(), e);
}
}
private void initializeForNewFile(List<String> fileList)
throws DataRecordException {
final String fileId = fileList.get(_currentFileIndex);
log.info(String.format("Start streaming file at index %s with id %s", _currentFileIndex, fileId));
try {
_getRetryer.call(new Callable<Void>() {
@Override
public Void call()
throws Exception {
Pair<HttpsURLConnection, BufferedReader> initialized = createReader(fileId, _workUnitState);
_currentConnection = initialized.getLeft();
_currentReader = initialized.getRight();
return null;
}
});
} catch (Exception e) {
throw new DataRecordException(
String.format("Retryer failed: Build connection for streaming failed for file id: %s", fileId), e);
}
}
private Pair<HttpsURLConnection, BufferedReader> createReader(String fileId, WorkUnitState workUnitState)
throws IOException {
HttpsURLConnection connection = ZuoraUtil.getConnection(_client.getEndPoint("file/" + fileId), workUnitState);
connection.setRequestProperty("Accept", "application/json");
InputStream stream = connection.getInputStream();
if (StringUtils.isNotBlank(outputFormat) && outputFormat.equalsIgnoreCase("gzip")) {
stream = new GZIPInputStream(stream);
}
return new ImmutablePair<>(connection, new BufferedReader(new InputStreamReader(stream, "UTF-8")));
}
private void closeCurrentSession()
throws IOException {
if (_currentConnection != null) {
_currentConnection.disconnect();
}
if (_currentReader != null) {
_currentReader.close();
}
}
private boolean currentReaderDone()
throws IOException {
//_currentReader.ready() will be false when there is nothing in _currentReader to be read
return _currentReader == null || !_currentReader.ready();
}
public boolean isJobFinished() {
return _jobFinished;
}
public boolean isJobFailed() {
return _jobFailed;
}
} | 3,568 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-service-kafka/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-service-kafka/src/main/java/org/apache/gobblin/service/SimpleKafkaSpecConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Future;
import java.util.regex.Pattern;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord;
import org.apache.gobblin.kafka.client.DecodeableKafkaRecord;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.kafka.client.KafkaConsumerRecord;
import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecConsumer;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.job_spec.AvroJobSpec;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaOffsetRetrievalFailureException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.apache.gobblin.util.CompletedFuture;
import org.apache.gobblin.util.ConfigUtils;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class SimpleKafkaSpecConsumer implements SpecConsumer<Spec>, Closeable {
private static final String CONSUMER_CLIENT_FACTORY_CLASS_KEY = "spec.kafka.consumerClientClassFactory";
private static final String DEFAULT_CONSUMER_CLIENT_FACTORY_CLASS =
"org.apache.gobblin.kafka.client.Kafka08ConsumerClient$Factory";
// Consumer
protected final GobblinKafkaConsumerClient _kafkaConsumer;
protected final List<KafkaPartition> _partitions;
protected final List<Long> _lowWatermark;
protected final List<Long> _nextWatermark;
protected final List<Long> _highWatermark;
private Iterator<KafkaConsumerRecord> messageIterator = null;
private int currentPartitionIdx = -1;
private boolean isFirstRun = true;
private final BinaryDecoder _decoder;
private final SpecificDatumReader<AvroJobSpec> _reader;
private final SchemaVersionWriter<?> _versionWriter;
public SimpleKafkaSpecConsumer(Config config, Optional<Logger> log) {
// Consumer
String kafkaConsumerClientClass = ConfigUtils.getString(config, CONSUMER_CLIENT_FACTORY_CLASS_KEY,
DEFAULT_CONSUMER_CLIENT_FACTORY_CLASS);
try {
Class<?> clientFactoryClass = (Class<?>) Class.forName(kafkaConsumerClientClass);
final GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory factory =
(GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory)
ConstructorUtils.invokeConstructor(clientFactoryClass);
_kafkaConsumer = factory.create(config);
} catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException e) {
if (log.isPresent()) {
log.get().error("Failed to instantiate Kafka consumer from class " + kafkaConsumerClientClass, e);
}
throw new RuntimeException("Failed to instantiate Kafka consumer", e);
}
List<KafkaTopic> kafkaTopics = _kafkaConsumer.getFilteredTopics(Collections.EMPTY_LIST,
Lists.newArrayList(Pattern.compile(config.getString(SimpleKafkaSpecExecutor.SPEC_KAFKA_TOPICS_KEY))));
_partitions = kafkaTopics.get(0).getPartitions();
_lowWatermark = Lists.newArrayList(Collections.nCopies(_partitions.size(), 0L));
_nextWatermark = Lists.newArrayList(Collections.nCopies(_partitions.size(), 0L));
_highWatermark = Lists.newArrayList(Collections.nCopies(_partitions.size(), 0L));
InputStream dummyInputStream = new ByteArrayInputStream(new byte[0]);
_decoder = DecoderFactory.get().binaryDecoder(dummyInputStream, null);
_reader = new SpecificDatumReader<AvroJobSpec>(AvroJobSpec.SCHEMA$);
_versionWriter = new FixedSchemaVersionWriter();
}
public SimpleKafkaSpecConsumer(Config config, Logger log) {
this(config, Optional.of(log));
}
/** Constructor with no logging */
public SimpleKafkaSpecConsumer(Config config) {
this(config, Optional.<Logger>absent());
}
@Override
public Future<? extends List<Pair<SpecExecutor.Verb, Spec>>> changedSpecs() {
List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = new ArrayList<>();
initializeWatermarks();
this.currentPartitionIdx = -1;
while (!allPartitionsFinished()) {
if (currentPartitionFinished()) {
moveToNextPartition();
continue;
}
if (this.messageIterator == null || !this.messageIterator.hasNext()) {
try {
this.messageIterator = fetchNextMessageBuffer();
} catch (Exception e) {
log.error(String.format("Failed to fetch next message buffer for partition %s. Will skip this partition.",
getCurrentPartition()), e);
moveToNextPartition();
continue;
}
if (this.messageIterator == null || !this.messageIterator.hasNext()) {
moveToNextPartition();
continue;
}
}
while (!currentPartitionFinished()) {
if (!this.messageIterator.hasNext()) {
break;
}
KafkaConsumerRecord nextValidMessage = this.messageIterator.next();
// Even though we ask Kafka to give us a message buffer starting from offset x, it may
// return a buffer that starts from offset smaller than x, so we need to skip messages
// until we get to x.
if (nextValidMessage.getOffset() < _nextWatermark.get(this.currentPartitionIdx)) {
continue;
}
_nextWatermark.set(this.currentPartitionIdx, nextValidMessage.getNextOffset());
try {
final AvroJobSpec record;
if (nextValidMessage instanceof ByteArrayBasedKafkaRecord) {
record = decodeRecord((ByteArrayBasedKafkaRecord)nextValidMessage);
} else if (nextValidMessage instanceof DecodeableKafkaRecord){
record = ((DecodeableKafkaRecord<?, AvroJobSpec>) nextValidMessage).getValue();
} else {
throw new IllegalStateException(
"Unsupported KafkaConsumerRecord type. The returned record can either be ByteArrayBasedKafkaRecord"
+ " or DecodeableKafkaRecord");
}
JobSpec.Builder jobSpecBuilder = JobSpec.builder(record.getUri());
Properties props = new Properties();
props.putAll(record.getProperties());
jobSpecBuilder.withJobCatalogURI(record.getUri()).withVersion(record.getVersion())
.withDescription(record.getDescription()).withConfigAsProperties(props);
if (!record.getTemplateUri().isEmpty()) {
jobSpecBuilder.withTemplate(new URI(record.getTemplateUri()));
}
String verbName = record.getMetadata().get(SpecExecutor.VERB_KEY);
SpecExecutor.Verb verb = SpecExecutor.Verb.valueOf(verbName);
changesSpecs.add(new ImmutablePair<>(verb, jobSpecBuilder.build()));
} catch (Throwable t) {
log.error("Could not decode record at partition " + this.currentPartitionIdx +
" offset " + nextValidMessage.getOffset());
}
}
}
return new CompletedFuture<>(changesSpecs, null);
}
private void initializeWatermarks() {
initializeLowWatermarks();
initializeHighWatermarks();
}
private void initializeLowWatermarks() {
try {
int i=0;
for (KafkaPartition kafkaPartition : _partitions) {
if (isFirstRun) {
long earliestOffset = _kafkaConsumer.getEarliestOffset(kafkaPartition);
_lowWatermark.set(i, earliestOffset);
} else {
_lowWatermark.set(i, _highWatermark.get(i));
}
i++;
}
isFirstRun = false;
} catch (KafkaOffsetRetrievalFailureException e) {
throw new RuntimeException(e);
}
}
private void initializeHighWatermarks() {
try {
int i=0;
for (KafkaPartition kafkaPartition : _partitions) {
long latestOffset = _kafkaConsumer.getLatestOffset(kafkaPartition);
_highWatermark.set(i, latestOffset);
i++;
}
} catch (KafkaOffsetRetrievalFailureException e) {
throw new RuntimeException(e);
}
}
private boolean allPartitionsFinished() {
return this.currentPartitionIdx >= _nextWatermark.size();
}
private boolean currentPartitionFinished() {
if (this.currentPartitionIdx == -1) {
return true;
} else if (_nextWatermark.get(this.currentPartitionIdx) >= _highWatermark.get(this.currentPartitionIdx)) {
return true;
} else {
return false;
}
}
private int moveToNextPartition() {
this.messageIterator = null;
return this.currentPartitionIdx ++;
}
private KafkaPartition getCurrentPartition() {
return _partitions.get(this.currentPartitionIdx);
}
private Iterator<KafkaConsumerRecord> fetchNextMessageBuffer() {
return _kafkaConsumer.consume(_partitions.get(this.currentPartitionIdx),
_nextWatermark.get(this.currentPartitionIdx), _highWatermark.get(this.currentPartitionIdx));
}
private AvroJobSpec decodeRecord(ByteArrayBasedKafkaRecord kafkaConsumerRecord) throws IOException {
InputStream is = new ByteArrayInputStream(kafkaConsumerRecord.getMessageBytes());
_versionWriter.readSchemaVersioningInformation(new DataInputStream(is));
Decoder decoder = DecoderFactory.get().binaryDecoder(is, _decoder);
return _reader.read(null, decoder);
}
@Override
public void close() throws IOException {
_kafkaConsumer.close();
}
} | 3,569 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-service-kafka/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-service-kafka/src/main/java/org/apache/gobblin/service/StreamingKafkaSpecConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.AbstractIdleService;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.instrumented.StandardMetricsBridge;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecConsumer;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.job_monitor.AvroJobSpecKafkaJobMonitor;
import org.apache.gobblin.runtime.job_monitor.KafkaJobMonitor;
import org.apache.gobblin.runtime.std.DefaultJobCatalogListenerImpl;
import org.apache.gobblin.util.CompletedFuture;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.service.SimpleKafkaSpecExecutor.SPEC_KAFKA_TOPICS_KEY;
@Slf4j
/**
* SpecConsumer that consumes from kafka in a streaming manner
* Implemented {@link AbstractIdleService} for starting up and shutting down.
*/
public class StreamingKafkaSpecConsumer extends AbstractIdleService implements SpecConsumer<Spec>, Closeable, StandardMetricsBridge {
private static final int DEFAULT_SPEC_STREAMING_BLOCKING_QUEUE_SIZE = 100;
@Getter
private final AvroJobSpecKafkaJobMonitor _jobMonitor;
private final BlockingQueue<ImmutablePair<SpecExecutor.Verb, Spec>> _jobSpecQueue;
private final MutableJobCatalog _jobCatalog;
private final MetricContext _metricContext;
private final Metrics _metrics;
private final int _jobSpecQueueSize;
public StreamingKafkaSpecConsumer(Config config, MutableJobCatalog jobCatalog, Optional<Logger> log) {
String topic = config.getString(SPEC_KAFKA_TOPICS_KEY);
Config defaults = ConfigFactory.parseMap(ImmutableMap.of(AvroJobSpecKafkaJobMonitor.TOPIC_KEY, topic,
KafkaJobMonitor.KAFKA_AUTO_OFFSET_RESET_KEY, KafkaJobMonitor.KAFKA_AUTO_OFFSET_RESET_SMALLEST));
try {
_jobMonitor = (AvroJobSpecKafkaJobMonitor)(new AvroJobSpecKafkaJobMonitor.Factory())
.forConfig(config.withFallback(defaults), jobCatalog);
} catch (IOException e) {
throw new RuntimeException("Could not create job monitor", e);
}
_jobCatalog = jobCatalog;
_jobSpecQueueSize = ConfigUtils.getInt(config, "SPEC_STREAMING_BLOCKING_QUEUE_SIZE",
DEFAULT_SPEC_STREAMING_BLOCKING_QUEUE_SIZE);
_jobSpecQueue = new LinkedBlockingQueue<>(_jobSpecQueueSize);
_metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), this.getClass());
_metrics = new Metrics(this._metricContext);
}
public StreamingKafkaSpecConsumer(Config config, MutableJobCatalog jobCatalog, Logger log) {
this(config, jobCatalog, Optional.of(log));
}
/** Constructor with no logging */
public StreamingKafkaSpecConsumer(Config config, MutableJobCatalog jobCatalog) {
this(config, jobCatalog, Optional.<Logger>absent());
}
/**
* This method returns job specs receive from Kafka. It will block if there are no job specs.
* @return list of (verb, jobspecs) pairs.
*/
@Override
public Future<? extends List<Pair<SpecExecutor.Verb, Spec>>> changedSpecs() {
List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = new ArrayList<>();
try {
Pair<SpecExecutor.Verb, Spec> specPair = _jobSpecQueue.take();
int numSpecFetched = 0;
do {
_metrics.specConsumerJobSpecDeq.mark();
numSpecFetched ++;
changesSpecs.add(specPair);
// if there are more elements then pass them along in this call
specPair = _jobSpecQueue.poll();
// comparing numSpecFetched to _jobSpecQueueSize to make sure the loop will not run infinitely even in peak time
} while (specPair != null && numSpecFetched < _jobSpecQueueSize);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
return new CompletedFuture(changesSpecs, null);
}
@Override
protected void startUp() {
// listener will add job specs to a blocking queue to send to callers of changedSpecs()
// IMPORTANT: This addListener should be invoked after job catalog has been initialized. This is guaranteed because
// StreamingKafkaSpecConsumer is boot after jobCatalog in GobblinClusterManager::startAppLauncherAndServices()
_jobCatalog.addListener(new JobSpecListener());
_jobMonitor.startAsync().awaitRunning();
addJobMonitorMetrics();
}
private void addJobMonitorMetrics() {
_metrics.getContextAwareMetrics().add(_jobMonitor.getNewSpecs());
_metrics.getContextAwareMetrics().add(_jobMonitor.getUpdatedSpecs());
_metrics.getContextAwareMetrics().add(_jobMonitor.getRemovedSpecs());
_metrics.getContextAwareMetrics().add(_jobMonitor.getCancelledSpecs());
_metrics.getContextAwareMetrics().add(_jobMonitor.getTotalSpecs());
_metrics.getContextAwareMetrics().add(_jobMonitor.getMessageParseFailures());
}
@Override
protected void shutDown() {
_jobMonitor.stopAsync().awaitTerminated();
}
@Override
public void close() throws IOException {
shutDown();
}
/**
* JobCatalog listener that puts messages into a blocking queue for consumption by changedSpecs method of
* {@link StreamingKafkaSpecConsumer}
*/
protected class JobSpecListener extends DefaultJobCatalogListenerImpl {
public JobSpecListener() {
super(StreamingKafkaSpecConsumer.this.log);
}
@Override public void onAddJob(JobSpec addedJob) {
super.onAddJob(addedJob);
try {
_jobSpecQueue.put(new ImmutablePair<SpecExecutor.Verb, Spec>(SpecExecutor.Verb.ADD, addedJob));
_metrics.specConsumerJobSpecEnq.mark();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@Override public void onDeleteJob(URI deletedJobURI, String deletedJobVersion) {
super.onDeleteJob(deletedJobURI, deletedJobVersion);
try {
JobSpec.Builder jobSpecBuilder = JobSpec.builder(deletedJobURI);
Properties props = new Properties();
jobSpecBuilder.withVersion(deletedJobVersion).withConfigAsProperties(props);
_jobSpecQueue.put(new ImmutablePair<SpecExecutor.Verb, Spec>(SpecExecutor.Verb.DELETE, jobSpecBuilder.build()));
_metrics.specConsumerJobSpecEnq.mark();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@Override
public void onCancelJob(URI cancelledJobURI) {
super.onCancelJob(cancelledJobURI);
try {
JobSpec.Builder jobSpecBuilder = JobSpec.builder(cancelledJobURI);
jobSpecBuilder.withConfigAsProperties(new Properties());
_jobSpecQueue.put(new ImmutablePair<>(SpecExecutor.Verb.CANCEL, jobSpecBuilder.build()));
_metrics.specConsumerJobSpecEnq.mark();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@Override public void onUpdateJob(JobSpec updatedJob) {
super.onUpdateJob(updatedJob);
try {
_jobSpecQueue.put(new ImmutablePair<SpecExecutor.Verb, Spec>(SpecExecutor.Verb.UPDATE, updatedJob));
_metrics.specConsumerJobSpecEnq.mark();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
/**
 * Metric set for the streaming spec consumer: a gauge on the pending job-spec queue size,
 * enqueue/dequeue meters, and the job monitor's per-category spec counters.
 *
 * <p>Deliberately a non-static inner class: it reads the enclosing consumer's
 * {@code _jobSpecQueue} and {@code _jobMonitor}.
 */
private class Metrics extends StandardMetricsBridge.StandardMetrics {
  private final ContextAwareMeter specConsumerJobSpecEnq;
  private final ContextAwareMeter specConsumerJobSpecDeq;
  // Metric names as exposed through the metric context.
  public static final String SPEC_CONSUMER_JOB_SPEC_QUEUE_SIZE = "specConsumerJobSpecQueueSize";
  public static final String SPEC_CONSUMER_JOB_SPEC_ENQ = "specConsumerJobSpecEnq";
  public static final String SPEC_CONSUMER_JOB_SPEC_DEQ = "specConsumerJobSpecDeq";
  public Metrics(MetricContext context) {
    // Gauge tracks the live size of the enclosing consumer's spec queue.
    this.contextAwareMetrics.add(context.newContextAwareGauge(SPEC_CONSUMER_JOB_SPEC_QUEUE_SIZE,
        StreamingKafkaSpecConsumer.this._jobSpecQueue::size));
    this.specConsumerJobSpecEnq = context.contextAwareMeter(SPEC_CONSUMER_JOB_SPEC_ENQ);
    this.contextAwareMetrics.add(this.specConsumerJobSpecEnq);
    this.specConsumerJobSpecDeq = context.contextAwareMeter(SPEC_CONSUMER_JOB_SPEC_DEQ);
    this.contextAwareMetrics.add(this.specConsumerJobSpecDeq);
    // Also surface the job monitor's own counters through this metric set.
    this.contextAwareMetrics.add(_jobMonitor.getNewSpecs());
    this.contextAwareMetrics.add(_jobMonitor.getUpdatedSpecs());
    this.contextAwareMetrics.add(_jobMonitor.getRemovedSpecs());
    this.contextAwareMetrics.add(_jobMonitor.getCancelledSpecs());
    this.contextAwareMetrics.add(_jobMonitor.getTotalSpecs());
    this.contextAwareMetrics.add(_jobMonitor.getMessageParseFailures());
  }
}
/**
 * Exposes this consumer's {@link StandardMetrics} (queue-size gauge, enqueue/dequeue meters,
 * and the job monitor's spec counters) as a single-element collection.
 */
@Override
public Collection<StandardMetrics> getStandardMetricsCollection() {
  return ImmutableList.of(this._metrics);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.slf4j.Logger;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.typesafe.config.Config;
import javax.annotation.concurrent.NotThreadSafe;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.metrics.reporter.util.AvroBinarySerializer;
import org.apache.gobblin.metrics.reporter.util.AvroSerializer;
import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.runtime.job_spec.AvroJobSpec;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.AsyncDataWriter;
import org.apache.gobblin.writer.WriteCallback;
@Slf4j
@NotThreadSafe
public class SimpleKafkaSpecProducer implements SpecProducer<Spec>, Closeable {
private static final String KAFKA_DATA_WRITER_CLASS_KEY = "spec.kafka.dataWriterClass";
private static final String DEFAULT_KAFKA_DATA_WRITER_CLASS =
"org.apache.gobblin.kafka.writer.Kafka08DataWriter";
// Producer
protected AsyncDataWriter<byte[]> _kafkaProducer;
private final AvroSerializer<AvroJobSpec> _serializer;
private Config _config;
private final String _kafkaProducerClassName;
private Meter addSpecMeter;
private Meter deleteSpecMeter;
private Meter updateSpecMeter;
private Meter cancelSpecMeter;
private MetricContext metricContext = Instrumented.getMetricContext(new State(), getClass());
public SimpleKafkaSpecProducer(Config config, Optional<Logger> log) {
_kafkaProducerClassName = ConfigUtils.getString(config, KAFKA_DATA_WRITER_CLASS_KEY,
DEFAULT_KAFKA_DATA_WRITER_CLASS);
this.addSpecMeter = createMeter("-Add");
this.deleteSpecMeter = createMeter("-Delete");
this.updateSpecMeter = createMeter("-Update");
this.cancelSpecMeter = createMeter("-Cancel");
try {
_serializer = new AvroBinarySerializer<>(AvroJobSpec.SCHEMA$, new FixedSchemaVersionWriter());
_config = config;
} catch (IOException e) {
throw new RuntimeException("Could not create AvroBinarySerializer", e);
}
}
public SimpleKafkaSpecProducer(Config config, Logger log) {
this(config, Optional.of(log));
}
/** Constructor with no logging */
public SimpleKafkaSpecProducer(Config config) {
this(config, Optional.<Logger>absent());
}
private Meter createMeter(String suffix) {
return this.metricContext.meter(MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, getClass().getSimpleName(), suffix));
}
private URI getURIWithExecutionId(URI originalURI, Properties props) {
URI result = originalURI;
if (props.containsKey(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
try {
result = new URI(Joiner.on("/").
join(originalURI.toString(), props.getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)));
} catch (URISyntaxException e) {
log.error("Cannot create job uri to cancel job", e);
}
}
return result;
}
@Override
public Future<?> addSpec(Spec addedSpec) {
AvroJobSpec avroJobSpec = convertToAvroJobSpec(addedSpec, SpecExecutor.Verb.ADD);
log.info("Adding Spec: " + addedSpec + " using Kafka.");
this.addSpecMeter.mark();
return getKafkaProducer().write(_serializer.serializeRecord(avroJobSpec), new KafkaWriteCallback(avroJobSpec));
}
@Override
public Future<?> updateSpec(Spec updatedSpec) {
AvroJobSpec avroJobSpec = convertToAvroJobSpec(updatedSpec, SpecExecutor.Verb.UPDATE);
log.info("Updating Spec: " + updatedSpec + " using Kafka.");
this.updateSpecMeter.mark();
return getKafkaProducer().write(_serializer.serializeRecord(avroJobSpec), new KafkaWriteCallback(avroJobSpec));
}
@Override
public Future<?> deleteSpec(URI deletedSpecURI, Properties headers) {
AvroJobSpec avroJobSpec = AvroJobSpec.newBuilder().setUri(deletedSpecURI.toString())
.setMetadata(ImmutableMap.of(SpecExecutor.VERB_KEY, SpecExecutor.Verb.DELETE.name()))
.setProperties(Maps.fromProperties(headers)).build();
log.info("Deleting Spec: " + deletedSpecURI + " using Kafka.");
this.deleteSpecMeter.mark();
return getKafkaProducer().write(_serializer.serializeRecord(avroJobSpec), new KafkaWriteCallback(avroJobSpec));
}
@Override
public Future<?> cancelJob(URI deletedSpecURI, Properties properties) {
AvroJobSpec avroJobSpec = AvroJobSpec.newBuilder().setUri(deletedSpecURI.toString())
.setMetadata(ImmutableMap.of(SpecExecutor.VERB_KEY, SpecExecutor.Verb.CANCEL.name()))
.setProperties(Maps.fromProperties(properties)).build();
log.info("Cancelling job: " + deletedSpecURI + " using Kafka.");
this.cancelSpecMeter.mark();
return getKafkaProducer().write(_serializer.serializeRecord(avroJobSpec), new KafkaWriteCallback(avroJobSpec));
}
@Override
public Future<? extends List<Spec>> listSpecs() {
throw new UnsupportedOperationException();
}
@Override
public void close() throws IOException {
_kafkaProducer.close();
}
private AsyncDataWriter<byte[]> getKafkaProducer() {
if (null == _kafkaProducer) {
try {
Class<?> kafkaProducerClass = (Class<?>) Class.forName(_kafkaProducerClassName);
_kafkaProducer = (AsyncDataWriter<byte[]>) ConstructorUtils.invokeConstructor(kafkaProducerClass,
ConfigUtils.configToProperties(_config));
} catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException e) {
log.error("Failed to instantiate Kafka consumer from class " + _kafkaProducerClassName, e);
throw new RuntimeException("Failed to instantiate Kafka consumer", e);
}
}
return _kafkaProducer;
}
private AvroJobSpec convertToAvroJobSpec(Spec spec, SpecExecutor.Verb verb) {
if (spec instanceof JobSpec) {
JobSpec jobSpec = (JobSpec) spec;
AvroJobSpec.Builder avroJobSpecBuilder = AvroJobSpec.newBuilder();
avroJobSpecBuilder.setUri(jobSpec.getUri().toString()).setVersion(jobSpec.getVersion())
.setDescription(jobSpec.getDescription()).setProperties(Maps.fromProperties(jobSpec.getConfigAsProperties()))
.setMetadata(ImmutableMap.of(SpecExecutor.VERB_KEY, verb.name()));
if (jobSpec.getTemplateURI().isPresent()) {
avroJobSpecBuilder.setTemplateUri(jobSpec.getTemplateURI().get().toString());
}
return avroJobSpecBuilder.build();
} else {
throw new RuntimeException("Unsupported spec type " + spec.getClass());
}
}
static class KafkaWriteCallback implements WriteCallback {
AvroJobSpec avroJobSpec;
KafkaWriteCallback(AvroJobSpec avroJobSpec) {
this.avroJobSpec = avroJobSpec;
}
@Override
public void onSuccess(Object result) {
}
@Override
public void onFailure(Throwable throwable) {
log.error("Error while writing the following record to Kafka {}", avroJobSpec.toString(), throwable);
}
}
} | 3,571 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.Serializable;
import java.net.URI;
import java.util.concurrent.Future;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.runtime.spec_executorInstance.AbstractSpecExecutor;
import org.apache.gobblin.util.CompletedFuture;
/**
* An {@link SpecExecutor} that use Kafka as the communication mechanism.
*/
public class SimpleKafkaSpecExecutor extends AbstractSpecExecutor {
public static final String SPEC_KAFKA_TOPICS_KEY = "spec.kafka.topics";
private SpecProducer<Spec> specProducer;
public SimpleKafkaSpecExecutor(Config config, Optional<Logger> log) {
super(config, log);
specProducer = new SimpleKafkaSpecProducer(config, log);
}
/**
* Constructor with no logging, necessary for simple use case.
* @param config
*/
public SimpleKafkaSpecExecutor(Config config) {
this(config, Optional.absent());
}
@Override
public Future<? extends SpecProducer<Spec>> getProducer() {
return new CompletedFuture<>(this.specProducer, null);
}
@Override
public Future<String> getDescription() {
return new CompletedFuture<>("SimpleSpecExecutorInstance with URI: " + specExecutorInstanceUri, null);
}
@Override
protected void startUp() throws Exception {
optionalCloser = Optional.of(Closer.create());
specProducer = optionalCloser.get().register((SimpleKafkaSpecProducer) specProducer);
}
@Override
protected void shutDown() throws Exception {
if (optionalCloser.isPresent()) {
optionalCloser.get().close();
} else {
log.warn("There's no Closer existed in " + this.getClass().getName());
}
}
public static class SpecExecutorInstanceDataPacket implements Serializable {
protected SpecExecutor.Verb _verb;
protected URI _uri;
protected Spec _spec;
public SpecExecutorInstanceDataPacket(SpecExecutor.Verb verb, URI uri, Spec spec) {
_verb = verb;
_uri = uri;
_spec = spec;
}
@Override
public String toString() {
return String.format("Verb: %s, URI: %s, Spec: %s", _verb, _uri, _spec);
}
}
} | 3,572 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.codec.StreamCodec;
/**
 * Tests that {@link EncryptionFactory} can build AES and GPG stream crypto providers and that
 * a GPG encrypt/decrypt round trip restores the original plaintext.
 */
public class GobblinEncryptionProviderTest {
  // NOTE(review): KEY_ID appears unused — the GPG tests below use GPGFileEncryptorTest.KEY_ID
  // instead. Confirm before removing.
  private static final long KEY_ID = -4435883136602571409L;
  // Classpath locations of the GPG test key pair.
  private static final String PRIVATE_KEY = "/testPrivate.key";
  private static final String PUBLIC_KEY = "/testPublic.key";

  /**
   * Builds an "aes_rotating" codec from a classpath keystore and checks that encrypting a
   * short message produces non-empty ciphertext.
   */
  @Test
  public void testCanBuildAes() throws IOException {
    Map<String, Object> properties = new HashMap<>();
    properties.put(EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "aes_rotating");
    properties.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY, getClass().getResource(
        "/encryption_provider_test_keystore").toString());
    properties.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY, "abcd");
    StreamCodec c = EncryptionFactory.buildStreamCryptoProvider(properties);
    Assert.assertNotNull(c);
    byte[] toEncrypt = "Hello!".getBytes(StandardCharsets.UTF_8);
    ByteArrayOutputStream cipherOut = new ByteArrayOutputStream();
    OutputStream cipherStream = c.encodeOutputStream(cipherOut);
    cipherStream.write(toEncrypt);
    cipherStream.close();
    Assert.assertTrue(cipherOut.size() > 0, "Expected to be able to write ciphertext!");
  }

  /** GPG round trip with the default cipher. */
  @Test
  public void testCanBuildGPG() throws IOException {
    Map<String, Object> encryptionProperties = new HashMap<>();
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, GPGCodec.TAG);
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY, GPGFileEncryptor.class.getResource(
        PUBLIC_KEY).toString());
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEY_NAME, String.valueOf(GPGFileEncryptorTest.KEY_ID));
    testGPG(encryptionProperties);
  }

  /** GPG round trip with an explicitly configured, valid cipher name. */
  @Test
  public void testBuildGPGGoodCipher() throws IOException {
    Map<String, Object> encryptionProperties = new HashMap<>();
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, GPGCodec.TAG);
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY, GPGFileEncryptor.class.getResource(
        PUBLIC_KEY).toString());
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEY_NAME, String.valueOf(GPGFileEncryptorTest.KEY_ID));
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_CIPHER_KEY, "CAST5");
    testGPG(encryptionProperties);
  }

  /** An unknown cipher name must surface as a RuntimeException naming the bad cipher. */
  @Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*BadCipher.*")
  public void testBuildGPGBadCipher() throws IOException {
    Map<String, Object> encryptionProperties = new HashMap<>();
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, GPGCodec.TAG);
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY, GPGFileEncryptor.class.getResource(
        PUBLIC_KEY).toString());
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEY_NAME, String.valueOf(GPGFileEncryptorTest.KEY_ID));
    encryptionProperties.put(EncryptionConfigParser.ENCRYPTION_CIPHER_KEY, "BadCipher");
    testGPG(encryptionProperties);
  }

  /**
   * Shared helper: encrypts the reference plaintext with a codec built from
   * {@code encryptionProperties} (public key), decrypts it with a codec built from the private
   * key, and asserts the round trip restores the plaintext while the ciphertext differs.
   */
  private void testGPG(Map<String, Object> encryptionProperties) throws IOException {
    StreamCodec encryptor = EncryptionFactory.buildStreamCryptoProvider(encryptionProperties);
    Assert.assertNotNull(encryptor);

    Map<String, Object> decryptionProperties = new HashMap<>();
    decryptionProperties.put(EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, GPGCodec.TAG);
    decryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY, GPGFileEncryptor.class.getResource(
        PRIVATE_KEY).toString());
    decryptionProperties.put(EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY, GPGFileEncryptorTest.PASSPHRASE);
    StreamCodec decryptor = EncryptionFactory.buildStreamCryptoProvider(decryptionProperties);
    Assert.assertNotNull(decryptor);

    ByteArrayOutputStream cipherOut = new ByteArrayOutputStream();
    OutputStream cipherStream = encryptor.encodeOutputStream(cipherOut);
    cipherStream.write(GPGFileEncryptorTest.EXPECTED_FILE_CONTENT_BYTES);
    cipherStream.close();
    byte[] encryptedBytes = cipherOut.toByteArray();
    Assert.assertTrue(encryptedBytes.length > 0, "Expected to be able to write ciphertext!");

    try (InputStream is = decryptor.decodeInputStream(new ByteArrayInputStream(encryptedBytes))) {
      byte[] decryptedBytes = IOUtils.toByteArray(is);
      Assert.assertNotEquals(GPGFileEncryptorTest.EXPECTED_FILE_CONTENT_BYTES, encryptedBytes);
      Assert.assertEquals(GPGFileEncryptorTest.EXPECTED_FILE_CONTENT_BYTES, decryptedBytes);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.Iterator;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.test.crypto.InsecureShiftCodec;
import org.apache.gobblin.type.RecordWithMetadata;
/**
 * Tests {@link SerializedRecordToEncryptedSerializedRecordConverter} with the insecure-shift
 * codec, both with and without a fork-scoped encryption config.
 */
public class SerializedRecordToEncryptedSerializedRecordConverterTest {
  private WorkUnitState workUnitState;
  private SerializedRecordToEncryptedSerializedRecordConverter converter;
  // Input record and the payload expected after insecure-shift "encryption" (each byte + 1).
  private RecordWithMetadata<byte[]> sampleRecord;
  private byte[] shiftedValue;
  private String insecureShiftTag;
  private final String ENCRYPT_PREFIX = "converter.encrypt.";

  @BeforeTest
  public void setUp() {
    workUnitState = new WorkUnitState();
    converter = new SerializedRecordToEncryptedSerializedRecordConverter();
    sampleRecord = new RecordWithMetadata<>(new byte[]{'a', 'b', 'c', 'd'}, new Metadata());
    shiftedValue = new byte[]{'b', 'c', 'd', 'e'};
    insecureShiftTag = InsecureShiftCodec.TAG;
  }

  /** With no encryption algorithm configured, conversion should fail fast. */
  @Test(expectedExceptions = IllegalStateException.class)
  public void throwsIfMisconfigured()
      throws DataConversionException {
    converter.init(workUnitState);
    converter.convertRecord("", sampleRecord, workUnitState);
  }

  /** Encryption config scoped to fork branch 2 should be picked up. */
  @Test
  public void worksWithFork()
      throws DataConversionException {
    workUnitState.setProp(ConfigurationKeys.FORK_BRANCH_ID_KEY, 2);
    workUnitState.getJobState()
        .setProp(ENCRYPT_PREFIX + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY + ".2",
            "insecure_shift");
    assertSingleShiftedRecord();
  }

  /** Encryption config without a fork suffix should also be picked up. */
  @Test
  public void worksNoFork()
      throws DataConversionException {
    workUnitState.getJobState()
        .setProp(ENCRYPT_PREFIX + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY,
            "insecure_shift");
    assertSingleShiftedRecord();
  }

  /**
   * Shared assertion (extracted from the previously duplicated test bodies): initializes the
   * converter, converts {@code sampleRecord}, and checks that exactly one record is emitted,
   * tagged with the insecure-shift transfer encoding and carrying the shifted payload.
   */
  private void assertSingleShiftedRecord()
      throws DataConversionException {
    converter.init(workUnitState);
    Iterable<RecordWithMetadata<byte[]>> records = converter.convertRecord("", sampleRecord, workUnitState);
    Iterator<RecordWithMetadata<byte[]>> recordIt = records.iterator();
    Assert.assertTrue(recordIt.hasNext());
    RecordWithMetadata<byte[]> record = recordIt.next();
    Assert.assertFalse(recordIt.hasNext());
    Assert.assertEquals(record.getMetadata().getGlobalMetadata().getTransferEncoding().get(0), insecureShiftTag);
    Assert.assertEquals(record.getRecord(), shiftedValue);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.Iterator;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.test.crypto.InsecureShiftCodec;
import org.apache.gobblin.type.RecordWithMetadata;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
/**
 * Tests {@link EncryptedSerializedRecordToSerializedRecordConverter} with the insecure-shift
 * codec, both with and without a fork-scoped decryption config.
 */
public class EncryptedSerializedRecordToSerializedRecordConverterTest {
  private WorkUnitState workUnitState;
  private EncryptedSerializedRecordToSerializedRecordConverter converter;
  // "Encrypted" input record and the payload expected after insecure-shift decryption
  // (each byte - 1).
  private RecordWithMetadata<byte[]> sampleRecord;
  private byte[] shiftedValue;
  private String insecureShiftTag;
  private final String DECRYPT_PREFIX = "converter.decrypt.";

  @BeforeTest
  public void setUp() {
    workUnitState = new WorkUnitState();
    converter = new EncryptedSerializedRecordToSerializedRecordConverter();
    sampleRecord = new RecordWithMetadata<>(new byte[]{'b', 'c', 'd', 'e'}, new Metadata());
    shiftedValue = new byte[]{'a', 'b', 'c', 'd'};
    insecureShiftTag = InsecureShiftCodec.TAG;
  }

  /** With no decryption algorithm configured, conversion should fail fast. */
  @Test(expectedExceptions = IllegalStateException.class)
  public void throwsIfMisconfigured()
      throws DataConversionException {
    converter.init(workUnitState);
    converter.convertRecord("", sampleRecord, workUnitState);
  }

  /** Decryption config scoped to fork branch 2 should be picked up. */
  @Test
  public void worksWithFork()
      throws DataConversionException {
    workUnitState.setProp(ConfigurationKeys.FORK_BRANCH_ID_KEY, 2);
    workUnitState.getJobState()
        .setProp(DECRYPT_PREFIX + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY + ".2",
            "insecure_shift");
    assertSingleShiftedRecord();
  }

  /** Decryption config without a fork suffix should also be picked up. */
  @Test
  public void worksNoFork()
      throws DataConversionException {
    workUnitState.getJobState()
        .setProp(DECRYPT_PREFIX + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY,
            "insecure_shift");
    assertSingleShiftedRecord();
  }

  /**
   * Shared assertion (extracted from the previously duplicated test bodies): initializes the
   * converter, converts {@code sampleRecord}, and checks that exactly one record is emitted,
   * tagged with the insecure-shift transfer encoding and carrying the un-shifted payload.
   */
  private void assertSingleShiftedRecord()
      throws DataConversionException {
    converter.init(workUnitState);
    Iterable<RecordWithMetadata<byte[]>> records = converter.convertRecord("", sampleRecord, workUnitState);
    Iterator<RecordWithMetadata<byte[]>> recordIterator = records.iterator();
    Assert.assertTrue(recordIterator.hasNext());
    RecordWithMetadata<byte[]> record = recordIterator.next();
    Assert.assertFalse(recordIterator.hasNext());
    Assert.assertEquals(record.getMetadata().getGlobalMetadata().getTransferEncoding().get(0), insecureShiftTag);
    Assert.assertEquals(record.getRecord(), shiftedValue);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.testng.collections.Lists;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.test.crypto.InsecureShiftCodec;
/**
 * Tests {@link AvroStringFieldEncryptorConverter}: field-level encryption of plain, nested,
 * and array-valued string fields using the insecure-shift codec, verifying that the schema is
 * unchanged and that decrypting the output restores the original values.
 */
public class AvroStringFieldEncryptorConverterTest {

  /** Encrypts a string field inside an array of nested records and round-trips the values. */
  @Test
  public void testNestedConversion()
      throws DataConversionException, IOException, SchemaConversionException {
    AvroStringFieldEncryptorConverter converter = new AvroStringFieldEncryptorConverter();
    WorkUnitState wuState = new WorkUnitState();

    wuState.getJobState().setProp("converter.fieldsToEncrypt", "nestedRecords.*.fieldToEncrypt");
    wuState.getJobState().setProp("converter.encrypt.algorithm", "insecure_shift");

    converter.init(wuState);
    GenericRecord inputRecord =
        getRecordFromFile(getClass().getClassLoader().getResource("record_with_arrays.avro").getPath());
    Schema inputSchema = inputRecord.getSchema();
    Schema outputSchema = converter.convertSchema(inputSchema, wuState);

    // Capture the plaintext values before conversion so we can compare after decryption.
    List<String> origValues = new ArrayList<>();
    for (Object o : (List) inputRecord.get("nestedRecords")) {
      GenericRecord r = (GenericRecord) o;
      origValues.add(r.get("fieldToEncrypt").toString());
    }

    Iterable<GenericRecord> recordIt = converter.convertRecord(outputSchema, inputRecord, wuState);
    GenericRecord record = recordIt.iterator().next();

    // Field-level encryption must not alter the schema.
    Assert.assertEquals(outputSchema, inputSchema);

    List<String> decryptedValues = new ArrayList<>();
    for (Object o : (List) record.get("nestedRecords")) {
      GenericRecord r = (GenericRecord) o;
      decryptedValues.add(decryptInsecureShift(r.get("fieldToEncrypt").toString()));
    }

    Assert.assertEquals(decryptedValues, origValues);
  }

  /** Encrypts every element of an array-typed string field and round-trips the values. */
  @Test
  @SuppressWarnings("unchecked")
  public void testEncryptionOfArray()
      throws SchemaConversionException, DataConversionException, IOException {
    AvroStringFieldEncryptorConverter converter = new AvroStringFieldEncryptorConverter();
    WorkUnitState wuState = new WorkUnitState();

    wuState.getJobState().setProp("converter.fieldsToEncrypt", "favorite_quotes");
    wuState.getJobState().setProp("converter.encrypt.algorithm", "insecure_shift");

    converter.init(wuState);

    // The below error is due to invalid avro data. As per avro, the default value must have the same type as the first
    // entry in the union. As the default value is null, type with "null" union must have "null" type first and then
    // actual type. This is corrected in fieldPickInput.avsc file and fieldPickInput_arrays.avro
    // Error: org.apache.avro.AvroTypeException: Invalid default for field favorite_quotes: null
    // not a [{"type":"array","items":"string"},"null"]
    // Correct data: "type": ["null", { "type": "array", "items": "string"}, "default": null]
    GenericRecord inputRecord =
        getRecordFromFile(getClass().getClassLoader().getResource("fieldPickInput_arrays.avro").getPath());
    GenericArray origValues = (GenericArray) inputRecord.get("favorite_quotes");
    // Normalize Utf8 elements to plain Strings so the final equality check compares like types.
    for (int i = 0; i < origValues.size(); i++) {
      origValues.set(i, origValues.get(i).toString());
    }

    Schema inputSchema = inputRecord.getSchema();
    Schema outputSchema = converter.convertSchema(inputSchema, wuState);

    Iterable<GenericRecord> recordIt = converter.convertRecord(outputSchema, inputRecord, wuState);
    GenericRecord encryptedRecord = recordIt.iterator().next();

    // Field-level encryption must not alter the schema.
    Assert.assertEquals(outputSchema, inputSchema);

    GenericArray<String> encryptedVals = (GenericArray<String>) encryptedRecord.get("favorite_quotes");
    List<String> decryptedValues = Lists.newArrayList();
    for (String encryptedValue : encryptedVals) {
      decryptedValues.add(decryptInsecureShift(encryptedValue));
    }

    Assert.assertEquals(decryptedValues, origValues);
  }

  /** Encrypts a top-level string field and round-trips its value. */
  @Test
  public void testConversion()
      throws DataConversionException, IOException, SchemaConversionException {
    AvroStringFieldEncryptorConverter converter = new AvroStringFieldEncryptorConverter();
    WorkUnitState wuState = new WorkUnitState();

    wuState.getJobState().setProp("converter.fieldsToEncrypt", "field1");
    wuState.getJobState().setProp("converter.encrypt.algorithm", "insecure_shift");

    converter.init(wuState);
    GenericRecord inputRecord = TestUtils.generateRandomAvroRecord();
    Schema inputSchema = inputRecord.getSchema();
    Schema outputSchema = converter.convertSchema(inputSchema, wuState);

    String fieldValue = (String) inputRecord.get("field1");

    Iterable<GenericRecord> recordIt = converter.convertRecord(outputSchema, inputRecord, wuState);
    GenericRecord record = recordIt.iterator().next();

    // Field-level encryption must not alter the schema.
    Assert.assertEquals(outputSchema, inputSchema);

    String encryptedValue = (String) record.get("field1");
    Assert.assertEquals(decryptInsecureShift(encryptedValue), fieldValue);
  }

  /**
   * Decrypts a value produced by the {@link InsecureShiftCodec} back to plaintext.
   * Extracted from the three tests above, which previously triplicated this logic.
   */
  private String decryptInsecureShift(String encryptedValue)
      throws IOException {
    InsecureShiftCodec codec = new InsecureShiftCodec(Maps.<String, Object>newHashMap());
    try (InputStream in = codec.decodeInputStream(
        new ByteArrayInputStream(encryptedValue.getBytes(StandardCharsets.UTF_8)))) {
      byte[] buf = new byte[in.available()];
      // Capture the number of bytes actually read instead of ignoring read()'s return value.
      int len = in.read(buf);
      return new String(buf, 0, Math.max(len, 0), StandardCharsets.UTF_8);
    }
  }

  /**
   * Reads the first record of an Avro data file, or returns null if the file has no records.
   * Uses try-with-resources; the original implementation leaked the {@link DataFileReader}.
   */
  private GenericRecord getRecordFromFile(String path)
      throws IOException {
    DatumReader<GenericRecord> reader = new GenericDatumReader<>();
    try (DataFileReader<GenericRecord> dataFileReader = new DataFileReader<GenericRecord>(new File(path), reader)) {
      return dataFileReader.hasNext() ? dataFileReader.next() : null;
    }
  }
}
| 3,576 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/test/java/org/apache/gobblin/converter/AvroStringFieldDecryptorConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.test.crypto.InsecureShiftCodec;
public class AvroStringFieldDecryptorConverterTest {
  /**
   * Decrypts a single string field ("field1") with the insecure_shift codec and verifies
   * the converter's output matches a manual decode of the original encrypted value.
   */
  @Test
  public void testConversion()
      throws DataConversionException, IOException, SchemaConversionException {
    AvroStringFieldDecryptorConverter converter = new AvroStringFieldDecryptorConverter();
    WorkUnitState wuState = new WorkUnitState();
    wuState.getJobState().setProp("converter.fieldsToDecrypt", "field1");
    wuState.getJobState().setProp("converter.decrypt.AvroStringFieldDecryptorConverter.algorithm", "insecure_shift");
    converter.init(wuState);
    GenericRecord inputRecord = TestUtils.generateRandomAvroRecord();
    Schema inputSchema = inputRecord.getSchema();
    Schema outputSchema = converter.convertSchema(inputSchema, wuState);
    String fieldValue = (String) inputRecord.get("field1");
    Iterable<GenericRecord> recordIt = converter.convertRecord(outputSchema, inputRecord, wuState);
    GenericRecord decryptedRecord = recordIt.iterator().next();
    // Decryption happens in place on field values, so the schema must pass through unchanged.
    Assert.assertEquals(outputSchema, inputSchema);
    String decryptedValue = (String) decryptedRecord.get("field1");
    // Reuse the shared helper instead of duplicating the manual decode inline.
    assertDecryptedValuesEqual(decryptedValue, fieldValue);
  }

  /**
   * Decrypts an array-of-strings field and verifies each element against a manual decode
   * of the corresponding original value.
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testArrayDecryption()
      throws DataConversionException, IOException, SchemaConversionException {
    AvroStringFieldDecryptorConverter converter = new AvroStringFieldDecryptorConverter();
    WorkUnitState wuState = new WorkUnitState();
    wuState.getJobState().setProp("converter.fieldsToDecrypt", "array1");
    wuState.getJobState().setProp("converter.decrypt.AvroStringFieldDecryptorConverter.algorithm", "insecure_shift");
    converter.init(wuState);
    GenericRecord inputRecord = generateRecordWithArrays();
    Schema inputSchema = inputRecord.getSchema();
    Schema outputSchema = converter.convertSchema(inputSchema, wuState);
    GenericData.Array<String> fieldValue = (GenericData.Array<String>) inputRecord.get("array1");
    Iterable<GenericRecord> recordIt = converter.convertRecord(outputSchema, inputRecord, wuState);
    GenericRecord decryptedRecord = recordIt.iterator().next();
    Assert.assertEquals(outputSchema, inputSchema);
    GenericData.Array<String> decryptedValue = (GenericData.Array<String>) decryptedRecord.get("array1");
    for (int i = 0; i < decryptedValue.size(); i++) {
      assertDecryptedValuesEqual(decryptedValue.get(i), fieldValue.get(i));
    }
  }

  /**
   * Asserts that {@code decryptedValue} equals the result of manually decoding
   * {@code originalValue} with a fresh insecure_shift codec.
   */
  private void assertDecryptedValuesEqual(String decryptedValue, String originalValue) throws IOException {
    InsecureShiftCodec codec = new InsecureShiftCodec(Maps.<String, Object>newHashMap());
    InputStream in = codec.decodeInputStream(new ByteArrayInputStream(originalValue.getBytes(StandardCharsets.UTF_8)));
    byte[] expectedDecryptedValue = new byte[in.available()];
    // InputStream.read(byte[]) may fill the buffer only partially; the original ignored
    // the returned count. Loop until the buffer is full or the stream is exhausted.
    int bytesRead = 0;
    while (bytesRead < expectedDecryptedValue.length) {
      int n = in.read(expectedDecryptedValue, bytesRead, expectedDecryptedValue.length - bytesRead);
      if (n < 0) {
        break;
      }
      bytesRead += n;
    }
    Assert.assertEquals(new String(expectedDecryptedValue, 0, bytesRead, StandardCharsets.UTF_8), decryptedValue);
  }

  /**
   * Reads the first record from the Avro container file at {@code path}, or null if the
   * file has no records. Currently unused; kept as a fixture-loading helper.
   *
   * @throws IOException if the file cannot be opened or read
   */
  private GenericRecord getRecordFromFile(String path) throws IOException {
    DatumReader<GenericRecord> reader = new GenericDatumReader<>();
    // try-with-resources: the original never closed the reader, leaking the file handle;
    // also replaced the while-loop that only ever ran once with a conditional.
    try (DataFileReader<GenericRecord> dataFileReader = new DataFileReader<>(new File(path), reader)) {
      return dataFileReader.hasNext() ? dataFileReader.next() : null;
    }
  }

  /**
   * Builds a record with a single array-of-strings field "array1" = ["foobar", "foobaz"].
   */
  private GenericRecord generateRecordWithArrays() {
    ArrayList<Schema.Field> fields = new ArrayList<>();
    String fieldName = "array1";
    Schema fieldSchema = Schema.createArray(Schema.create(Schema.Type.STRING));
    String docString = "doc";
    fields.add(new Schema.Field(fieldName, fieldSchema, docString, null));
    Schema schema = Schema.createRecord("name", docString, "test", false);
    schema.setFields(fields);
    GenericData.Record record = new GenericData.Record(schema);
    GenericData.Array<String> arr = new GenericData.Array<>(2, fieldSchema);
    arr.add("foobar");
    arr.add("foobaz");
    record.put(fieldName, arr);
    return record;
  }
}
| 3,577 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/crypto/JCEKSKeystoreCredentialStoreCli.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.security.KeyStoreException;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import javax.xml.bind.DatatypeConverter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.runtime.cli.CliApplication;
@Alias(value = "keystore", description = "Examine JCE Keystore files")
@Slf4j
public class JCEKSKeystoreCredentialStoreCli implements CliApplication {
  // Sub-command registry; keys are the action names accepted as args[1].
  private static final Map<String, Action> actionMap = ImmutableMap
      .of("generate_keys", new GenerateKeyAction(), "list_keys", new ListKeysAction(), "help", new HelpAction(),
          "export", new ExportKeyAction());

  /**
   * Entry point: args[1] selects the {@link Action}; the remaining args (including the
   * action name itself) are forwarded to it. Prints help when the action is missing or
   * unrecognized.
   */
  @Override
  public void run(String[] args) {
    if (args.length < 2) {
      System.out.println("Must specify an action!");
      new HelpAction().run(args);
      return;
    }

    String actionStr = args[1];
    Action action = actionMap.get(actionStr);
    if (action == null) {
      System.out.println("Action " + actionStr + " unknown!");
      new HelpAction().run(args);
      return;
    }

    // Forward args starting at the action name so each action parses its own flags.
    action.run(Arrays.copyOfRange(args, 1, args.length));
  }

  /**
   * Opens an existing keystore at {@code path}, prompting for the password on the console.
   *
   * @throws IOException if the keystore cannot be read
   */
  public static JCEKSKeystoreCredentialStore loadKeystore(String path)
      throws IOException {
    char[] password = getPasswordFromConsole();
    return new JCEKSKeystoreCredentialStore(path, String.valueOf(password));
  }

  /**
   * Abstract class for any action of this tool.
   */
  static abstract class Action {
    /**
     * Return any additional Options for this action. The framework will always add a 'help' option.
     */
    protected abstract List<Option> getExtraOptions();

    /**
     * Execute the action.
     * @param args command-line args, starting with the action name
     */
    abstract void run(String[] args);

    protected static final Option HELP = Option.builder("h").longOpt("help").desc("Print usage").build();

    /** Prints this action's available options to stdout. */
    protected void printUsage() {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp("Options", getOptions());
    }

    /**
     * Helper function to parse CLI arguments against this action's options.
     */
    protected CommandLine parseOptions(String[] args)
        throws ParseException {
      CommandLineParser parser = new DefaultParser();
      return parser.parse(getOptions(), args);
    }

    // Combines the always-present help option with the action's extra options.
    private Options getOptions() {
      List<Option> options = getExtraOptions();
      Options optionList = new Options();
      optionList.addOption(HELP);
      for (Option o : options) {
        optionList.addOption(o);
      }
      return optionList;
    }
  }

  /** Prints the list of valid action names. */
  static class HelpAction extends Action {
    @Override
    protected List<Option> getExtraOptions() {
      return Collections.emptyList();
    }

    @Override
    void run(String[] args) {
      System.out.println("You can run <actionName> -h to see valid flags for a given action");
      for (String validAction : actionMap.keySet()) {
        System.out.println(validAction);
      }
    }
  }

  /**
   * Check how many keys are present in an existing keystore.
   */
  static class ListKeysAction extends Action {
    private static final Option KEYSTORE_LOCATION =
        Option.builder("o").longOpt("out").hasArg().desc("Keystore location").build();
    private static final List<Option> options = ImmutableList.of(KEYSTORE_LOCATION);

    @Override
    protected List<Option> getExtraOptions() {
      return options;
    }

    @Override
    void run(String[] args) {
      try {
        CommandLine cli = parseOptions(args);
        if (!paramsAreValid(cli)) {
          return;
        }
        String keystoreLocation = cli.getOptionValue(KEYSTORE_LOCATION.getOpt());
        JCEKSKeystoreCredentialStore credentialStore = loadKeystore(keystoreLocation);
        Map<String, byte[]> keys = credentialStore.getAllEncodedKeys();
        System.out.println("Keystore " + keystoreLocation + " has " + String.valueOf(keys.size()) + " keys.");
      } catch (IOException | ParseException e) {
        throw new RuntimeException(e);
      }
    }

    // Returns false (after printing usage) when help was requested or a required flag is missing.
    private boolean paramsAreValid(CommandLine cli) {
      if (cli.hasOption(HELP.getOpt())) {
        printUsage();
        return false;
      }
      if (!cli.hasOption(KEYSTORE_LOCATION.getOpt())) {
        System.out.println("Must specify keystore location!");
        printUsage();
        return false;
      }
      return true;
    }
  }

  /**
   * Create a new keystore file with _N_ serialized keys. The password will be read from the console.
   */
  static class GenerateKeyAction extends Action {
    private static final Option KEYSTORE_LOCATION =
        Option.builder("o").longOpt("out").hasArg().desc("Keystore location").build();
    private static final Option NUM_KEYS =
        Option.builder("n").longOpt("numKeys").hasArg().desc("# of keys to generate").build();
    private static final List<Option> OPTIONS = ImmutableList.of(KEYSTORE_LOCATION, NUM_KEYS);

    @Override
    protected List<Option> getExtraOptions() {
      return OPTIONS;
    }

    @Override
    void run(String[] args) {
      try {
        CommandLine cli = parseOptions(args);
        if (!paramsAreValid(cli)) {
          return;
        }

        // Default of 20 keys when -n is not given.
        int numKeys = Integer.parseInt(cli.getOptionValue(NUM_KEYS.getOpt(), "20"));
        char[] password = getPasswordFromConsole();
        String keystoreLocation = cli.getOptionValue(KEYSTORE_LOCATION.getOpt());
        JCEKSKeystoreCredentialStore credentialStore =
            new JCEKSKeystoreCredentialStore(cli.getOptionValue(KEYSTORE_LOCATION.getOpt()), String.valueOf(password),
                EnumSet.of(JCEKSKeystoreCredentialStore.CreationOptions.CREATE_IF_MISSING));
        credentialStore.generateAesKeys(numKeys, 0);
        System.out.println("Generated " + String.valueOf(numKeys) + " keys at " + keystoreLocation);
      } catch (IOException | KeyStoreException e) {
        throw new RuntimeException(e);
      } catch (ParseException e) {
        System.out.println("Unknown command line params " + e.toString());
        printUsage();
      }
    }

    // Returns false (after printing usage) when help was requested or a required flag is missing.
    private boolean paramsAreValid(CommandLine cli) {
      if (cli.hasOption(HELP.getOpt())) {
        printUsage();
        return false;
      }
      if (!cli.hasOption(KEYSTORE_LOCATION.getOpt())) {
        System.out.println("Must specify keystore location!");
        printUsage();
        return false;
      }
      return true;
    }
  }

  /** Prompts for and reads the keystore password from the attached console. */
  public static char[] getPasswordFromConsole() {
    System.out.print("Please enter the keystore password: ");
    return System.console().readPassword();
  }

  /**
   * Exports every key in a keystore to a JSON file mapping key id to base64-encoded key bytes.
   */
  static class ExportKeyAction extends Action {
    private static final Option KEYSTORE_LOCATION =
        Option.builder("i").longOpt("in").hasArg().required().desc("Keystore location").build();
    private static final Option OUTPUT_LOCATION =
        Option.builder("o").longOpt("out").hasArg().required().desc("Output location").build();

    @Override
    protected List<Option> getExtraOptions() {
      return ImmutableList.of(KEYSTORE_LOCATION, OUTPUT_LOCATION);
    }

    @Override
    void run(String[] args) {
      try {
        CommandLine cli = parseOptions(args);
        JCEKSKeystoreCredentialStore credStore = loadKeystore(cli.getOptionValue(KEYSTORE_LOCATION.getOpt()));
        Map<Integer, String> base64Keys = new HashMap<>();
        Map<String, byte[]> keys = credStore.getAllEncodedKeys();
        for (Map.Entry<String, byte[]> e : keys.entrySet()) {
          base64Keys.put(Integer.valueOf(e.getKey()), DatatypeConverter.printBase64Binary(e.getValue()));
        }
        Gson gson = new GsonBuilder().disableHtmlEscaping().create();
        // try-with-resources: the original leaked the writer if write()/flush() threw;
        // close() also flushes, so the explicit flush() is no longer needed.
        try (OutputStreamWriter fOs = new OutputStreamWriter(
            new FileOutputStream(new File(cli.getOptionValue(OUTPUT_LOCATION.getOpt()))),
            StandardCharsets.UTF_8)) {
          fOs.write(gson.toJson(base64Keys));
        }
      } catch (ParseException e) {
        printUsage();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
}
| 3,578 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/crypto/GobblinEncryptionProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.crypto;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.codec.StreamCodec;
import lombok.extern.slf4j.Slf4j;
/**
* Helper and factory methods for encryption algorithms.
*
* Note: Interface will likely change to support registration of algorithms
*/
@Slf4j
@Alpha
public class GobblinEncryptionProvider implements CredentialStoreProvider, EncryptionProvider {
private final static Set<String> SUPPORTED_STREAMING_ALGORITHMS =
ImmutableSet.of("aes_rotating", EncryptionConfigParser.ENCRYPTION_TYPE_ANY);
/**
* Return a set of streaming algorithms (StreamEncoders) that this factory knows how to build
* @return Set of streaming algorithms the factory knows how to build
*/
public static Set<String> supportedStreamingAlgorithms() {
return SUPPORTED_STREAMING_ALGORITHMS;
}
/**
* Return a StreamEncryptor for the given parameters. The algorithm type to use will be extracted
* from the parameters object.
* @param parameters Configured parameters for algorithm.
* @return A StreamCodec for the requested algorithm
* @throws IllegalArgumentException If the given algorithm/parameter pair cannot be built
*/
public StreamCodec buildStreamEncryptor(Map<String, Object> parameters) {
String encryptionType = EncryptionConfigParser.getEncryptionType(parameters);
if (encryptionType == null) {
throw new IllegalArgumentException("Encryption type not present in parameters!");
}
return buildStreamCryptoProvider(encryptionType, parameters);
}
/**
* Return a StreamEncryptor for the given algorithm and with appropriate parameters.
* @param algorithm Algorithm to build
* @param parameters Parameters for algorithm
* @return A StreamEncoder for that algorithm
* @throws IllegalArgumentException If the given algorithm/parameter pair cannot be built
*/
public StreamCodec buildStreamCryptoProvider(String algorithm, Map<String, Object> parameters) {
switch (algorithm) {
case EncryptionConfigParser.ENCRYPTION_TYPE_ANY:
case "aes_rotating":
CredentialStore cs = CredentialStoreFactory.buildCredentialStore(parameters);
if (cs == null) {
throw new IllegalArgumentException("Failed to build credential store; can't instantiate AES");
}
return new RotatingAESCodec(cs);
case GPGCodec.TAG:
String password = EncryptionConfigParser.getKeystorePassword(parameters);
String keystorePathStr = EncryptionConfigParser.getKeystorePath(parameters);
String keyName = EncryptionConfigParser.getKeyName(parameters);
String cipherName = EncryptionConfigParser.getCipher(parameters);
// if not using a keystore then use password based encryption
if (keystorePathStr == null) {
Preconditions.checkNotNull(password, "Must specify an en/decryption password for GPGCodec!");
return new GPGCodec(password, cipherName);
}
// if a key name is not present then use a key id of 0. A GPGCodec may be configured without a key name
// when used only for decryption where the key name is retrieved from the encrypted file
return new GPGCodec(new Path(keystorePathStr), password,
keyName == null ? 0 : Long.parseUnsignedLong(keyName, 16), cipherName);
default:
log.debug("Do not support encryption type {}", algorithm);
return null;
}
}
/**
* Build a credential store with the given parameters.
*/
public CredentialStore buildCredentialStore(Map<String, Object> parameters) {
String ks_type = EncryptionConfigParser.getKeystoreType(parameters);
String ks_path = EncryptionConfigParser.getKeystorePath(parameters);
String ks_password = EncryptionConfigParser.getKeystorePassword(parameters);
try {
switch (ks_type) {
// TODO this is yet another example of building a broad type (CredentialStore) based on a human-readable name
// (json) with a bag of parameters. Need to pull out into its own pattern!
case JCEKSKeystoreCredentialStore.TAG:
return new JCEKSKeystoreCredentialStore(ks_path, ks_password);
case JsonCredentialStore.TAG:
return new JsonCredentialStore(ks_path, buildKeyToStringCodec(parameters));
default:
return null;
}
} catch (IOException e) {
log.error("Error building credential store, returning null", e);
return null;
}
}
/**
* Build a KeyToStringCodec based on parameters. To reduce complexity we don't build these
* through a ServiceLocator since hopefully the # of key encodings is small.
* @param parameters Config parameters used to build the codec
*/
private KeyToStringCodec buildKeyToStringCodec(Map<String, Object> parameters) {
String encodingName = EncryptionConfigParser.getKeystoreEncoding(parameters);
switch (encodingName) {
case HexKeyToStringCodec.TAG:
return new HexKeyToStringCodec();
case Base64KeyToStringCodec.TAG:
return new Base64KeyToStringCodec();
default:
throw new IllegalArgumentException("Don't know how to build key to string codec for type " + encodingName);
}
}
public GobblinEncryptionProvider() {
// for ServiceLocator
}
}
| 3,579 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/converter/StringFieldDecryptorConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.EncryptionFactory;
import org.apache.gobblin.recordaccess.RecordAccessor;
import com.google.common.base.Splitter;
/**
* Converter that can decrypt a string field in place. (Note: that means the incoming
* record will be mutated!). Assumes that the input field is of string
* type and that the decryption algorithm chosen will output a UTF-8 encoded byte array.
*/
public abstract class StringFieldDecryptorConverter<SCHEMA, DATA> extends Converter<SCHEMA, SCHEMA, DATA, DATA> {
public static final String FIELDS_TO_DECRYPT_CONFIG_NAME = "converter.fieldsToDecrypt";
private StreamCodec decryptor;
private List<String> fieldsToDecrypt;
@Override
public Converter<SCHEMA, SCHEMA, DATA, DATA> init(WorkUnitState workUnit) {
super.init(workUnit);
Map<String, Object> config = EncryptionConfigParser
.getConfigForBranch(EncryptionConfigParser.EntityType.CONVERTER_DECRYPT, getClass().getSimpleName(), workUnit);
decryptor = EncryptionFactory.buildStreamCryptoProvider(config);
String fieldsToDecryptConfig = workUnit.getProp(FIELDS_TO_DECRYPT_CONFIG_NAME, null);
if (fieldsToDecryptConfig == null) {
throw new IllegalArgumentException("Must fill in the " + FIELDS_TO_DECRYPT_CONFIG_NAME + " config option!");
}
fieldsToDecrypt = Splitter.on(',').splitToList(fieldsToDecryptConfig);
return this;
}
@Override
public SCHEMA convertSchema(SCHEMA inputSchema, WorkUnitState workUnit)
throws SchemaConversionException {
return inputSchema;
}
@Override
public Iterable<DATA> convertRecord(SCHEMA outputSchema, DATA inputRecord, WorkUnitState workUnit)
throws DataConversionException {
RecordAccessor accessor = getRecordAccessor(inputRecord);
for (String field : fieldsToDecrypt) {
Map<String, Object> stringsToDecrypt = accessor.getMultiGeneric(field);
try {
for (Map.Entry<String, Object> entry : stringsToDecrypt.entrySet()) {
if (entry.getValue() instanceof String) {
String s = decryptString((String) entry.getValue());
accessor.set(entry.getKey(), s);
} else if (entry.getValue() instanceof List) {
List<String> decryptedValues = new ArrayList<>();
for (Object val : (List)entry.getValue()) {
if (!(val instanceof String)) {
throw new IllegalArgumentException("Expected List of Strings, but encountered a value of type "
+ val.getClass().getCanonicalName());
}
decryptedValues.add(decryptString((String)val));
}
accessor.setStringArray(entry.getKey(), decryptedValues);
} else {
throw new IllegalArgumentException(
"Expected field to be of type String or List<String>, was " + entry.getValue().getClass()
.getCanonicalName());
}
}
} catch (IOException | IllegalArgumentException | IllegalStateException e) {
throw new DataConversionException("Error while encrypting field " + field + ": " + e.getMessage(), e);
}
}
return Collections.singleton(inputRecord);
}
protected List<String> getFieldsToDecrypt() {
return fieldsToDecrypt;
}
protected String decryptString(String val)
throws IOException {
byte[] encryptedBytes = val.getBytes(StandardCharsets.UTF_8);
ByteArrayInputStream inStream = new ByteArrayInputStream(encryptedBytes);
try (InputStream cipherStream = decryptor.decodeInputStream(inStream);
ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
IOUtils.copy(cipherStream, outputStream);
byte[] decryptedBytes = outputStream.toByteArray();
return new String(decryptedBytes, StandardCharsets.UTF_8);
}
}
protected abstract RecordAccessor getRecordAccessor(DATA record);
}
| 3,580 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/converter/AvroStringFieldEncryptorConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.recordaccess.AvroGenericRecordAccessor;
import org.apache.gobblin.recordaccess.RecordAccessor;
/**
 * An Avro {@link GenericRecord} specialization of {@link StringFieldEncryptorConverter}.
 * Field access is delegated to an {@link AvroGenericRecordAccessor}.
 */
public class AvroStringFieldEncryptorConverter extends StringFieldEncryptorConverter<Schema, GenericRecord> {
  /** Wraps the record in an Avro-aware accessor so the base class can read/write fields. */
  @Override
  protected RecordAccessor getRecordAccessor(GenericRecord record) {
    RecordAccessor avroAccessor = new AvroGenericRecordAccessor(record);
    return avroAccessor;
  }
}
| 3,581 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/converter/AvroStringFieldDecryptorConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.recordaccess.AvroGenericRecordAccessor;
import org.apache.gobblin.recordaccess.RecordAccessor;
/**
 * An Avro {@link GenericRecord} specialization of {@link StringFieldDecryptorConverter}.
 * Field access is delegated to an {@link AvroGenericRecordAccessor}.
 */
public class AvroStringFieldDecryptorConverter extends StringFieldDecryptorConverter<Schema, GenericRecord> {
  /** Wraps the record in an Avro-aware accessor so the base class can read/write fields. */
  @Override
  protected RecordAccessor getRecordAccessor(GenericRecord record) {
    RecordAccessor avroAccessor = new AvroGenericRecordAccessor(record);
    return avroAccessor;
  }
}
| 3,582 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/converter/StringFieldEncryptorConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import com.google.common.base.Splitter;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.EncryptionFactory;
import org.apache.gobblin.recordaccess.RecordAccessor;
/**
* Converter that can encrypt a string field in place. Assumes that the encryption algorithm chosen will output
* a UTF-8 encoded byte array.
*/
public abstract class StringFieldEncryptorConverter<SCHEMA, DATA> extends Converter<SCHEMA, SCHEMA, DATA, DATA> {
  public static final String FIELDS_TO_ENCRYPT_CONFIG_NAME = "converter.fieldsToEncrypt";

  // Codec that performs the actual encryption; built from job config in init().
  private StreamCodec encryptor;
  // Field paths whose string values should be encrypted in place.
  private List<String> fieldsToEncrypt;

  /**
   * Builds the encryption codec for this converter's branch and parses the comma-separated
   * list of fields to encrypt.
   *
   * @throws IllegalArgumentException if {@value #FIELDS_TO_ENCRYPT_CONFIG_NAME} is not set
   */
  @Override
  public Converter<SCHEMA, SCHEMA, DATA, DATA> init(WorkUnitState workUnit) {
    super.init(workUnit);
    Map<String, Object> config = EncryptionConfigParser
        .getConfigForBranch(EncryptionConfigParser.EntityType.CONVERTER_ENCRYPT, getClass().getSimpleName(), workUnit);
    encryptor = EncryptionFactory.buildStreamCryptoProvider(config);
    String fieldsToEncryptConfig = workUnit.getProp(FIELDS_TO_ENCRYPT_CONFIG_NAME, null);
    if (fieldsToEncryptConfig == null) {
      throw new IllegalArgumentException("Must fill in the " + FIELDS_TO_ENCRYPT_CONFIG_NAME + " config option!");
    }
    fieldsToEncrypt = Splitter.on(',').splitToList(fieldsToEncryptConfig);
    return this;
  }

  /** Schema is unchanged: values are encrypted in place and remain strings. */
  @Override
  public SCHEMA convertSchema(SCHEMA inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    return inputSchema;
  }

  /**
   * Encrypts each configured field of the record in place. A field path may resolve to a single
   * string or a list of strings; matched values of any other type are left untouched (unchanged
   * from the original behavior). A list containing a non-string element raises a
   * {@link DataConversionException}.
   */
  @Override
  public Iterable<DATA> convertRecord(SCHEMA outputSchema, DATA inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    RecordAccessor accessor = getRecordAccessor(inputRecord);
    for (String field : fieldsToEncrypt) {
      Map<String, Object> stringsToEncrypt = accessor.getMultiGeneric(field);
      for (Map.Entry<String, Object> entry : stringsToEncrypt.entrySet()) {
        try {
          if (entry.getValue() instanceof String) {
            accessor.set(entry.getKey(), encryptString((String) entry.getValue()));
          } else if (entry.getValue() instanceof List) {
            List<String> encryptedVals = new ArrayList<>();
            // Wildcard instead of a raw List; each element must itself be a String.
            for (Object val : (List<?>) entry.getValue()) {
              if (!(val instanceof String)) {
                throw new IllegalArgumentException("Unexpected type " + val.getClass().getCanonicalName() +
                    " while encrypting field " + field);
              }
              encryptedVals.add(encryptString((String) val));
            }
            accessor.setStringArray(entry.getKey(), encryptedVals);
          }
        } catch (IOException | IllegalArgumentException | IllegalStateException e) {
          throw new DataConversionException("Error while encrypting field " + field + ": " + e.getMessage(), e);
        }
      }
    }
    return Collections.singleton(inputRecord);
  }

  /**
   * Encrypts a single string value: UTF-8 encodes it, pushes it through the codec, and re-reads
   * the codec output as a UTF-8 string (per the class contract, the codec is assumed to emit
   * UTF-8-safe bytes).
   */
  private String encryptString(String val)
      throws IOException {
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    // try-with-resources guarantees the cipher stream is flushed and closed even if write()
    // throws; the previous code leaked the stream on a write failure.
    try (OutputStream cipherStream = encryptor.encodeOutputStream(outputStream)) {
      cipherStream.write(val.getBytes(StandardCharsets.UTF_8));
    }
    return new String(outputStream.toByteArray(), StandardCharsets.UTF_8);
  }

  protected List<String> getFieldsToEncrypt() {
    return fieldsToEncrypt;
  }

  protected abstract RecordAccessor getRecordAccessor(DATA record);
}
| 3,583 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/converter/EncryptedSerializedRecordToSerializedRecordConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.Map;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.EncryptionFactory;
/**
* Specific implementation of {@link EncryptedSerializedRecordToSerializedRecordConverterBase} that uses Gobblin's
* {@link EncryptionFactory} to build the proper decryption codec based on config.
*/
public class EncryptedSerializedRecordToSerializedRecordConverter extends EncryptedSerializedRecordToSerializedRecordConverterBase {
  /**
   * Builds the decryption codec for this converter's branch from the job configuration.
   *
   * @throws IllegalStateException when the job carries no decryption configuration
   */
  @Override
  protected StreamCodec buildDecryptor(WorkUnitState config) {
    Map<String, Object> branchConfig = EncryptionConfigParser
        .getConfigForBranch(EncryptionConfigParser.EntityType.CONVERTER_DECRYPT, getClass().getSimpleName(), config);
    if (branchConfig == null) {
      throw new IllegalStateException("No decryption config specified in job - can't decrypt!");
    }
    return EncryptionFactory.buildStreamCryptoProvider(branchConfig);
  }
}
| 3,584 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-crypto-provider/src/main/java/org/apache/gobblin/converter/SerializedRecordToEncryptedSerializedRecordConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.util.Map;
import org.apache.gobblin.codec.StreamCodec;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.crypto.EncryptionConfigParser;
import org.apache.gobblin.crypto.EncryptionFactory;
/**
* Specific implementation of {@link SerializedRecordToEncryptedSerializedRecordConverterBase} that uses Gobblin's
* {@link EncryptionFactory} to build the proper encryption codec based on config.
*/
public class SerializedRecordToEncryptedSerializedRecordConverter extends SerializedRecordToEncryptedSerializedRecordConverterBase {
  /**
   * Builds the encryption codec for this converter's branch from the job configuration.
   *
   * @throws IllegalStateException when the job carries no encryption configuration
   */
  @Override
  protected StreamCodec buildEncryptor(WorkUnitState config) {
    Map<String, Object> branchConfig = EncryptionConfigParser
        .getConfigForBranch(EncryptionConfigParser.EntityType.CONVERTER_ENCRYPT, getClass().getSimpleName(), config);
    if (branchConfig == null) {
      throw new IllegalStateException("No encryption config specified in job - can't encrypt!");
    }
    return EncryptionFactory.buildStreamCryptoProvider(branchConfig);
  }
}
| 3,585 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-codecs/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-codecs/src/main/java/org/apache/gobblin/codec/Base64Codec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.codec;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Method;
import org.apache.commons.codec.binary.Base64InputStream;
import org.apache.commons.codec.binary.Base64OutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A class that can encode and decode Base64 streams.
 *
 * This class will delegate to the Java 8+ java.util.Base64 algorithm if it can be found;
 * otherwise it relies on Apache Common Codec's Base64OutputStream. The Java 8 classes
 * are preferred because they are noticeably faster in benchmarking.
 */
public class Base64Codec implements StreamCodec {
  private static final Logger log = LoggerFactory.getLogger(Base64Codec.class);
  // Reflective handles to java.util.Base64's encoder/decoder factories and wrap() methods.
  // Resolved once in the static initializer below; left null on pre-Java-8 runtimes.
  private static Method java8GetEncoder;
  private static Method java8WrapStreamEncode;
  private static Method java8GetDecoder;
  private static Method java8WrapStreamDecode;
  // Whether java.util.Base64 and all required methods were found at class-load time.
  private final static boolean foundJava8;
  // Benchmark-only escape hatch: when true, always use the Apache Commons implementation.
  private static boolean forceApacheBase64 = false;
  /**
   * Wraps the given stream so that bytes written to it are Base64-encoded.
   * Falls back to Apache Commons when java.util.Base64 is unavailable or the
   * reflective invocation fails.
   */
  @Override
  public OutputStream encodeOutputStream(OutputStream origStream)
      throws IOException {
    try {
      if (canUseJava8()) {
        Object encoder = java8GetEncoder.invoke(null);
        return (OutputStream) java8WrapStreamEncode.invoke(encoder, origStream);
      } else {
        return encodeOutputStreamWithApache(origStream);
      }
    } catch (ReflectiveOperationException e) {
      log.warn("Error invoking java8 methods, falling back to Apache", e);
      return encodeOutputStreamWithApache(origStream);
    }
  }
  /**
   * Wraps the given stream so that Base64 data read from it is decoded.
   * Falls back to Apache Commons when java.util.Base64 is unavailable or the
   * reflective invocation fails.
   */
  @Override
  public InputStream decodeInputStream(InputStream origStream)
      throws IOException {
    try {
      if (canUseJava8()) {
        Object decoder = java8GetDecoder.invoke(null);
        return (InputStream) java8WrapStreamDecode.invoke(decoder, origStream);
      } else {
        return decodeInputStreamWithApache(origStream);
      }
    } catch (ReflectiveOperationException e) {
      log.warn("Error invoking java8 methods, falling back to Apache", e);
      return decodeInputStreamWithApache(origStream);
    }
  }
  // Resolve java.util.Base64 reflectively so this class still loads on Java 7 runtimes.
  static {
    boolean base64Found = false;
    try {
      Class.forName("java.util.Base64");
      java8GetEncoder = getMethod("java.util.Base64", "getEncoder");
      java8WrapStreamEncode = getMethod("java.util.Base64$Encoder", "wrap", OutputStream.class);
      java8GetDecoder = getMethod("java.util.Base64", "getDecoder");
      java8WrapStreamDecode = getMethod("java.util.Base64$Decoder", "wrap", InputStream.class);
      base64Found = true;
    } catch (ClassNotFoundException | NoSuchMethodException e) {
      log.info("Couldn't find java.util.Base64 or methods, falling back to Apache Commons", e);
      base64Found = false;
    } finally {
      // foundJava8 is a blank final; this is the single assignment point.
      foundJava8 = base64Found;
    }
  }
  // Small helper to look up a public method reflectively by class and method name.
  private static Method getMethod(String className, String methodName, Class<?>... parameterTypes)
      throws ClassNotFoundException, NoSuchMethodException {
    Class<?> clazz = Class.forName(className);
    return clazz.getMethod(methodName, parameterTypes);
  }
  // lineLength 0 disables chunking so output is a single unbroken Base64 run.
  private OutputStream encodeOutputStreamWithApache(OutputStream origStream) {
    return new Base64OutputStream(origStream, true, 0, null);
  }
  private InputStream decodeInputStreamWithApache(InputStream origStream) {
    return new Base64InputStream(origStream);
  }
  // Force use of the Apache Base64 codec -- used only for benchmarking
  static void forceApacheBase64() {
    forceApacheBase64 = true;
  }
  private boolean canUseJava8() {
    return !forceApacheBase64 && foundJava8;
  }
  @Override
  public String getTag() {
    return "base64";
  }
}
| 3,586 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-codecs/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-codecs/src/main/java/org/apache/gobblin/codec/GzipCodec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.codec;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
/**
* Implement GZIP compression and decompression.
*/
public class GzipCodec implements StreamCodec {
  public static final String TAG = "gzip";

  /** Wraps the given stream so that bytes written to it are GZIP-compressed. */
  @Override
  public OutputStream encodeOutputStream(OutputStream origStream)
      throws IOException {
    GZIPOutputStream compressingStream = new GZIPOutputStream(origStream);
    return compressingStream;
  }

  /** Wraps the given stream so that GZIP data read from it is decompressed. */
  @Override
  public InputStream decodeInputStream(InputStream origStream)
      throws IOException {
    GZIPInputStream decompressingStream = new GZIPInputStream(origStream);
    return decompressingStream;
  }

  /** Identifier for this codec, {@value #TAG}. */
  @Override
  public String getTag() {
    return TAG;
  }
}
| 3,587 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/test/java/org/apache/gobblin/troubleshooter/AutomaticTroubleshooterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.troubleshooter;
import java.util.Properties;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooter;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooterFactory;
import org.apache.gobblin.util.ConfigUtils;
import static org.mockito.Mockito.*;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
public class AutomaticTroubleshooterTest {
  // Fixed copy-paste bug: the logger previously referenced AutoTroubleshooterLogAppenderTest,
  // mislabeling the source of log statements emitted by this test class.
  private final static Logger log = LogManager.getLogger(AutomaticTroubleshooterTest.class);

  /** Happy path: a warning logged while the troubleshooter runs becomes a captured issue and event. */
  @Test
  public void canCollectAndRefineIssues()
      throws Exception {
    Properties properties = new Properties();
    AutomaticTroubleshooter troubleshooter =
        AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(properties));
    try {
      troubleshooter.start();
      log.warn("Test warning");
      troubleshooter.refineIssues();
      troubleshooter.logIssueSummary();
      String summaryMessage = troubleshooter.getIssueSummaryMessage();
      assertTrue(summaryMessage.contains("Test warning"));
      String detailedMessage = troubleshooter.getIssueDetailsMessage();
      assertTrue(detailedMessage.contains("Test warning"));
      EventSubmitter eventSubmitter = mock(EventSubmitter.class);
      troubleshooter.reportJobIssuesAsEvents(eventSubmitter);
      assertEquals(1, troubleshooter.getIssueRepository().getAll().size());
      verify(eventSubmitter, times(1)).submit((GobblinEventBuilder) any());
    } finally {
      troubleshooter.stop();
    }
  }

  /** With the troubleshooter disabled, no issues are collected and no events are emitted. */
  @Test
  public void canDisable()
      throws Exception {
    Properties properties = new Properties();
    properties.put(ConfigurationKeys.TROUBLESHOOTER_DISABLED, "true");
    AutomaticTroubleshooter troubleshooter =
        AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(properties));
    try {
      troubleshooter.start();
      log.warn("Test warning");
      troubleshooter.refineIssues();
      troubleshooter.logIssueSummary();
      EventSubmitter eventSubmitter = mock(EventSubmitter.class);
      troubleshooter.reportJobIssuesAsEvents(eventSubmitter);
      assertEquals(0, troubleshooter.getIssueRepository().getAll().size());
      verify(eventSubmitter, never()).submit((GobblinEventBuilder) any());
    } finally {
      troubleshooter.stop();
    }
  }

  /** Event reporting can be disabled independently: issues are still collected but not submitted. */
  @Test
  public void canDisableEventReporter()
      throws Exception {
    Properties properties = new Properties();
    properties.put(ConfigurationKeys.TROUBLESHOOTER_DISABLED, "false");
    properties.put(ConfigurationKeys.TROUBLESHOOTER_DISABLE_EVENT_REPORTING, "true");
    AutomaticTroubleshooter troubleshooter =
        AutomaticTroubleshooterFactory.createForJob(ConfigUtils.propertiesToConfig(properties));
    try {
      troubleshooter.start();
      log.warn("Test warning");
      troubleshooter.refineIssues();
      troubleshooter.logIssueSummary();
      EventSubmitter eventSubmitter = mock(EventSubmitter.class);
      troubleshooter.reportJobIssuesAsEvents(eventSubmitter);
      assertEquals(1, troubleshooter.getIssueRepository().getAll().size());
      verify(eventSubmitter, never()).submit((GobblinEventBuilder) any());
    } finally {
      troubleshooter.stop();
    }
  }
}
| 3,588 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/test/java/org/apache/gobblin/troubleshooter/AutoTroubleshooterLogAppenderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.troubleshooter;
import java.io.IOException;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoUnit;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import org.apache.gobblin.runtime.ThrowableWithErrorCode;
import org.apache.gobblin.runtime.troubleshooter.InMemoryIssueRepository;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.IssueRepository;
import org.apache.gobblin.runtime.troubleshooter.IssueSeverity;
import static org.testng.Assert.assertEquals;
public class AutoTroubleshooterLogAppenderTest {
  private final static Logger log = LogManager.getLogger(AutoTroubleshooterLogAppenderTest.class);
  // Fresh in-memory repository and appender under test, rebuilt before every test method.
  IssueRepository issueRepository;
  AutoTroubleshooterLogAppender appender;
  @BeforeMethod
  public void setUp() {
    issueRepository = new InMemoryIssueRepository(100);
    appender = new AutoTroubleshooterLogAppender(issueRepository);
  }
  /** A plain WARN event (no throwable) becomes an issue with the message as summary. */
  @Test
  public void canLogWarning()
      throws Exception {
    appender.append(new LoggingEvent(log.getName(), log, System.currentTimeMillis(), Level.WARN, "test", null));
    Issue issue = issueRepository.getAll().get(0);
    Assert.assertEquals(issue.getSeverity(), IssueSeverity.WARN);
    Assert.assertEquals(issue.getSummary(), "test");
    Assert.assertEquals(issue.getSourceClass(), getClass().getName());
    // Sanity-check the issue timestamp is close to "now" (within a minute either way).
    Assert.assertTrue(issue.getTime().isAfter(ZonedDateTime.now().minus(1, ChronoUnit.MINUTES)));
    Assert.assertTrue(issue.getTime().isBefore(ZonedDateTime.now().plus(1, ChronoUnit.MINUTES)));
    Assert.assertTrue(issue.getCode().length() > 1);
    assertEquals(appender.getProcessedEventCount(), 1);
  }
  /** An ERROR event carrying an exception captures both messages and the stack trace. */
  @Test
  public void canLogException()
      throws Exception {
    Exception exception;
    try {
      // Throwing exception to get a real stack trace in it
      throw new IOException("test exception");
    } catch (Exception e) {
      exception = e;
    }
    appender.append(
        new LoggingEvent(log.getName(), log, System.currentTimeMillis(), Level.ERROR, "test message", exception));
    Issue issue = issueRepository.getAll().get(0);
    Assert.assertEquals(issue.getSeverity(), IssueSeverity.ERROR);
    Assert.assertTrue(issue.getSummary().contains("test message"));
    Assert.assertTrue(issue.getSummary().contains("test exception"));
    Assert.assertTrue(issue.getCode().length() > 1);
    Assert.assertTrue(issue.getDetails().contains("IOException"));
  }
  /**
   * Issue codes are derived from the exception's stack trace, not its message, so repeated
   * exceptions thrown from the same code location must deduplicate to a single issue.
   */
  @Test
  public void willGetSameErrorCodesForSameStackTraces()
      throws Exception {
    for (int i = 0; i < 5; i++) {
      Exception exception;
      try {
        // Throwing exception to get a real stack trace in it
        // Messages are intentionally different in every loop. We are checking that as all exceptions with
        // same stack trace will get the same event code
        try {
          throw new InvalidOperationException("test inner exception " + i);
        } catch (Exception inner) {
          throw new IOException("test outer exception " + i, inner);
        }
      } catch (Exception e) {
        exception = e;
      }
      appender.append(
          new LoggingEvent(log.getName(), log, System.currentTimeMillis(), Level.ERROR, "test message", exception));
    }
    List<Issue> issues = issueRepository.getAll();
    Assert.assertEquals(issues.size(), 1); // all issues should have the same error code and get deduplicated
  }
  /** Exceptions implementing ThrowableWithErrorCode use their own code instead of a generated hash. */
  @Test
  public void canLogExceptionWithSpecificErrorCode()
      throws Exception {
    Exception exception;
    try {
      throw new TestException("test exception", "TestCode");
    } catch (Exception e) {
      exception = e;
    }
    appender.append(
        new LoggingEvent(log.getName(), log, System.currentTimeMillis(), Level.ERROR, "test message", exception));
    Issue issue = issueRepository.getAll().get(0);
    Assert.assertEquals(issue.getSeverity(), IssueSeverity.ERROR);
    Assert.assertEquals(issue.getCode(), "TestCode");
  }
  // Minimal exception type carrying an explicit error code, for the test above.
  private static class TestException extends Exception implements ThrowableWithErrorCode {
    String errorCode;
    public TestException(String message, String errorCode) {
      super(message);
      this.errorCode = errorCode;
    }
    @Override
    public String getErrorCode() {
      return errorCode;
    }
  }
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/main/java/org/apache/gobblin/troubleshooter/AutomaticTroubleshooterImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.troubleshooter;
import java.util.List;
import java.util.Objects;
import org.apache.commons.text.TextStringBuilder;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import com.google.common.collect.ImmutableList;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooter;
import org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooterConfig;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.IssueEventBuilder;
import org.apache.gobblin.runtime.troubleshooter.IssueRefinery;
import org.apache.gobblin.runtime.troubleshooter.IssueRepository;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException;
/**
* @see org.apache.gobblin.runtime.troubleshooter.AutomaticTroubleshooter
* */
@Slf4j
@Singleton
public class AutomaticTroubleshooterImpl implements AutomaticTroubleshooter {
private final AutomaticTroubleshooterConfig config;
private final IssueRefinery issueRefinery;
@Getter
private final IssueRepository issueRepository;
private AutoTroubleshooterLogAppender troubleshooterLogger;
@Inject
public AutomaticTroubleshooterImpl(AutomaticTroubleshooterConfig config, IssueRepository issueRepository,
IssueRefinery issueRefinery) {
this.config = Objects.requireNonNull(config);
this.issueRepository = Objects.requireNonNull(issueRepository);
this.issueRefinery = Objects.requireNonNull(issueRefinery);
if (config.isDisabled()) {
throw new RuntimeException("Cannot create a real troubleshooter because it is disabled in configuration. "
+ "Use AutomaticTroubleshooterFactory that will create "
+ "a NoopAutomaticTroubleshooter for this case.");
}
}
@Override
public void start() {
setupLogAppender();
}
@Override
public void stop() {
removeLogAppender();
}
private void setupLogAppender() {
org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
troubleshooterLogger = new AutoTroubleshooterLogAppender(issueRepository);
troubleshooterLogger.setThreshold(Level.WARN);
troubleshooterLogger.activateOptions();
rootLogger.addAppender(troubleshooterLogger);
log.info("Configured logger for automatic troubleshooting");
}
private void removeLogAppender() {
org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
rootLogger.removeAppender(troubleshooterLogger);
log.info("Removed logger for automatic troubleshooting. Processed {} events.",
troubleshooterLogger.getProcessedEventCount());
}
@Override
public void reportJobIssuesAsEvents(EventSubmitter eventSubmitter)
throws TroubleshooterException {
if (config.isDisableEventReporting()) {
log.info(
"Troubleshooter will not report issues as GobblinTrackingEvents. Remove the following property to re-enable it: "
+ ConfigurationKeys.TROUBLESHOOTER_DISABLE_EVENT_REPORTING);
return;
}
List<Issue> issues = issueRepository.getAll();
log.info("Reporting troubleshooter issues as Gobblin tracking events. Issue count: " + issues.size());
for (Issue issue : issues) {
IssueEventBuilder eventBuilder = new IssueEventBuilder(IssueEventBuilder.JOB_ISSUE);
eventBuilder.setIssue(issue);
eventSubmitter.submit(eventBuilder);
}
}
@Override
public void refineIssues()
throws TroubleshooterException {
List<Issue> issues = issueRepository.getAll();
List<Issue> refinedIssues = issueRefinery.refine(ImmutableList.copyOf(issues));
issueRepository.replaceAll(refinedIssues);
}
@Override
public void logIssueSummary()
throws TroubleshooterException {
log.info(getIssueSummaryMessage());
}
@Override
public void logIssueDetails()
throws TroubleshooterException {
log.info(getIssueDetailsMessage());
}
@Override
public String getIssueSummaryMessage()
throws TroubleshooterException {
List<Issue> issues = issueRepository.getAll();
TextStringBuilder sb = new TextStringBuilder();
sb.appendln("");
sb.appendln("vvvvv============= Issues (summary) =============vvvvv");
for (int i = 0; i < issues.size(); i++) {
Issue issue = issues.get(i);
sb.appendln("%s) %s %s %s | source: %s", i + 1, issue.getSeverity().toString(), issue.getCode(),
issue.getSummary(), issue.getSourceClass());
}
sb.append("^^^^^=============================================^^^^^");
return sb.toString();
}
@Override
public String getIssueDetailsMessage()
throws TroubleshooterException {
List<Issue> issues = issueRepository.getAll();
TextStringBuilder sb = new TextStringBuilder();
sb.appendln("");
sb.appendln("vvvvv============= Issues (detailed) =============vvvvv");
for (int i = 0; i < issues.size(); i++) {
Issue issue = issues.get(i);
sb.appendln("%s) %s %s %s", i + 1, issue.getSeverity().toString(), issue.getCode(), issue.getSummary());
sb.appendln("\tsource: %s", issue.getSourceClass());
if (issue.getDetails() != null) {
sb.appendln("\t" + issue.getDetails().replaceAll(System.lineSeparator(), System.lineSeparator() + "\t"));
}
if (issue.getProperties() != null) {
issue.getProperties().forEach((key, value) -> {
sb.appendln("\t%s: %s", key, value);
});
}
}
sb.append("^^^^^================================================^^^^^");
return sb.toString();
}
} | 3,590 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-troubleshooter/src/main/java/org/apache/gobblin/troubleshooter/AutoTroubleshooterLogAppender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.troubleshooter;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.text.StrBuilder;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LocationInfo;
import org.apache.log4j.spi.LoggingEvent;
import javax.annotation.concurrent.ThreadSafe;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.runtime.ThrowableWithErrorCode;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.IssueRepository;
import org.apache.gobblin.runtime.troubleshooter.IssueSeverity;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException;
/**
* Collects messages from log4j and converts them into issues that are used in {@link AutomaticTroubleshooter}.
*/
@Slf4j
@ThreadSafe
public class AutoTroubleshooterLogAppender extends AppenderSkeleton {
private static final int AUTO_GENERATED_HASH_LENGTH = 6;
private static final String AUTO_GENERATED_HASH_PREFIX = "T";
private final IssueRepository repository;
private final AtomicBoolean reportedRepositoryError = new AtomicBoolean(false);
private final AtomicInteger processedEventCount = new AtomicInteger();
  /**
   * @param issueRepository destination for issues derived from log events; must not be null
   */
  public AutoTroubleshooterLogAppender(IssueRepository issueRepository) {
    this.repository = Objects.requireNonNull(issueRepository);
  }
private static String getHash(String text) {
return AUTO_GENERATED_HASH_PREFIX + DigestUtils.sha256Hex(text).substring(0, AUTO_GENERATED_HASH_LENGTH)
.toUpperCase();
}
  /** @return the number of logging events this appender has processed so far */
  public int getProcessedEventCount() {
    return processedEventCount.get();
  }
@Override
protected void append(LoggingEvent event) {
processedEventCount.incrementAndGet();
Issue issue = convertToIssue(event);
try {
repository.put(issue);
} catch (TroubleshooterException e) {
if (reportedRepositoryError.compareAndSet(false, true)) {
log.warn("Failed to save the issue to the repository", e);
}
}
}
private Issue convertToIssue(LoggingEvent event) {
Issue.IssueBuilder issueBuilder =
Issue.builder().time(ZonedDateTime.ofInstant(Instant.ofEpochMilli(event.getTimeStamp()), ZoneOffset.UTC))
.severity(convert(event.getLevel())).code(getIssueCode(event)).sourceClass(event.getLoggerName());
if (event.getThrowableInformation() != null) {
Throwable throwable = event.getThrowableInformation().getThrowable();
issueBuilder.details(ExceptionUtils.getStackTrace(throwable));
String summarizedException =
StringUtils.substringBefore(ExceptionUtils.getRootCauseMessage(throwable), System.lineSeparator());
issueBuilder.summary(summarizedException + " | " + event.getRenderedMessage());
} else {
issueBuilder.summary(event.getRenderedMessage());
}
return issueBuilder.build();
}
private String getIssueCode(LoggingEvent event) {
if (event.getThrowableInformation() != null) {
return getIssueCode(event.getThrowableInformation().getThrowable());
}
LocationInfo locationInformation = event.getLocationInformation();
if (locationInformation.fullInfo != null) {
String locationInCode = locationInformation.getClassName() + locationInformation.getLineNumber();
return getHash(locationInCode);
} else {
return getHash(event.getLoggerName() + event.getMessage().toString());
}
}
private String getIssueCode(Throwable throwable) {
if (throwable instanceof ThrowableWithErrorCode) {
return ((ThrowableWithErrorCode) throwable).getErrorCode();
}
/*
* Ideally, each exception should have a unique machine-readable error code. Then we can easily group them together
* and remove duplicates. However, it’s not feasible to add error codes to the large legacy codebase overnight, so
* we generate them automatically.
*
* Good error codes should identify one specific problem, so they don’t always map to exception types.
* For example “FileNotFoundException” can mean that a user's file is not found, or some config file that job
* expects was not found, or credentials file is missing, and so on.
*
* Exception messages can have path names, job ids, and other unpredictable variable parts.
* So, even if the problem is exactly the same, the messages could be different.
*
* We pick an option to generate an error code as a hash of exception type and stack trace. This will produce
* a unique error code of the situation. However, when a codebase is refactored, stacktraces can change.
* As a result, such automatic error codes can be different between application versions.
* This should be fine within a single job, but it can affect system-wide reports that process data from
* multiple application versions.
* */
return getHash(getStackTraceWithoutExceptionMessage(throwable));
}
private String getStackTraceWithoutExceptionMessage(Throwable throwable) {
StrBuilder sb = new StrBuilder();
for (Throwable currentThrowable : ExceptionUtils.getThrowableList(throwable)) {
sb.appendln(currentThrowable.getClass().getName());
for (StackTraceElement stackTraceElement : currentThrowable.getStackTrace()) {
sb.appendln(stackTraceElement);
}
sb.appendln("---");
}
return sb.toString();
}
private IssueSeverity convert(Level level) {
if (level == Level.TRACE || level == Level.DEBUG) {
return IssueSeverity.DEBUG;
} else if (level == Level.INFO) {
return IssueSeverity.INFO;
} else if (level == Level.WARN) {
return IssueSeverity.WARN;
} else if (level == Level.ERROR) {
return IssueSeverity.ERROR;
} else if (level == Level.FATAL) {
return IssueSeverity.FATAL;
}
return IssueSeverity.DEBUG;
}
@Override
public void close() {
}
@Override
public boolean requiresLayout() {
return false;
}
}
| 3,591 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/test/java/org/apache/gobblin/runtime/ZkDatasetStateStoreTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.curator.test.TestingServer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Predicates;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.metastore.ZkStateStore;
import org.apache.gobblin.metastore.ZkStateStoreConfigurationKeys;
import org.apache.gobblin.util.ClassAliasResolver;
/**
* Unit tests for {@link ZkDatasetStateStore}.
**/
@Test(groups = { "gobblin.runtime" })
public class ZkDatasetStateStoreTest {
  private static final String TEST_JOB_NAME = "TestJobName1";
  private static final String TEST_JOB_NAME2 = "TestJobName2";
  private static final String TEST_JOB_ID = "TestJobId1";
  private static final String TEST_TASK_ID_PREFIX = "TestTask-";
  private static final String TEST_DATASET_URN = "TestDataset";
  private static final String TEST_DATASET_URN2 = "TestDataset2";

  // Table-name suffix of the "current" (latest) state alias. The constants are static,
  // so reference them through the class rather than through an instance.
  private static final String CURRENT_TABLE_SUFFIX =
      ZkDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX + ZkDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX;

  private TestingServer testingServer;
  private StateStore<JobState> zkJobStateStore;
  private DatasetStateStore<JobState.DatasetState> zkDatasetStateStore;
  private long startTime = System.currentTimeMillis();

  @BeforeClass
  public void setUp() throws Exception {
    ConfigBuilder configBuilder = ConfigBuilder.create();
    // Port -1 makes the embedded ZooKeeper testing server pick a free port.
    testingServer = new TestingServer(-1);

    zkJobStateStore = new ZkStateStore<>(testingServer.getConnectString(), "/STATE_STORE/TEST", false, JobState.class);

    configBuilder.addPrimitive(ZkStateStoreConfigurationKeys.STATE_STORE_ZK_CONNECT_STRING_KEY,
        testingServer.getConnectString());
    configBuilder.addPrimitive(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, "/STATE_STORE/TEST2");

    // Build the dataset state store through the "zk" alias to also exercise factory resolution.
    ClassAliasResolver<DatasetStateStore.Factory> resolver =
        new ClassAliasResolver<>(DatasetStateStore.Factory.class);
    DatasetStateStore.Factory stateStoreFactory =
        resolver.resolveClass("zk").newInstance();
    zkDatasetStateStore = stateStoreFactory.createStateStore(configBuilder.build());

    // clear data that may have been left behind by a prior test run
    zkJobStateStore.delete(TEST_JOB_NAME);
    zkDatasetStateStore.delete(TEST_JOB_NAME);
    zkJobStateStore.delete(TEST_JOB_NAME2);
    zkDatasetStateStore.delete(TEST_JOB_NAME2);
  }

  @Test
  public void testPersistJobState() throws IOException {
    JobState jobState = new JobState(TEST_JOB_NAME, TEST_JOB_ID);
    jobState.setId(TEST_JOB_ID);
    jobState.setProp("foo", "bar");
    jobState.setState(JobState.RunningState.COMMITTED);
    jobState.setStartTime(this.startTime);
    jobState.setEndTime(this.startTime + 1000);
    jobState.setDuration(1000);

    for (int i = 0; i < 3; i++) {
      TaskState taskState = new TaskState();
      taskState.setJobId(TEST_JOB_ID);
      taskState.setTaskId(TEST_TASK_ID_PREFIX + i);
      taskState.setId(TEST_TASK_ID_PREFIX + i);
      taskState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
      jobState.addTaskState(taskState);
    }

    zkJobStateStore.put(TEST_JOB_NAME, CURRENT_TABLE_SUFFIX, jobState);

    // second job name for testing getting store names in a later test case
    jobState.setJobName(TEST_JOB_NAME2);
    zkJobStateStore.put(TEST_JOB_NAME2, CURRENT_TABLE_SUFFIX, jobState);
  }

  @Test(dependsOnMethods = "testPersistJobState")
  public void testGetJobState() throws IOException {
    JobState jobState = zkJobStateStore.get(TEST_JOB_NAME, CURRENT_TABLE_SUFFIX, TEST_JOB_ID);

    Assert.assertEquals(jobState.getId(), TEST_JOB_ID);
    Assert.assertEquals(jobState.getJobName(), TEST_JOB_NAME);
    Assert.assertEquals(jobState.getJobId(), TEST_JOB_ID);
    Assert.assertEquals(jobState.getState(), JobState.RunningState.COMMITTED);
    Assert.assertEquals(jobState.getStartTime(), this.startTime);
    Assert.assertEquals(jobState.getEndTime(), this.startTime + 1000);
    Assert.assertEquals(jobState.getDuration(), 1000);

    Assert.assertEquals(jobState.getCompletedTasks(), 3);
    for (int i = 0; i < jobState.getCompletedTasks(); i++) {
      TaskState taskState = jobState.getTaskStates().get(i);
      Assert.assertEquals(taskState.getJobId(), TEST_JOB_ID);
      Assert.assertEquals(taskState.getTaskId(), TEST_TASK_ID_PREFIX + i);
      Assert.assertEquals(taskState.getId(), TEST_TASK_ID_PREFIX + i);
      Assert.assertEquals(taskState.getWorkingState(), WorkUnitState.WorkingState.COMMITTED);
    }
  }

  @Test(dependsOnMethods = "testGetJobState")
  public void testPersistDatasetState() throws IOException {
    JobState.DatasetState datasetState = new JobState.DatasetState(TEST_JOB_NAME, TEST_JOB_ID);

    datasetState.setDatasetUrn(TEST_DATASET_URN);
    datasetState.setState(JobState.RunningState.COMMITTED);
    datasetState.setId(TEST_DATASET_URN);
    datasetState.setStartTime(this.startTime);
    datasetState.setEndTime(this.startTime + 1000);
    datasetState.setDuration(1000);

    for (int i = 0; i < 3; i++) {
      TaskState taskState = new TaskState();
      taskState.setJobId(TEST_JOB_ID);
      taskState.setTaskId(TEST_TASK_ID_PREFIX + i);
      taskState.setId(TEST_TASK_ID_PREFIX + i);
      taskState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
      datasetState.addTaskState(taskState);
    }

    zkDatasetStateStore.persistDatasetState(TEST_DATASET_URN, datasetState);

    // persist a second dataset state to test that retrieval of multiple dataset states works
    datasetState.setDatasetUrn(TEST_DATASET_URN2);
    datasetState.setId(TEST_DATASET_URN2);
    datasetState.setDuration(2000);
    zkDatasetStateStore.persistDatasetState(TEST_DATASET_URN2, datasetState);

    // second job name for testing getting store names in a later test case
    datasetState.setJobName(TEST_JOB_NAME2);
    zkDatasetStateStore.persistDatasetState(TEST_DATASET_URN2, datasetState);
  }

  @Test(dependsOnMethods = "testPersistDatasetState")
  public void testGetDatasetState() throws IOException {
    JobState.DatasetState datasetState =
        zkDatasetStateStore.getLatestDatasetState(TEST_JOB_NAME, TEST_DATASET_URN);

    Assert.assertEquals(datasetState.getDatasetUrn(), TEST_DATASET_URN);
    Assert.assertEquals(datasetState.getJobName(), TEST_JOB_NAME);
    Assert.assertEquals(datasetState.getJobId(), TEST_JOB_ID);
    Assert.assertEquals(datasetState.getState(), JobState.RunningState.COMMITTED);
    Assert.assertEquals(datasetState.getStartTime(), this.startTime);
    Assert.assertEquals(datasetState.getEndTime(), this.startTime + 1000);
    Assert.assertEquals(datasetState.getDuration(), 1000);
    Assert.assertEquals(datasetState.getId(), TEST_DATASET_URN);

    Assert.assertEquals(datasetState.getCompletedTasks(), 3);
    for (int i = 0; i < datasetState.getCompletedTasks(); i++) {
      TaskState taskState = datasetState.getTaskStates().get(i);
      Assert.assertEquals(taskState.getJobId(), TEST_JOB_ID);
      Assert.assertEquals(taskState.getTaskId(), TEST_TASK_ID_PREFIX + i);
      Assert.assertEquals(taskState.getId(), TEST_TASK_ID_PREFIX + i);
      Assert.assertEquals(taskState.getWorkingState(), WorkUnitState.WorkingState.COMMITTED);
    }
  }

  @Test(dependsOnMethods = { "testGetDatasetState" })
  public void testGetStoreNames() throws IOException {
    List<String> storeNames = this.zkJobStateStore.getStoreNames(Predicates.alwaysTrue());
    Collections.sort(storeNames);

    // assertEquals (not assertTrue on a comparison) so a failure reports the actual size
    Assert.assertEquals(storeNames.size(), 2);
    Assert.assertEquals(storeNames.get(0), TEST_JOB_NAME);
    Assert.assertEquals(storeNames.get(1), TEST_JOB_NAME2);

    storeNames = this.zkDatasetStateStore.getStoreNames(Predicates.alwaysTrue());
    Collections.sort(storeNames);

    Assert.assertEquals(storeNames.size(), 2);
    Assert.assertEquals(storeNames.get(0), TEST_JOB_NAME);
    Assert.assertEquals(storeNames.get(1), TEST_JOB_NAME2);
  }

  @Test(dependsOnMethods = "testGetStoreNames")
  public void testGetPreviousDatasetStatesByUrns() throws IOException {
    Map<String, JobState.DatasetState> datasetStatesByUrns =
        zkDatasetStateStore.getLatestDatasetStatesByUrns(TEST_JOB_NAME);
    Assert.assertEquals(datasetStatesByUrns.size(), 2);

    JobState.DatasetState datasetState = datasetStatesByUrns.get(TEST_DATASET_URN);
    Assert.assertEquals(datasetState.getId(), TEST_DATASET_URN);
    Assert.assertEquals(datasetState.getDatasetUrn(), TEST_DATASET_URN);
    Assert.assertEquals(datasetState.getJobName(), TEST_JOB_NAME);
    Assert.assertEquals(datasetState.getJobId(), TEST_JOB_ID);
    Assert.assertEquals(datasetState.getState(), JobState.RunningState.COMMITTED);
    Assert.assertEquals(datasetState.getStartTime(), this.startTime);
    Assert.assertEquals(datasetState.getEndTime(), this.startTime + 1000);
    Assert.assertEquals(datasetState.getDuration(), 1000);

    datasetState = datasetStatesByUrns.get(TEST_DATASET_URN2);
    Assert.assertEquals(datasetState.getId(), TEST_DATASET_URN2);
    Assert.assertEquals(datasetState.getDatasetUrn(), TEST_DATASET_URN2);
    Assert.assertEquals(datasetState.getJobName(), TEST_JOB_NAME);
    Assert.assertEquals(datasetState.getJobId(), TEST_JOB_ID);
    Assert.assertEquals(datasetState.getState(), JobState.RunningState.COMMITTED);
    Assert.assertEquals(datasetState.getStartTime(), this.startTime);
    Assert.assertEquals(datasetState.getEndTime(), this.startTime + 1000);
    Assert.assertEquals(datasetState.getDuration(), 2000);
  }

  @Test(dependsOnMethods = "testGetPreviousDatasetStatesByUrns")
  public void testDeleteJobState() throws IOException {
    JobState jobState = zkJobStateStore.get(TEST_JOB_NAME, CURRENT_TABLE_SUFFIX, TEST_JOB_ID);

    Assert.assertNotNull(jobState);
    Assert.assertEquals(jobState.getJobId(), TEST_JOB_ID);

    zkJobStateStore.delete(TEST_JOB_NAME);

    jobState = zkJobStateStore.get(TEST_JOB_NAME, CURRENT_TABLE_SUFFIX, TEST_JOB_ID);
    Assert.assertNull(jobState);
  }

  @Test(dependsOnMethods = "testGetPreviousDatasetStatesByUrns")
  public void testDeleteDatasetJobState() throws IOException {
    String tableName = TEST_DATASET_URN + "-" + CURRENT_TABLE_SUFFIX;
    JobState.DatasetState datasetState = zkDatasetStateStore.get(TEST_JOB_NAME, tableName, TEST_DATASET_URN);

    Assert.assertNotNull(datasetState);
    Assert.assertEquals(datasetState.getJobId(), TEST_JOB_ID);

    zkDatasetStateStore.delete(TEST_JOB_NAME, Collections.singletonList(tableName));

    datasetState = zkDatasetStateStore.get(TEST_JOB_NAME, tableName, TEST_DATASET_URN);
    Assert.assertNull(datasetState);
  }

  @AfterClass
  public void tearDown() throws IOException {
    zkJobStateStore.delete(TEST_JOB_NAME);
    zkDatasetStateStore.delete(TEST_JOB_NAME);
    zkJobStateStore.delete(TEST_JOB_NAME2);
    zkDatasetStateStore.delete(TEST_JOB_NAME2);
    if (testingServer != null) {
      testingServer.close();
    }
  }
}
| 3,592 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/test/java/org/apache/gobblin/runtime/StateStoreWatermarkStorageTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Map;
import org.apache.curator.test.TestingServer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.ZkStateStoreConfigurationKeys;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.DefaultCheckpointableWatermark;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
/**
* Unit tests for {@link StateStoreBasedWatermarkStorage}.
**/
@Test(groups = { "gobblin.runtime" })
public class StateStoreWatermarkStorageTest {
private static final String TEST_JOB_ID = "TestJob1";
private TestingServer testingServer;
private long startTime = System.currentTimeMillis();
@BeforeClass
public void setUp() throws Exception {
testingServer = new TestingServer(-1);
}
@Test( groups = {"disabledOnCI"} )
public void testPersistWatermarkStateToZk() throws IOException {
CheckpointableWatermark watermark = new DefaultCheckpointableWatermark("source", new LongWatermark(startTime));
TaskState taskState = new TaskState();
taskState.setJobId(TEST_JOB_ID);
taskState.setProp(ConfigurationKeys.JOB_NAME_KEY, "JobName-" + startTime);
// watermark storage configuration
taskState.setProp(StateStoreBasedWatermarkStorage.WATERMARK_STORAGE_TYPE_KEY, "zk");
taskState.setProp(StateStoreBasedWatermarkStorage.WATERMARK_STORAGE_CONFIG_PREFIX +
ZkStateStoreConfigurationKeys.STATE_STORE_ZK_CONNECT_STRING_KEY, testingServer.getConnectString());
StateStoreBasedWatermarkStorage watermarkStorage = new StateStoreBasedWatermarkStorage(taskState);
watermarkStorage.commitWatermarks(ImmutableList.of(watermark));
Map<String, CheckpointableWatermark> watermarkMap = watermarkStorage.getCommittedWatermarks(DefaultCheckpointableWatermark.class,
ImmutableList.of("source"));
Assert.assertEquals(watermarkMap.size(), 1);
Assert.assertEquals(((LongWatermark) watermarkMap.get("source").getWatermark()).getValue(), startTime);
}
@AfterClass
public void tearDown() throws IOException {
if (testingServer != null) {
testingServer.close();
}
}
} | 3,593 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin/runtime/ZkDatasetStateStoreFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.ZkStateStoreConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
@Alias("zk")
public class ZkDatasetStateStoreFactory implements DatasetStateStore.Factory {

  /**
   * Builds a ZooKeeper-backed dataset state store from the given configuration.
   * Requires the ZK connect string and the state store root directory;
   * value compression is optional and falls back to the system-wide default.
   */
  @Override
  public DatasetStateStore<JobState.DatasetState> createStateStore(Config config) {
    String zkConnectString = config.getString(ZkStateStoreConfigurationKeys.STATE_STORE_ZK_CONNECT_STRING_KEY);
    String stateStoreRootDir = config.getString(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY);
    boolean useCompressedValues = ConfigUtils.getBoolean(config,
        ConfigurationKeys.STATE_STORE_COMPRESSED_VALUES_KEY,
        ConfigurationKeys.DEFAULT_STATE_STORE_COMPRESSED_VALUES);
    try {
      return new ZkDatasetStateStore(zkConnectString, stateStoreRootDir, useCompressedValues);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create ZkDatasetStateStore with factory", e);
    }
  }
}
| 3,594 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin/runtime/ZkDatasetStateStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.CharMatcher;
import com.google.common.base.Predicate;
import com.google.common.base.Strings;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metastore.ZkStateStore;
/**
* A custom extension to {@link ZkStateStore} for storing and reading {@link JobState.DatasetState}s.
*
* <p>
* The purpose of having this class is to hide some implementation details that are unnecessarily
* exposed if using the {@link ZkStateStore} to store and serve dataset states between job runs.
* </p>
*
*/
public class ZkDatasetStateStore extends ZkStateStore<JobState.DatasetState>
implements DatasetStateStore<JobState.DatasetState> {
private static final Logger LOGGER = LoggerFactory.getLogger(ZkDatasetStateStore.class);
private static final String CURRENT_SUFFIX = CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX;
public ZkDatasetStateStore(String connectString, String storeRootDir, boolean compressedValues) throws IOException {
super(connectString, storeRootDir, compressedValues, JobState.DatasetState.class);
}
/**
* Get a {@link Map} from dataset URNs to the latest {@link JobState.DatasetState}s.
*
* @param jobName the job name
* @return a {@link Map} from dataset URNs to the latest {@link JobState.DatasetState}s
* @throws IOException if there's something wrong reading the {@link JobState.DatasetState}s
*/
public Map<String, JobState.DatasetState> getLatestDatasetStatesByUrns(String jobName) throws IOException {
List<JobState.DatasetState> previousDatasetStates = getAll(jobName, new Predicate<String>() {
@Override
public boolean apply(String input) {
return input.endsWith(CURRENT_SUFFIX);
}});
Map<String, JobState.DatasetState> datasetStatesByUrns = Maps.newHashMap();
for (JobState.DatasetState previousDatasetState : previousDatasetStates) {
datasetStatesByUrns.put(previousDatasetState.getDatasetUrn(), previousDatasetState);
}
// The dataset (job) state from the deprecated "current.jst" will be read even though
// the job has transitioned to the new dataset-based mechanism
if (datasetStatesByUrns.size() > 1) {
datasetStatesByUrns.remove(ConfigurationKeys.DEFAULT_DATASET_URN);
}
return datasetStatesByUrns;
}
/**
* Get the latest {@link JobState.DatasetState} of a given dataset.
*
* @param storeName the name of the dataset state store
* @param datasetUrn the dataset URN
* @return the latest {@link JobState.DatasetState} of the dataset or {@link null} if it is not found
* @throws IOException
*/
public JobState.DatasetState getLatestDatasetState(String storeName, String datasetUrn) throws IOException {
String alias =
Strings.isNullOrEmpty(datasetUrn) ? CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX
: CharMatcher.is(':').replaceFrom(datasetUrn, '.') + "-" + CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX;
return get(storeName, alias, datasetUrn);
}
/**
* Persist a given {@link JobState.DatasetState}.
*
* @param datasetUrn the dataset URN
* @param datasetState the {@link JobState.DatasetState} to persist
* @throws IOException if there's something wrong persisting the {@link JobState.DatasetState}
*/
public void persistDatasetState(String datasetUrn, JobState.DatasetState datasetState) throws IOException {
String jobName = datasetState.getJobName();
String jobId = datasetState.getJobId();
datasetUrn = CharMatcher.is(':').replaceFrom(datasetUrn, '.');
String tableName = Strings.isNullOrEmpty(datasetUrn) ? jobId + DATASET_STATE_STORE_TABLE_SUFFIX
: datasetUrn + "-" + jobId + DATASET_STATE_STORE_TABLE_SUFFIX;
LOGGER.info("Persisting " + tableName + " to the job state store");
put(jobName, tableName, datasetState);
createAlias(jobName, tableName, getAliasName(datasetUrn));
}
@Override
public void persistDatasetURNs(String storeName, Collection<String> datasetUrns)
throws IOException {
// do nothing for now
}
private static String getAliasName(String datasetUrn) {
return Strings.isNullOrEmpty(datasetUrn) ? CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX
: datasetUrn + "-" + CURRENT_DATASET_STATE_FILE_SUFFIX + DATASET_STATE_STORE_TABLE_SUFFIX;
}
} | 3,595 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin/metastore/ZkStateStoreConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metastore;
/**
 * Configuration keys for the ZooKeeper-backed {@link ZkStateStore}.
 */
public class ZkStateStoreConfigurationKeys {
  /** ZooKeeper connect string, e.g. {@code "host1:2181,host2:2181"}. */
  public static final String STATE_STORE_ZK_CONNECT_STRING_KEY = "state.store.zk.connectString";
  /** Default connect string, pointing at a local ZooKeeper on the standard port. */
  public static final String STATE_STORE_ZK_CONNECT_STRING_DEFAULT = "localhost:2181";

  private ZkStateStoreConfigurationKeys() {
    // Constants holder; not meant to be instantiated.
  }
}
| 3,596 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-helix/src/main/java/org/apache/gobblin/metastore/ZkStateStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metastore;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.io.Text;
import org.apache.helix.AccessOption;
import org.apache.helix.manager.zk.ByteArraySerializer;
import org.apache.helix.store.HelixPropertyStore;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.io.StreamUtils;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
/**
* An implementation of {@link StateStore} backed by ZooKeeper.
*
* <p>
*
* This implementation stores serialized {@link State}s as a blob in ZooKeeper in the Sequence file format.
* The ZK path is in the format /STORE_ROOT_DIR/STORE_NAME/TABLE_NAME.
* State keys are state IDs (see {@link State#getId()}), and values are objects of {@link State} or
* any of its extensions. Keys will be empty strings if state IDs are not set
* (i.e., {@link State#getId()} returns <em>null</em>). In this case, the
* {@link ZkStateStore#get(String, String, String)} method may not work.
* </p>
*
* @param <T> state object type
**/
public class ZkStateStore<T extends State> implements StateStore<T> {
/**
* Corresponds to {@link AccessOption}, which defines behavior for accessing znodes (get, remove, exists). The value 0 means not to
* throws exceptions if the znode does not exist (i.e. do not enable {@link AccessOption#THROW_EXCEPTION_IFNOTEXIST}
*
* Note: This variable is not be used for create calls like {@link HelixPropertyStore#create(String, Object, int)}
* which require specifying if the znode is {@link AccessOption#PERSISTENT}, {@link AccessOption#EPHEMERAL}, etc.
**/
private static final int DEFAULT_OPTION = 0;
// Class of the state objects to be put into the store
private final Class<T> stateClass;
private final HelixPropertyStore<byte[]> propStore;
private final boolean compressedValues;
/**
* State store that stores instances of {@link State}s in a ZooKeeper-backed {@link HelixPropertyStore}
* storeRootDir will be created when the first entry is written if it does not exist
* @param connectString ZooKeeper connect string
* @param storeRootDir The root directory for the state store
* @param compressedValues should values be compressed for storage?
* @param stateClass The type of state being stored
* @throws IOException
*/
public ZkStateStore(String connectString, String storeRootDir, boolean compressedValues, Class<T> stateClass)
    throws IOException {
  this.stateClass = stateClass;
  this.compressedValues = compressedValues;
  // Helix stores raw byte arrays; (de)serialization of State objects is handled by this class.
  this.propStore = new ZkHelixPropertyStore<>(connectString, new ByteArraySerializer(), storeRootDir);
}
private String formPath(String storeName) {
return "/" + storeName;
}
private String formPath(String storeName, String tableName) {
return "/" + storeName + "/" + tableName;
}
@Override
public boolean create(String storeName) throws IOException {
String path = formPath(storeName);
return propStore.exists(path, DEFAULT_OPTION) || propStore.create(path, ArrayUtils.EMPTY_BYTE_ARRAY,
AccessOption.PERSISTENT);
}
@Override
public boolean create(String storeName, String tableName) throws IOException {
String path = formPath(storeName, tableName);
if (propStore.exists(path, DEFAULT_OPTION)) {
throw new IOException(String.format("State already exists for storeName %s tableName %s", storeName,
tableName));
}
return propStore.create(path, ArrayUtils.EMPTY_BYTE_ARRAY, AccessOption.PERSISTENT);
}
@Override
public boolean exists(String storeName, String tableName) throws IOException {
String path = formPath(storeName, tableName);
return propStore.exists(path, DEFAULT_OPTION);
}
/**
* Serializes the state to the {@link DataOutput}
* @param dataOutput output target receiving the serialized data
* @param state the state to serialize
* @throws IOException
*/
private void addStateToDataOutputStream(DataOutput dataOutput, T state) throws IOException {
new Text(Strings.nullToEmpty(state.getId())).write(dataOutput);
state.write(dataOutput);
}
/**
* Create a new znode with data if it does not exist otherwise update with data
* @param storeName storeName portion of znode path
* @param tableName tableName portion of znode path
* @param data znode data
* @throws IOException
*/
private void putData(String storeName, String tableName, byte[] data) throws IOException {
String path = formPath(storeName, tableName);
if (!propStore.exists(path, DEFAULT_OPTION)) {
// create with data
if (!propStore.create(path, data, AccessOption.PERSISTENT)) {
throw new IOException("Failed to create a state file for table " + tableName);
}
} else {
// Update
propStore.set(path, data, AccessOption.PERSISTENT);
}
}
@Override
public void put(String storeName, String tableName, T state) throws IOException {
putAll(storeName, tableName, Collections.singletonList(state));
}
@Override
public void putAll(String storeName, String tableName, Collection<T> states) throws IOException {
try (ByteArrayOutputStream byteArrayOs = new ByteArrayOutputStream();
OutputStream os = compressedValues ? new GZIPOutputStream(byteArrayOs) : byteArrayOs;
DataOutputStream dataOutput = new DataOutputStream(os)) {
for (T state : states) {
addStateToDataOutputStream(dataOutput, state);
}
dataOutput.close();
putData(storeName, tableName, byteArrayOs.toByteArray());
}
}
@Override
public T get(String storeName, String tableName, String stateId) throws IOException {
String path = formPath(storeName, tableName);
byte[] data = propStore.get(path, null, DEFAULT_OPTION);
List<T> states = Lists.newArrayList();
deserialize(data, states, stateId);
if (states.isEmpty()) {
return null;
} else {
return states.get(DEFAULT_OPTION);
}
}
/**
* Retrieve states from the state store based on the store name and a filtering predicate
* @param storeName The store name enclosing the state files
* @param predicate The predicate for state file filtering
* @return list of states matching matching the predicate
* @throws IOException
*/
protected List<T> getAll(String storeName, Predicate<String> predicate) throws IOException {
List<T> states = Lists.newArrayList();
String path = formPath(storeName);
byte[] data;
List<String> children = propStore.getChildNames(path, DEFAULT_OPTION);
if (children == null) {
return Collections.emptyList();
}
for (String c : children) {
if (predicate.apply(c)) {
data = propStore.get(path + "/" + c, null, DEFAULT_OPTION);
deserialize(data, states);
}
}
return states;
}
@Override
public List<T> getAll(String storeName, String tableName) throws IOException {
List<T> states = Lists.newArrayList();
String path = formPath(storeName, tableName);
byte[] data = propStore.get(path, null, DEFAULT_OPTION);
deserialize(data, states);
return states;
}
@Override
public List<T> getAll(String storeName) throws IOException {
return getAll(storeName, Predicates.<String>alwaysTrue());
}
@Override
public List<String> getTableNames(String storeName, Predicate<String> predicate) throws IOException {
List<String> names = Lists.newArrayList();
String path = formPath(storeName);
List<String> children = propStore.getChildNames(path, DEFAULT_OPTION);
if (children != null) {
for (String c : children) {
if (predicate.apply(c)) {
names.add(c);
}
}
}
return names;
}
/**
* Get store names in the state store
*
* @param predicate only returns names matching predicate
* @return (possibly empty) list of store names from the given store
* @throws IOException
*/
public List<String> getStoreNames(Predicate<String> predicate)
throws IOException {
List<String> names = Lists.newArrayList();
String path = formPath("");
List<String> children = propStore.getChildNames(path, DEFAULT_OPTION);
if (children != null) {
for (String c : children) {
if (predicate.apply(c)) {
names.add(c);
}
}
}
return names;
}
@Override
public void createAlias(String storeName, String original, String alias) throws IOException {
String pathOriginal = formPath(storeName, original);
byte[] data;
if (!propStore.exists(pathOriginal, DEFAULT_OPTION)) {
throw new IOException(String.format("State does not exist for table %s", original));
}
data = propStore.get(pathOriginal, null, DEFAULT_OPTION);
putData(storeName, alias, data);
}
@Override
public void delete(String storeName, String tableName) throws IOException {
propStore.remove(formPath(storeName, tableName), DEFAULT_OPTION);
}
@Override
public void delete(String storeName, List<String> tableNames) throws IOException {
List<String> paths = tableNames.stream().map(table -> formPath(storeName, table)).collect(Collectors.toList());
propStore.remove(paths, DEFAULT_OPTION);
}
@Override
public void delete(String storeName) throws IOException {
propStore.remove(formPath(storeName), DEFAULT_OPTION);
}
/**
* Deserialize data into a list of {@link State}s.
* @param data byte array
* @param states output list of states
* @param stateId optional key filter. Set to null for no filtering.
* @throws IOException
*/
private void deserialize(byte[] data, List<T> states, String stateId) throws IOException {
if (data != null) {
Text key = new Text();
try (ByteArrayInputStream bais = new ByteArrayInputStream(data);
InputStream is = StreamUtils.isCompressed(data) ? new GZIPInputStream(bais) : bais;
DataInputStream dis = new DataInputStream(is)){
// keep deserializing while we have data
while (dis.available() > 0) {
T state = this.stateClass.newInstance();
key.readFields(dis);
state.readFields(dis);
state.setId(key.toString());
states.add(state);
if (stateId != null && key.toString().equals(stateId)) {
return;
}
}
} catch (EOFException e) {
// no more data. GZIPInputStream.available() doesn't return 0 until after EOF.
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new IOException("failure deserializing state from ZkStateStore", e);
}
}
}
/**
* Deserialize data into a list of {@link State}s.
* @param data byte array
* @param states output list of states
* @throws IOException
*/
private void deserialize(byte[] data, List<T> states) throws IOException {
deserialize(data, states, null);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metastore;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ConfigUtils;
@Alias("zk")
public class ZkStateStoreFactory implements StateStore.Factory {
@Override
public <T extends State> StateStore<T> createStateStore(Config config, Class<T> stateClass) {
String connectString = ConfigUtils.getString(config, ZkStateStoreConfigurationKeys.STATE_STORE_ZK_CONNECT_STRING_KEY,
ZkStateStoreConfigurationKeys.STATE_STORE_ZK_CONNECT_STRING_DEFAULT);
String rootDir = config.getString(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY);
boolean compressedValues = ConfigUtils.getBoolean(config, ConfigurationKeys.STATE_STORE_COMPRESSED_VALUES_KEY,
ConfigurationKeys.DEFAULT_STATE_STORE_COMPRESSED_VALUES);
try {
return new ZkStateStore(connectString, rootDir, compressedValues, stateClass);
} catch (Exception e) {
throw new RuntimeException("Failed to create ZkStateStore with factory", e);
}
}
} | 3,598 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.ConfigFactory;
import kafka.consumer.ConsumerIterator;
import kafka.message.MessageAndMetadata;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.metrics.kafka.KafkaKeyValueProducerPusher;
import org.apache.gobblin.metrics.kafka.Pusher;
/**
 * Test {@link KafkaKeyValueProducerPusher}.
 */
public class KafkaKeyValueProducerPusherTest {
  public static final String TOPIC = KafkaKeyValueProducerPusherTest.class.getSimpleName();

  private KafkaTestBase kafkaTestHelper;

  /** Starts an embedded Kafka broker and provisions the test topic. */
  @BeforeClass
  public void setup() throws Exception {
    kafkaTestHelper = new KafkaTestBase();
    kafkaTestHelper.startServers();

    kafkaTestHelper.provisionTopic(TOPIC);
  }

  /**
   * Pushes two keyed messages and verifies both keys and values round-trip through Kafka
   * in order. Also checks that the scoped config overrides the generic config: the bogus
   * broker address in the constructor is superseded by the bootstrap servers in the config.
   */
  @Test
  public void test() throws IOException {
    // Test that the scoped config overrides the generic config
    Pusher pusher = new KafkaKeyValueProducerPusher<byte[], byte[]>("127.0.0.1:dummy", TOPIC,
        Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
            ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort()))));

    String msg1 = "msg1";
    String msg2 = "msg2";

    pusher.pushMessages(Lists.newArrayList(Pair.of("key1", msg1.getBytes()), Pair.of("key2", msg2.getBytes())));

    // Give the asynchronous producer time to deliver before consuming.
    try {
      Thread.sleep(1000);
    } catch(InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);

    // Use TestNG assertions rather than the Java `assert` keyword, which is a no-op
    // unless the JVM runs with -ea and would silently skip these checks.
    Assert.assertTrue(iterator.hasNext());
    MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
    Assert.assertEquals(new String(messageAndMetadata.key()), "key1");
    Assert.assertEquals(new String(messageAndMetadata.message()), msg1);

    Assert.assertTrue(iterator.hasNext());
    messageAndMetadata = iterator.next();
    Assert.assertEquals(new String(messageAndMetadata.key()), "key2");
    Assert.assertEquals(new String(messageAndMetadata.message()), msg2);

    pusher.close();
  }

  /** Best-effort teardown of the embedded Kafka broker; failures are reported, not rethrown. */
  @AfterClass
  public void after() {
    try {
      this.kafkaTestHelper.close();
    } catch(Exception e) {
      System.err.println("Failed to close Kafka server.");
    }
  }
}