index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/multiprocessing/Lock.java | package com.airbnb.reair.multiprocessing;
/**
 * A lock that a job must acquire before it can run. A shared lock may be held
 * by many jobs at the same time, while an exclusive lock may be held by only
 * one job at a time.
 */
public class Lock {
  /** How the lock may be shared among jobs. */
  public enum Type {
    SHARED, EXCLUSIVE
  }

  private final Type type;
  private final String name;

  /**
   * Creates a lock.
   *
   * @param type whether the lock is shared or exclusive
   * @param name identifier for the lock
   */
  public Lock(Type type, String name) {
    this.type = type;
    this.name = name;
  }

  /** @return the lock's identifier */
  public String getName() {
    return name;
  }

  /** @return whether this lock is shared or exclusive */
  public Type getType() {
    return type;
  }

  @Override
  public String toString() {
    return "<Lock type: " + type + " name: " + name + ">";
  }
}
| 9,400 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/utils/RetryingTaskRunner.java | package com.airbnb.reair.utils;
import org.apache.log4j.Logger;
/**
 * Utility to re-run a method in case it throws transient exception. E.g. when inserting into a DB,
 * the DB might temporarily be down or there might be network issues.
 *
 * <p>Between attempts the runner sleeps {@code baseSleepInterval * 2^attempt} seconds, capped at
 * {@code maxSleepInterval} seconds.
 */
public class RetryingTaskRunner {
  public static Logger LOG = Logger.getLogger(RetryingTaskRunner.class);

  public static final int DEFAULT_NUM_ATTEMPTS = 10;
  public static final int DEFAULT_BASE_SLEEP_INTERVAL = 2;
  public static final int DEFAULT_MAX_SLEEP_INTERVAL = 10 * 60;

  private int numAttempts;
  private int baseSleepInterval;
  private int maxSleepInterval;

  /**
   * Creates a runner that makes up to {@code numAttempts} attempts with exponential backoff,
   * capped at {@link #DEFAULT_MAX_SLEEP_INTERVAL} seconds between attempts.
   *
   * @param numAttempts number of total attempts to run a task
   * @param baseSleepInterval base sleep time in seconds for the exponential backoff
   */
  public RetryingTaskRunner(int numAttempts, int baseSleepInterval) {
    // Fix: this constructor previously left maxSleepInterval at 0, which disabled the
    // backoff cap and could cause a zero-sleep busy loop in runUntilSuccessful().
    this(numAttempts, baseSleepInterval, DEFAULT_MAX_SLEEP_INTERVAL);
  }

  /**
   * @param defaultNumAttempts number of total attempts to run task
   * @param baseSleepInterval wait {@code baseSleepInterval}*2^(attempt no.) between attempts.
   * @param maxSleepInterval maximum number of seconds to wait between attempts.
   */
  public RetryingTaskRunner(int defaultNumAttempts, int baseSleepInterval, int maxSleepInterval) {
    this.numAttempts = defaultNumAttempts;
    this.baseSleepInterval = baseSleepInterval;
    this.maxSleepInterval = maxSleepInterval;
  }

  public RetryingTaskRunner() {
    this(DEFAULT_NUM_ATTEMPTS, DEFAULT_BASE_SLEEP_INTERVAL, DEFAULT_MAX_SLEEP_INTERVAL);
  }

  /**
   * Computes how long to sleep (in seconds) before retrying after the given attempt number,
   * using exponential backoff capped at maxSleepInterval.
   */
  private int backoffSeconds(int attempt) {
    // Compute in long with a bounded shift so that large attempt numbers (possible in
    // runUntilSuccessful) can't overflow before the cap is applied.
    long sleep = (long) baseSleepInterval * (1L << Math.min(attempt, 30));
    return (int) Math.min(sleep, maxSleepInterval);
  }

  /**
   * Run a task, retrying a fixed number of times if there is a failure.
   *
   * @param task the task to run.
   *
   * @throws Exception if there's an error with running the task
   */
  public void runWithRetries(RetryableTask task) throws Exception {
    for (int i = 0; i < numAttempts; i++) {
      try {
        task.run();
        return;
      } catch (Exception e) {
        if (i == numAttempts - 1) {
          // We ran out of attempts - propagate up
          throw e;
        }
        // Otherwise, retry after a little bit. Fix: the sleep time is now capped at
        // maxSleepInterval; previously the cap flag was never set in this method.
        int sleepTime = backoffSeconds(i);
        LOG.error("Got an exception! Sleeping for " + sleepTime + " seconds and retrying.", e);
        try {
          Thread.sleep(sleepTime * 1000L);
        } catch (InterruptedException ie) {
          LOG.error("Unexpected interruption!", ie);
          // Restore the interrupt flag before propagating so callers can observe it.
          Thread.currentThread().interrupt();
          throw ie;
        }
      }
    }
  }

  /**
   * Run a task and retry until it succeeds.
   *
   * @param task the task to run
   */
  public void runUntilSuccessful(RetryableTask task) {
    int attempt = 0;
    while (true) {
      try {
        task.run();
        return;
      } catch (Exception e) {
        // Retry after a capped, exponentially growing sleep.
        int sleepTime = backoffSeconds(attempt);
        LOG.error("Got an exception! Sleeping for " + sleepTime + " seconds and retrying.", e);
        try {
          Thread.sleep(sleepTime * 1000L);
        } catch (InterruptedException ie) {
          LOG.error("Unexpected interruption!", ie);
          // Fix: restore the interrupt flag instead of silently swallowing it, so an
          // interrupted caller can eventually notice.
          Thread.currentThread().interrupt();
        }
      }
      attempt++;
    }
  }
}
| 9,401 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/utils/ReplicationTestUtils.java | package com.airbnb.reair.utils;
import com.google.common.collect.Lists;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveParameterKeys;
import com.airbnb.reair.common.PathBuilder;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.EmbeddedMySqlDb;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.util.StringUtils;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Utilities for running replication tests: creating dummy Hive tables/partitions with fake data
 * files, manipulating their modified-time parameters, and querying/cleaning up test MySQL DBs.
 */
public class ReplicationTestUtils {
  private static final Log LOG = LogFactory.getLog(
      ReplicationTestUtils.class);

  /**
   * Returns the warehouse location for the given Hive object:
   * warehouseRoot/dbName/tableName, plus /partitionName when the spec is a partition.
   */
  private static Path getPathForHiveObject(Path warehouseRoot,
      HiveObjectSpec spec) {
    PathBuilder pb = new PathBuilder(warehouseRoot);
    pb.add(spec.getDbName());
    pb.add(spec.getTableName());
    if (spec.isPartition()) {
      pb.add(spec.getPartitionName());
    }
    return pb.toPath();
  }

  /** Creates a couple of small text files in the directory to serve as dummy table data. */
  private static void createSomeTextFiles(Configuration conf, Path directory)
      throws IOException {
    createTextFile(conf, directory, "file1.txt", "foobar");
    createTextFile(conf, directory, "file2.txt", "123");
  }

  /**
   * Creates the specified text file using Hadoop API's.
   *
   * @param conf configuration object used to resolve the filesystem
   * @param directory directory where to create some files
   * @param filename name of the file to create within {@code directory}
   * @param contents text to write into the file
   * @throws IOException if there's an error writing the file
   */
  public static void createTextFile(Configuration conf,
      Path directory,
      String filename,
      String contents)
      throws IOException {
    Path filePath = new Path(directory, filename);
    FileSystem fs = FileSystem.get(filePath.toUri(), conf);
    FSDataOutputStream file1OutputStream = fs.create(filePath);
    file1OutputStream.writeBytes(contents);
    file1OutputStream.close();
  }

  /**
   * Creates an unpartitioned table with some dummy files.
   *
   * @param conf configuration used for filesystem operations
   * @param ms metastore client used to create the table (and its DB, if missing)
   * @param tableSpec DB and table name of the table to create
   * @param tableType type of table to create (managed, external, or virtual view)
   * @param warehouseRoot root directory under which the table's data directory is made
   * @return the Table object created in the metastore
   * @throws IOException if there's an error creating the dummy data files
   * @throws HiveMetastoreException if there's an error talking to the metastore
   */
  public static Table createUnpartitionedTable(Configuration conf,
      HiveMetastoreClient ms,
      HiveObjectSpec tableSpec,
      TableType tableType,
      Path warehouseRoot)
      throws IOException, HiveMetastoreException {
    // Set up the basic properties of the table
    Table table = new Table();
    table.setDbName(tableSpec.getDbName());
    table.setTableName(tableSpec.getTableName());
    Map<String, String> parameters = new HashMap<>();
    // Stamp the last-modified-time parameter with the current time.
    parameters.put(HiveParameterKeys.TLDT, Long.toString(
        System.currentTimeMillis()));
    table.setParameters(parameters);
    table.setPartitionKeys(new ArrayList<>());
    table.setTableType(tableType.toString());
    // Setup the columns and the storage descriptor
    StorageDescriptor sd = new StorageDescriptor();
    // Set the schema for the table
    List<FieldSchema> columns = new ArrayList<>();
    columns.add(new FieldSchema("key", "string",
        "my comment"));
    sd.setCols(columns);
    if (tableType == TableType.MANAGED_TABLE
        || tableType == TableType.EXTERNAL_TABLE) {
      Path tableLocation = getPathForHiveObject(warehouseRoot, tableSpec);
      sd.setLocation(tableLocation.toString());
      // Make some fake files
      createSomeTextFiles(conf, tableLocation);
    } else if (tableType == TableType.VIRTUAL_VIEW) {
      // Views have no location or data files.
      table.setTableType(TableType.VIRTUAL_VIEW.toString());
    }
    table.setSd(sd);
    // Create DB for table if one does not exist
    if (!ms.existsDb(table.getDbName())) {
      ms.createDatabase(new Database(table.getDbName(), null, null, null));
    }
    ms.createTable(table);
    return table;
  }

  /**
   * Creates a table that is partitioned on ds and hr.
   *
   * @param conf configuration used for filesystem operations
   * @param ms metastore client used to create the table (and its DB, if missing)
   * @param tableSpec DB and table name of the table to create
   * @param tableType type of table to create (managed, external, or virtual view)
   * @param warehouseRoot root directory under which the table's data directory is made
   * @return the Table object created in the metastore
   * @throws IOException if there's an error with the filesystem
   * @throws HiveMetastoreException if there's an error talking to the metastore
   */
  public static Table createPartitionedTable(Configuration conf,
      HiveMetastoreClient ms,
      HiveObjectSpec tableSpec,
      TableType tableType,
      Path warehouseRoot)
      throws IOException, HiveMetastoreException {
    Path tableLocation = getPathForHiveObject(warehouseRoot, tableSpec);
    // Set up the basic properties of the table
    Table table = new Table();
    table.setDbName(tableSpec.getDbName());
    table.setTableName(tableSpec.getTableName());
    Map<String, String> parameters = new HashMap<>();
    // Stamp the last-modified-time parameter with the current time.
    parameters.put(HiveParameterKeys.TLDT, Long.toString(
        System.currentTimeMillis()));
    table.setParameters(parameters);
    table.setTableType(tableType.toString());
    // Set up the partitioning scheme
    List<FieldSchema> partitionCols = new ArrayList<>();
    partitionCols.add(new FieldSchema("ds", "string", "my ds comment"));
    partitionCols.add(new FieldSchema("hr", "string", "my hr comment"));
    table.setPartitionKeys(partitionCols);
    // Setup the columns and the storage descriptor
    StorageDescriptor sd = new StorageDescriptor();
    // Set the schema for the table
    List<FieldSchema> columns = new ArrayList<>();
    columns.add(new FieldSchema("key", "string",
        "my comment"));
    sd.setCols(columns);
    if (tableType == TableType.MANAGED_TABLE
        || tableType == TableType.EXTERNAL_TABLE) {
      sd.setLocation(tableLocation.toString());
    }
    // Set serde info up front; see the note in createPartition() about NPEs.
    sd.setSerdeInfo(new SerDeInfo("LazySimpleSerde",
        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
        new HashMap<>()));
    table.setSd(sd);
    // Create DB for table if one does not exist
    if (!ms.existsDb(table.getDbName())) {
      ms.createDatabase(new Database(table.getDbName(), null, null, null));
    }
    ms.createTable(table);
    return table;
  }

  /**
   * Creates a partition in a table with some dummy files.
   *
   * @param conf configuration used for filesystem operations
   * @param ms metastore client used to add the partition
   * @param partitionSpec DB / table / partition name of the partition to create
   * @return the Partition object added to the metastore
   * @throws IOException if there's an error creating the dummy data files
   * @throws HiveMetastoreException if the table is missing or the metastore call fails
   */
  public static Partition createPartition(Configuration conf,
      HiveMetastoreClient ms,
      HiveObjectSpec partitionSpec)
      throws IOException, HiveMetastoreException {
    HiveObjectSpec tableSpec = partitionSpec.getTableSpec();
    if (! ms.existsTable(tableSpec.getDbName(),
        tableSpec.getTableName())) {
      throw new HiveMetastoreException("Missing table " + tableSpec);
    }
    Table table = ms.getTable(tableSpec.getDbName(), tableSpec.getTableName());
    Partition partition = new Partition();
    partition.setDbName(partitionSpec.getDbName());
    partition.setTableName(partitionSpec.getTableName());
    // Convert the partition name (e.g. ds=1/hr=2) into its key-value form, then
    // take the values in order for Partition.setValues().
    Map<String, String> partitionKeyValues = ms.partitionNameToMap(
        partitionSpec.getPartitionName());
    partition.setValues(Lists.newArrayList(partitionKeyValues.values()));
    StorageDescriptor psd = new StorageDescriptor(table.getSd());
    TableType tableType = TableType.valueOf(table.getTableType());
    if (tableType.equals(TableType.MANAGED_TABLE)
        || tableType.equals(TableType.EXTERNAL_TABLE)) {
      // Make the location for the partition to be in a subdirectory of the
      // table location. String concatenation here is not great.
      String partitionLocation = table.getSd().getLocation() + "/"
          + partitionSpec.getPartitionName();
      psd.setLocation(partitionLocation);
      createSomeTextFiles(conf, new Path(partitionLocation));
    }
    // Set the serde info as it can cause an NPE otherwise when creating
    // ql Partition objects.
    psd.setSerdeInfo(new SerDeInfo("LazySimpleSerde",
        "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
        new HashMap<>()));
    partition.setSd(psd);
    Map<String, String> parameters = new HashMap<>();
    parameters.put(HiveParameterKeys.TLDT, Long.toString(
        System.currentTimeMillis()));
    partition.setParameters(parameters);
    ms.addPartition(partition);
    return partition;
  }

  /**
   * Sets the last-modified-time parameter of the given table or partition to the current time.
   *
   * <p>NOTE(review): this only mutates the object fetched from the metastore; no alter call is
   * made here, so the change is presumably persisted elsewhere (or not at all) — confirm with
   * callers.
   *
   * @param ms metastore client to fetch the object from
   * @param objectSpec table or partition whose modified time should be updated
   *
   * @throws HiveMetastoreException if there's an error talking to the metastore
   */
  public static void updateModifiedTime(HiveMetastoreClient ms,
      HiveObjectSpec objectSpec)
      throws HiveMetastoreException {
    if (objectSpec.isPartition()) {
      Partition partition = ms.getPartition(objectSpec.getDbName(),
          objectSpec.getTableName(), objectSpec.getPartitionName());
      partition.getParameters().put(HiveParameterKeys.TLDT,
          Long.toString(System.currentTimeMillis()));
    } else {
      Table table = ms.getTable(objectSpec.getDbName(),
          objectSpec.getTableName());
      table.getParameters().put(HiveParameterKeys.TLDT,
          Long.toString(System.currentTimeMillis()));
    }
  }

  /**
   * Returns the last-modified-time parameter of the given table or partition.
   *
   * @param ms metastore client to fetch the object from
   * @param objectSpec table or partition to read the modified time from
   * @return the value of the last-modified-time parameter, as stored in the metastore
   *
   * @throws HiveMetastoreException if there's an error talking to the metastore
   */
  public static String getModifiedTime(HiveMetastoreClient ms,
      HiveObjectSpec objectSpec)
      throws HiveMetastoreException {
    if (objectSpec.isPartition()) {
      Partition partition = ms.getPartition(objectSpec.getDbName(),
          objectSpec.getTableName(), objectSpec.getPartitionName());
      return partition.getParameters().get(HiveParameterKeys.TLDT);
    } else {
      Table table = ms.getTable(objectSpec.getDbName(),
          objectSpec.getTableName());
      return table.getParameters().get(HiveParameterKeys.TLDT);
    }
  }

  /**
   * Fetches the first row of the given MySQL table, with every requested column cast to a string.
   *
   * @param jdbcUrl JDBC URL of the MySQL server
   * @param username DB username
   * @param password DB password
   * @param tableName table to query
   * @param columnNames columns to fetch, in order
   * @return the column values of the first row, or null if the table is empty
   *
   * @throws ClassNotFoundException if the MySQL JDBC driver can't be loaded
   * @throws SQLException if there's an error running the query
   */
  public static List<String> getRow(String jdbcUrl, String username,
      String password,
      String tableName,
      List<String> columnNames)
      throws ClassNotFoundException, SQLException {
    return getRow(jdbcUrl, username, password, tableName, columnNames, null);
  }

  /**
   * Fetches the first row matching the given WHERE clause from a MySQL table, with every
   * requested column cast to a string.
   *
   * @param jdbcUrl JDBC URL of the MySQL server
   * @param username DB username
   * @param password DB password
   * @param tableName table to query
   * @param columnNames columns to fetch, in order
   * @param whereClause SQL WHERE clause body (without the WHERE keyword), or null for no filter
   * @return the column values of the first matching row, or null if there is no match
   *
   * @throws ClassNotFoundException if the MySQL JDBC driver can't be loaded
   * @throws SQLException if there's an error running the query
   */
  public static List<String> getRow(String jdbcUrl,
      String username,
      String password,
      String tableName,
      List<String> columnNames,
      String whereClause)
      throws ClassNotFoundException, SQLException {
    StringBuilder qb = new StringBuilder();
    List<String> columnExpressions = new ArrayList<>();
    for (String columnName : columnNames) {
      // Cast each column so that heterogeneous column types all come back as strings.
      columnExpressions.add(String.format("CAST(%s AS CHAR)", columnName));
    }
    qb.append("SELECT ");
    qb.append(StringUtils.join(", ", columnExpressions));
    qb.append(" FROM ");
    qb.append(tableName);
    if (whereClause != null) {
      qb.append(" WHERE ");
      qb.append(whereClause);
    }
    LOG.debug("Running query " + qb.toString());
    Class.forName("com.mysql.jdbc.Driver");
    // NOTE(review): the connection/statement are not closed if the query throws. Acceptable
    // for test code, but try-with-resources would be safer.
    Connection connection = DriverManager.getConnection(jdbcUrl, username,
        password);
    Statement statement = connection.createStatement();
    ResultSet rs = statement.executeQuery(qb.toString());
    List<String> row = null;
    if (rs.next()) {
      row = new ArrayList<>();
      for (int i = 1; i <= columnNames.size(); i++) {
        row.add(rs.getString(i));
      }
    }
    connection.close();
    return row;
  }

  /**
   * Fetches the first row matching the given WHERE clause from a MySQL table, using a connection
   * from the supplied factory, with every requested column cast to a string.
   *
   * @param dbConnectionFactory factory providing the DB connection
   * @param dbName database (catalog) to select before querying
   * @param tableName table to query
   * @param columnNames columns to fetch, in order
   * @param whereClause SQL WHERE clause body (without the WHERE keyword), or null for no filter
   * @return the column values of the first matching row, or null if there is no match
   *
   * @throws ClassNotFoundException if the MySQL JDBC driver can't be loaded
   * @throws SQLException if there's an error running the query
   */
  public static List<String> getRow(DbConnectionFactory dbConnectionFactory,
      String dbName,
      String tableName,
      List<String> columnNames,
      String whereClause)
      throws ClassNotFoundException, SQLException {
    StringBuilder qb = new StringBuilder();
    List<String> columnExpressions = new ArrayList<>();
    for (String columnName : columnNames) {
      columnExpressions.add(String.format("CAST(%s AS CHAR)", columnName));
    }
    qb.append("SELECT ");
    qb.append(StringUtils.join(", ", columnExpressions));
    qb.append(" FROM ");
    qb.append(tableName);
    if (whereClause != null) {
      qb.append(" WHERE ");
      qb.append(whereClause);
    }
    LOG.debug("Running query " + qb.toString());
    Class.forName("com.mysql.jdbc.Driver");
    // NOTE(review): as in the other getRow() overloads, resources are not closed on error.
    Connection connection = dbConnectionFactory.getConnection();
    connection.setCatalog(dbName);
    Statement statement = connection.createStatement();
    ResultSet rs = statement.executeQuery(qb.toString());
    List<String> row = null;
    if (rs.next()) {
      row = new ArrayList<>();
      for (int i = 1; i <= columnNames.size(); i++) {
        row.add(rs.getString(i));
      }
    }
    connection.close();
    return row;
  }

  /** @return the JDBC URL for the embedded MySQL DB, without a database name. */
  public static String getJdbcUrl(EmbeddedMySqlDb db) {
    return String.format("jdbc:mysql://%s:%s/",
        db.getHost(), db.getPort());
  }

  /** @return the JDBC URL for the given database on the embedded MySQL DB. */
  public static String getJdbcUrl(EmbeddedMySqlDb db, String dbName) {
    return String.format("jdbc:mysql://%s:%s/%s",
        db.getHost(), db.getPort(), dbName);
  }

  /**
   * Drops the given MySQL database if it exists.
   *
   * @param connectionFactory factory providing the DB connection
   * @param dbName name of the database to drop
   * @throws SQLException if there's an error executing the statement
   */
  public static void dropDatabase(DbConnectionFactory connectionFactory,
      String dbName) throws SQLException {
    Connection connection = connectionFactory.getConnection();
    String sql = String.format("DROP DATABASE IF EXISTS %s", dbName);
    Statement statement = connection.createStatement();
    try {
      statement.execute(sql);
    } finally {
      statement.close();
      connection.close();
    }
  }

  /**
   * Drops all tables from the given Hive DB.
   *
   * @param ms metastore client to use
   * @param dbName Hive database whose tables should be dropped
   * @throws HiveMetastoreException if there's an error talking to the metastore
   */
  public static void dropTables(HiveMetastoreClient ms, String dbName)
      throws HiveMetastoreException {
    for (String tableName : ms.getTables(dbName, "*")) {
      ms.dropTable(dbName, tableName, true);
    }
  }

  /**
   * Drops a table (and its data) from the given Hive DB.
   *
   * @param ms metastore client to use
   * @param spec DB and table name of the table to drop
   * @throws HiveMetastoreException if there's an error talking to the metastore
   */
  public static void dropTable(HiveMetastoreClient ms, HiveObjectSpec spec)
      throws HiveMetastoreException {
    ms.dropTable(spec.getDbName(), spec.getTableName(), true);
  }

  /**
   * Drops a partition (and its data) from the given partitioned table.
   *
   * @param ms metastore client to use
   * @param spec partition to drop; must be a partition spec, not a table spec
   * @throws HiveMetastoreException if the spec is not a partition or the metastore call fails
   */
  public static void dropPartition(HiveMetastoreClient ms, HiveObjectSpec spec)
      throws HiveMetastoreException {
    if (spec.isPartition()) {
      ms.dropPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName(), true);
      return;
    } else {
      // NOTE(review): the message is missing a space before the spec.
      throw new HiveMetastoreException("unpartitioned table provided" + spec.toString());
    }
  }
}
| 9,402 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/utils/RetryableTask.java | package com.airbnb.reair.utils;
/**
 * A task that can be retried.
 */
public interface RetryableTask {
  /**
   * Runs the task once.
   *
   * @throws Exception if the attempt fails; the caller may retry the task
   */
  void run() throws Exception;
}
| 9,403 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/HiveMetastoreException.java | package com.airbnb.reair.common;
/**
 * Exception thrown when there is an issue with the Hive metastore.
 */
public class HiveMetastoreException extends Exception {
  /**
   * @param message description of the metastore error
   */
  public HiveMetastoreException(String message) {
    super(message);
  }

  /**
   * @param throwable underlying cause of the metastore error
   */
  public HiveMetastoreException(Throwable throwable) {
    super(throwable);
  }
}
| 9,404 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/ProcessRunner.java | package com.airbnb.reair.common;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.List;
/**
 * Runs a process while streaming stdout and stderr to log4j.
 */
public class ProcessRunner {
  private static final Log LOG = LogFactory.getLog(ProcessRunner.class);

  // Command and arguments to execute, in exec form.
  private final List<String> args;

  /**
   * @param args the command and its arguments to run
   */
  public ProcessRunner(List<String> args) {
    this.args = args;
  }

  /**
   * Runs the process to completion, logging its stdout and stderr.
   *
   * @return the process's exit code along with its captured stdout
   *
   * @throws ProcessRunException if the process can't be started or the wait is interrupted
   */
  public RunResult run() throws ProcessRunException {
    try {
      // Fix: args is already a List; the previous Arrays.asList(args) wrapped it into a
      // single-element list, logging "[[cmd, ...]]" instead of "[cmd, ...]".
      LOG.debug("Running: " + args);
      Process process = new ProcessBuilder(args).start();
      printPid(process);
      String currentThreadName = Thread.currentThread().getName();
      StreamLogger stdoutLogger =
          new StreamLogger(currentThreadName + "-child-stdout", process.getInputStream(), true);
      StreamLogger stderrLogger =
          new StreamLogger(currentThreadName + "-child-stderr", process.getErrorStream(), false);
      stdoutLogger.start();
      stderrLogger.start();
      // Drain both output streams fully before waiting on the process so the
      // captured stdout is complete.
      stdoutLogger.join();
      stderrLogger.join();
      int returnCode = process.waitFor();
      return new RunResult(returnCode, stdoutLogger.getStreamAsString());
    } catch (IOException e) {
      throw new ProcessRunException(e);
    } catch (InterruptedException e) {
      // Fix: restore the interrupt flag so callers can still observe the interruption
      // after the wrapped exception is thrown.
      Thread.currentThread().interrupt();
      throw new ProcessRunException("Shouldn't be interrupted!", e);
    }
  }

  /**
   * Logs the child process's PID when possible. Reads the private "pid" field via
   * reflection since this code predates Process.pid(); only works for UNIX processes.
   */
  private static void printPid(Process process) {
    // There's no legit way to get the PID
    if (process.getClass().getName().equals("java.lang.UNIXProcess")) {
      try {
        Field field = process.getClass().getDeclaredField("pid");
        field.setAccessible(true);
        long pid = field.getInt(process);
        LOG.debug("PID is " + pid);
      } catch (Throwable e) {
        // Best effort only - include the cause in the log for debugging.
        LOG.error("Unable to get PID!", e);
      }
    }
  }
}
| 9,405 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/DistCpException.java | package com.airbnb.reair.common;
/**
 * An exception thrown when there's an error running DistCp.
 */
public class DistCpException extends Exception {
  /**
   * @param message description of the DistCp error
   */
  public DistCpException(String message) {
    super(message);
  }

  /**
   * @param message description of the DistCp error
   * @param cause underlying cause of the error
   */
  public DistCpException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause underlying cause of the error
   */
  public DistCpException(Throwable cause) {
    super(cause);
  }
}
| 9,406 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/ArgumentException.java | package com.airbnb.reair.common;
/**
 * Exception thrown when there is an error with the supplied arguments.
 */
public class ArgumentException extends Exception {
  /**
   * @param message description of the argument error
   */
  public ArgumentException(String message) {
    super(message);
  }

  /**
   * @param message description of the argument error
   * @param cause underlying cause of the error
   */
  public ArgumentException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause underlying cause of the error
   */
  public ArgumentException(Throwable cause) {
    super(cause);
  }
}
| 9,407 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/ThriftHiveMetastoreClient.java | package com.airbnb.reair.common;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import java.util.List;
import java.util.Map;
/**
 * Concrete implementation of a HiveMetastoreClient using Thrift RPC's.
 *
 * <p>Connection handling: the client connects eagerly in the constructor, and on any Thrift
 * error it closes the transport so that the next call reconnects lazily via
 * {@code connectIfNeeded()}. Most methods are synchronized since the underlying Thrift client
 * is not thread-safe.
 */
public class ThriftHiveMetastoreClient implements HiveMetastoreClient {
  private static final Log LOG = LogFactory.getLog(ThriftHiveMetastoreClient.class);

  // Socket timeout in seconds (converted to ms when passed to TSocket).
  // NOTE(review): not final, so it is technically mutable - consider making it
  // `private static final`.
  private static int DEFAULT_SOCKET_TIMEOUT = 600;

  private String host;
  private int port;
  private int clientSocketTimeout;

  // Thrift transport and client; both are null whenever the connection is closed.
  private TTransport transport;
  private ThriftHiveMetastore.Client client;

  /**
   * Creates a client and connects to the given metastore Thrift endpoint.
   *
   * @param host hostname of the metastore Thrift service
   * @param port port of the metastore Thrift service
   *
   * @throws HiveMetastoreException if the connection can't be established
   */
  public ThriftHiveMetastoreClient(String host, int port) throws HiveMetastoreException {
    this.host = host;
    this.port = port;
    this.clientSocketTimeout = DEFAULT_SOCKET_TIMEOUT;

    connect();
  }

  /**
   * Opens the Thrift transport and creates the RPC client.
   *
   * @throws HiveMetastoreException if the transport can't be opened
   */
  private void connect() throws HiveMetastoreException {

    LOG.info("Connecting to ThriftHiveMetastore " + host + ":" + port);
    transport = new TSocket(host, port, 1000 * clientSocketTimeout);

    this.client = new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    try {
      transport.open();
    } catch (TTransportException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Closes the connection, if open. Subsequent calls will reconnect automatically.
   */
  public void close() {
    if (transport != null) {
      transport.close();
      transport = null;
      client = null;
    }
  }

  // Reconnects if a previous error (or an explicit close()) dropped the connection.
  private void connectIfNeeded() throws HiveMetastoreException {
    if (transport == null) {
      connect();
    }
  }

  /**
   * Adds a partition to the metastore.
   *
   * @param partition the partition to add
   * @return the partition as created in the metastore
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized Partition addPartition(Partition partition) throws HiveMetastoreException {
    try {
      connectIfNeeded();
      return client.add_partition(partition);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Fetches a table from the metastore.
   *
   * @param dbName database of the table
   * @param tableName name of the table
   * @return the table, or null if it does not exist
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized Table getTable(String dbName, String tableName)
      throws HiveMetastoreException {

    try {
      connectIfNeeded();
      return client.get_table(dbName, tableName);
    } catch (NoSuchObjectException e) {
      return null;
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Fetches a partition from the metastore by name.
   *
   * @param dbName database of the table
   * @param tableName name of the table
   * @param partitionName name of the partition, e.g. ds=1/hr=2
   * @return the partition, or null if it does not exist
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized Partition getPartition(String dbName, String tableName, String partitionName)
      throws HiveMetastoreException {

    try {
      connectIfNeeded();
      return client.get_partition_by_name(dbName, tableName, partitionName);
    } catch (NoSuchObjectException e) {
      return null;
    } catch (MetaException e) {
      // Brittle code - this was added to handle an issue with the Hive
      // Metstore. The MetaException is thrown when a table is
      // partitioned with one schema but the name follows a different one.
      // It's impossible to differentiate from that case and other
      // causes of the MetaException without something like this.
      if ("Invalid partition key & values".equals(e.getMessage())) {
        return null;
      } else {
        throw new HiveMetastoreException(e);
      }
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Alters an existing partition in the metastore.
   *
   * @param dbName database of the table
   * @param tableName name of the table
   * @param partition new definition of the partition
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized void alterPartition(String dbName, String tableName, Partition partition)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      client.alter_partition(dbName, tableName, partition);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Alters an existing table in the metastore.
   *
   * @param dbName database of the table
   * @param tableName name of the table to alter
   * @param table new definition of the table
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized void alterTable(String dbName, String tableName, Table table)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      client.alter_table(dbName, tableName, table);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Checks whether the given table is partitioned.
   *
   * @param dbName database of the table
   * @param tableName name of the table
   * @return true if the table exists and has at least one partition key
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public boolean isPartitioned(String dbName, String tableName) throws HiveMetastoreException {
    Table table = getTable(dbName, tableName);
    return table != null && table.getPartitionKeys().size() > 0;
  }

  /**
   * Checks whether the given partition exists.
   *
   * @param dbName database of the table
   * @param tableName name of the table
   * @param partitionName name of the partition, e.g. ds=1/hr=2
   * @return true if the partition exists
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized boolean existsPartition(String dbName, String tableName, String partitionName)
      throws HiveMetastoreException {
    return getPartition(dbName, tableName, partitionName) != null;
  }

  /**
   * Checks whether the given table exists.
   *
   * @param dbName database of the table
   * @param tableName name of the table
   * @return true if the table exists
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized boolean existsTable(String dbName, String tableName)
      throws HiveMetastoreException {
    return getTable(dbName, tableName) != null;
  }

  /**
   * Creates a table in the metastore.
   *
   * @param table the table to create
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized void createTable(Table table) throws HiveMetastoreException {
    try {
      connectIfNeeded();
      client.create_table(table);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Drops a table from the metastore.
   *
   * @param dbName database of the table
   * @param tableName name of the table to drop
   * @param deleteData whether the table's data should be deleted as well
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized void dropTable(String dbName, String tableName, boolean deleteData)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      client.drop_table(dbName, tableName, deleteData);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Drops a partition from the metastore.
   *
   * @param dbName database of the table
   * @param tableName name of the table
   * @param partitionName name of the partition to drop, e.g. ds=1/hr=2
   * @param deleteData whether the partition's data should be deleted as well
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized void dropPartition(String dbName, String tableName, String partitionName,
      boolean deleteData) throws HiveMetastoreException {
    try {
      connectIfNeeded();
      client.drop_partition_by_name(dbName, tableName, partitionName, deleteData);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Converts a partition name to its key-value form, e.g. ds=1/hr=2 -> {ds: 1, hr: 2}.
   *
   * @param partitionName the partition name to convert
   * @return a map from partition key to value
   *
   * @throws HiveMetastoreException if the metastore call fails
   */
  public synchronized Map<String, String> partitionNameToMap(String partitionName)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      return client.partition_name_to_spec(partitionName);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  @Override
  public synchronized void createDatabase(Database db) throws HiveMetastoreException {
    try {
      connectIfNeeded();
      client.create_database(db);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  @Override
  public synchronized Database getDatabase(String dbName) throws HiveMetastoreException {
    try {
      connectIfNeeded();
      return client.get_database(dbName);
    } catch (NoSuchObjectException e) {
      // Missing DB is reported as null rather than an exception.
      return null;
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  @Override
  public synchronized boolean existsDb(String dbName) throws HiveMetastoreException {
    return getDatabase(dbName) != null;
  }

  @Override
  public synchronized List<String> getPartitionNames(String dbName, String tableName)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      // -1 means no limit on the number of partition names returned.
      return client.get_partition_names(dbName, tableName, (short) -1);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  @Override
  public synchronized List<String> getTables(String dbName, String tableName)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      // tableName may be a pattern, e.g. "*" to list all tables.
      return client.get_tables(dbName, tableName);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  @Override
  public synchronized Partition exchangePartition(
      Map<String, String> partitionSpecs,
      String sourceDb, String sourceTable,
      String destDb,
      String destinationTableName)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      return client.exchange_partition(partitionSpecs, sourceDb, sourceTable, destDb,
          destinationTableName);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  // NOTE(review): unlike the other mutating methods, this one is not synchronized - confirm
  // whether that is intentional.
  @Override
  public void renamePartition(
      String db,
      String table,
      List<String> partitionValues,
      Partition partition)
      throws HiveMetastoreException {
    try {
      connectIfNeeded();
      client.rename_partition(db, table, partitionValues, partition);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Lists all databases in the metastore.
   *
   * @return the names of all databases
   */
  public List<String> getAllDatabases() throws HiveMetastoreException {
    try {
      connectIfNeeded();
      return client.get_all_databases();
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }

  /**
   * Lists all tables in the given database.
   *
   * @param dbName database to list tables from
   * @return the names of all tables in the database
   */
  public List<String> getAllTables(String dbName) throws HiveMetastoreException {
    try {
      connectIfNeeded();
      return client.get_all_tables(dbName);
    } catch (TException e) {
      close();
      throw new HiveMetastoreException(e);
    }
  }
}
| 9,408 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/Command.java | package com.airbnb.reair.common;
/**
 * A generic command that produces a value of type {@code ReturnT} and may throw an exception of
 * type {@code ExceptionT} when run.
 *
 * @param <ReturnT> type of the value produced by running the command
 * @param <ExceptionT> type of the exception the command may throw
 */
public interface Command<ReturnT, ExceptionT extends Throwable> {
  /**
   * Executes the command.
   *
   * @return the result of running the command
   * @throws ExceptionT if the command fails
   */
  public ReturnT run() throws ExceptionT;
}
| 9,409 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/PathBuilder.java | package com.airbnb.reair.common;
import org.apache.hadoop.fs.Path;
/**
* Helps construct Path objects by allowing incremental element additions. For example, /a/b/c can
* be formed by starting with /a, then adding b and c.
*/
public class PathBuilder {
private Path currentPath;
public PathBuilder(Path currentPath) {
this.currentPath = currentPath;
}
/**
* Add another path element.
*
* @param element the element to add
* @return a PathBuilder that includes the added element
*/
public PathBuilder add(String element) {
currentPath = new Path(currentPath, element);
return this;
}
/**
* Convert the path elements accumulated so far into a single path.
*
* @return a Path that includes all added path elements
*/
public Path toPath() {
return currentPath;
}
}
| 9,410 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/HiveUtils.java | package com.airbnb.reair.common;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class HiveUtils {
/**
* Checks to see if a table is partitioned.
*
* @param table table to check
* @return true if the given table is partitioned.
*/
public static boolean isPartitioned(Table table) {
return table.getPartitionKeys() != null
&& table.getPartitionKeys().size() > 0;
}
/**
* Checks to see if a table is a view.
*
* @param table table to check
* @return true if the given table is a view.
*/
public static boolean isView(Table table) {
return TableType.VIRTUAL_VIEW.name().equals(table.getTableType());
}
/**
* Convert a partition name into a list of partition values. e.g. 'ds=1/hr=2' -> ['1', '2']
*
* @param ms Hive metastore client
* @param partitionName the partition name to convert
* @return a list of partition values
*
* @throws HiveMetastoreException TODO
*/
public static List<String> partitionNameToValues(HiveMetastoreClient ms, String partitionName)
throws HiveMetastoreException {
// Convert the name to a key-value map
Map<String, String> kv = ms.partitionNameToMap(partitionName);
List<String> values = new ArrayList<>();
for (String equalsExpression : partitionName.split("/")) {
String[] equalsExpressionSplit = equalsExpression.split("=");
String key = equalsExpressionSplit[0];
if (!kv.containsKey(key)) {
// This shouldn't happen, but if it does it implies an error
// in partition name to map conversion.
return null;
}
values.add(kv.get(key));
}
return values;
}
}
| 9,411 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/Container.java | package com.airbnb.reair.common;
/**
* Container that is used to hold other objects to workaround use cases where an object or null
* needs to be returned.
*
* @param <T> the type of the container
*/
public class Container<T> {
private volatile T item;
/**
* Put the item into this container.
*
* @param item the item to put in
*/
public void set(T item) {
this.item = item;
}
/**
* Get the item that was put into this container.
* @return the item that was last put in
*/
public T get() {
return item;
}
}
| 9,412 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/NamedPartition.java | package com.airbnb.reair.common;
import org.apache.hadoop.hive.metastore.api.Partition;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* Composite class that combines the Hive Partition thrift object with the associated name.
*/
public class NamedPartition {
private String name;
private Partition partition;
public NamedPartition(String name, Partition partition) {
this.name = name;
this.partition = partition;
}
public String getName() {
return name;
}
public Partition getPartition() {
return partition;
}
/**
* Convert a collection of NamedPartitions to a list of Partitions.
*
* @param collection collection of partitions to convert
* @return a list of converted partitions
*/
public static List<Partition> toPartitions(Collection<NamedPartition> collection) {
List<Partition> partitions = new ArrayList<>();
for (NamedPartition pwn : collection) {
partitions.add(pwn.getPartition());
}
return partitions;
}
/**
* Convert a list of NamedPartitions to partition names.
*
* @param collection collection of partitions to convert
* @return a list of partitio names
*/
public static List<String> toNames(Collection<NamedPartition> collection) {
List<String> partitionNames = new ArrayList<>();
for (NamedPartition pwn : collection) {
partitionNames.add(pwn.getName());
}
return partitionNames;
}
}
| 9,413 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/HiveMetastoreClient.java | package com.airbnb.reair.common;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.List;
import java.util.Map;
/**
* A client for the Hive Metastore Thrift service.
*/
public interface HiveMetastoreClient {
Partition addPartition(Partition partition) throws HiveMetastoreException;
Table getTable(String dbName, String tableName) throws HiveMetastoreException;
Partition getPartition(String dbName, String tableName, String partitionName)
throws HiveMetastoreException;
List<String> getPartitionNames(String dbName, String tableName)
throws HiveMetastoreException;
void alterPartition(String dbName, String tableName, Partition partition)
throws HiveMetastoreException;
void alterTable(
String dbName,
String tableName,
Table table) throws HiveMetastoreException;
boolean isPartitioned(String dbName, String tableName) throws HiveMetastoreException;
boolean existsPartition(String dbName, String tableName, String partitionName)
throws HiveMetastoreException;
boolean existsTable(String dbName, String tableName) throws HiveMetastoreException;
void createTable(Table table) throws HiveMetastoreException;
void dropTable(String dbName, String tableName, boolean deleteData)
throws HiveMetastoreException;
void dropPartition(String dbName, String tableName, String partitionName,
boolean deleteData) throws HiveMetastoreException;
Map<String, String> partitionNameToMap(String partitionName) throws HiveMetastoreException;
void createDatabase(Database db) throws HiveMetastoreException;
Database getDatabase(String dbName) throws HiveMetastoreException;
boolean existsDb(String dbName) throws HiveMetastoreException;
List<String> getTables(String dbName, String tableName) throws HiveMetastoreException;
Partition exchangePartition(
Map<String, String> partitionSpecs,
String sourceDb,
String sourceTable,
String destDb,
String destinationTableName)
throws HiveMetastoreException;
void renamePartition(
String db,
String table,
List<String> partitionValues,
Partition partition)
throws HiveMetastoreException;
List<String> getAllDatabases() throws HiveMetastoreException;
List<String> getAllTables(String dbName) throws HiveMetastoreException;
void close();
}
| 9,414 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/CliUtils.java | package com.airbnb.reair.common;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
/**
* Command line utilities.
*/
public class CliUtils {
public static void printHelp(String command, Options options) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp(command, options);
}
}
| 9,415 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/DistCpWrapper.java | package com.airbnb.reair.common;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.util.ToolRunner;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.Set;
/**
* This is a wrapper around DistCp that adds a few options and makes it easier to use.
*/
public class DistCpWrapper {
private static final Log LOG = LogFactory.getLog(DistCpWrapper.class);
private Configuration conf;
public DistCpWrapper(Configuration conf) {
this.conf = conf;
}
/**
* Constructor using the specified options.
*
* @param options the options to use when copying
* @return the number of bytes copied
*
* @throws IOException if there's an error accessing the filesystem
* @throws DistCpException if there is an error running DistCp
*/
public long copy(DistCpWrapperOptions options) throws IOException, DistCpException {
if (Thread.currentThread().isInterrupted()) {
throw new DistCpException("Current thread has been interrupted");
}
Path srcDir = options.getSrcDir();
Path destDir = options.getDestDir();
Path distCpTmpDir = options.getDistCpTmpDir();
Path distCpLogDir = options.getDistCpLogDir();
boolean destDirExists = FsUtils.dirExists(conf, destDir);
LOG.debug("Dest dir " + destDir + " exists is " + destDirExists);
boolean syncModificationTimes = options.getSyncModificationTimes();
boolean atomic = options.getAtomic();
boolean canDeleteDest = options.getCanDeleteDest();
if (destDirExists
&& FsUtils.equalDirs(conf, srcDir, destDir, Optional.empty(), syncModificationTimes)) {
LOG.debug("Source and destination paths are already equal!");
return 0;
}
boolean useDistcpUpdate = false;
// Distcp -update can be used for cases where we're not doing an atomic
// copy and there aren't any files in the destination that are not in
// the source. If you delete specific files on the destination, it's
// possible to do distcp update with unique files in the dest. However,
// that functionality is not yet built out. Instead, this deletes the
// destination directory and does a fresh copy.
if (!atomic) {
useDistcpUpdate = destDirExists
&& !FsUtils.filesExistOnDestButNotSrc(conf, srcDir, destDir, Optional.empty());
if (useDistcpUpdate) {
LOG.debug("Doing a distcp update from " + srcDir + " to " + destDir);
}
}
if (destDirExists && !canDeleteDest && !useDistcpUpdate) {
throw new IOException("Destination directory (" + destDir
+ ") exists, can't use update, and can't " + "overwrite!");
}
if (destDirExists && canDeleteDest && !useDistcpUpdate && !atomic) {
LOG.debug("Unable to use distcp update, so deleting " + destDir + " since it already exists");
FsUtils.deleteDirectory(conf, destDir);
}
Path distcpDestDir;
// For atomic moves, copy to a temporary location and then move the
// directory to the final destination. Note: S3 doesn't support atomic
// directory moves so don't use this option for S3 destinations.
if (atomic) {
distcpDestDir = distCpTmpDir;
} else {
distcpDestDir = destDir;
}
LOG.debug(String.format("Copying %s to %s", srcDir, distcpDestDir));
Set<FileStatus> fileStatuses =
FsUtils.getFileStatusesRecursive(conf, srcDir, Optional.empty());
List<Long> fileSizes = new ArrayList<>();
long srcSize = 0;
for (FileStatus status : fileStatuses) {
srcSize += status.getLen();
fileSizes.add(status.getLen());
}
LOG.debug(String.format(
"%s has %s files with a total size of %s bytes",
srcDir, fileStatuses.size(), srcSize));
// Use shell to copy for small files
if (srcSize < options.getLocalCopySizeThreshold()
&& fileStatuses.size() < options.getLocalCopyCountThreshold()) {
String[] mkdirArgs = {"-mkdir", "-p", distcpDestDir.getParent().toString()};
String[] copyArgs = {"-cp", srcDir.toString(), distcpDestDir.toString()};
FsShell shell = new FsShell();
try {
LOG.debug("Using shell to mkdir with args " + Arrays.asList(mkdirArgs));
ToolRunner.run(shell, mkdirArgs);
LOG.debug("Using shell to copy with args " + Arrays.asList(copyArgs));
ToolRunner.run(shell, copyArgs);
} catch (Exception e) {
throw new DistCpException(e);
} finally {
shell.close();
}
if (syncModificationTimes) {
FsUtils.syncModificationTimes(conf, srcDir, distcpDestDir, Optional.empty());
}
} else {
LOG.debug("DistCp log dir: " + distCpLogDir);
LOG.debug("DistCp dest dir: " + distcpDestDir);
LOG.debug("DistCp tmp dir: " + distCpTmpDir);
// Make sure that the tmp dir and the destination directory are on
// the same schema
if (!FsUtils.sameFs(distCpTmpDir, distcpDestDir)) {
throw new DistCpException(
String.format("Filesystems do not match for tmp (%s) " + "and destination (%s)",
distCpTmpDir, distcpDestDir));
}
List<String> distcpArgs = new ArrayList<>();
distcpArgs.add("-m");
long mappers = Math.max(1, srcSize / options.getBytesPerMapper());
mappers = Math.max(mappers, fileStatuses.size() / options.getFilesPerMapper());
distcpArgs.add(Long.toString(mappers));
distcpArgs.add("-log");
distcpArgs.add(distCpLogDir.toString());
if (useDistcpUpdate) {
distcpArgs.add("-update");
}
// Preserve replication number, user, group, permissions, and block size.
// Preserving block size is needed for DistCp to use built in checksums for verification.
distcpArgs.add("-prugpb");
distcpArgs.add(srcDir.toString());
distcpArgs.add(distcpDestDir.toString());
LOG.debug("Running DistCp with args: " + distcpArgs);
// For distcp v1, do something like
// DistCp distCp = new DistCp(conf);
// For distcp v2
DistCp distCp = new DistCp();
distCp.setConf(conf);
long distCpTimeout = options.getDistcpTimeout(fileSizes, mappers);
int ret = runDistCp(distCp, distcpArgs, distCpTimeout,
options.getDistCpPollInterval());
if (Thread.currentThread().isInterrupted()) {
throw new DistCpException("Thread interrupted");
}
if (ret != 0) {
throw new DistCpException("Distcp failed");
}
}
if (syncModificationTimes) {
FsUtils.syncModificationTimes(conf, srcDir, distcpDestDir, Optional.empty());
}
if (!FsUtils.equalDirs(conf, srcDir, distcpDestDir, Optional.empty(), syncModificationTimes)) {
LOG.error("Source and destination sizes don't match!");
if (atomic) {
LOG.debug("Since it's an atomic copy, deleting " + distcpDestDir);
FsUtils.deleteDirectory(conf, distcpDestDir);
throw new DistCpException("distcp result mismatch");
}
} else {
LOG.debug("Size of source and destinations match");
}
if (atomic) {
// Size is good, clear out the final destination directory and
// replace with the copied version.
destDirExists = FsUtils.dirExists(conf, destDir);
if (destDirExists) {
LOG.debug("Deleting existing directory " + destDir);
FsUtils.deleteDirectory(conf, destDir);
}
LOG.debug("Moving from " + distCpTmpDir + " to " + destDir);
FsUtils.moveDir(conf, distcpDestDir, destDir);
}
LOG.debug("Deleting log directory " + distCpLogDir);
FsUtils.deleteDirectory(conf, distCpLogDir);
// Not necessarily the bytes copied if using -update
return srcSize;
}
/**
* Run distcp in a separate thread, but kill the thread if runtime exceeds timeout.
*
* @param distCp directory copier object
* @param options the command line arguments to pass to DistCp
* @param timeout the maximum number of miliseconds that DistCp should run
* @param pollInterval how frequently to check if DistCp is done
* @return the value returned by DistCp
*
* @throws InterruptedException if this thread is interrupted while waiting for DistCp
*/
private int runDistCp(final DistCp distCp, final List<String> options, long timeout,
long pollInterval) throws DistCpException {
// Kick off distcp in a separate thread so we can implement a timeout
final Container<Integer> retVal = new Container<Integer>();
Thread distCpRunner = new Thread() {
@Override
public void run() {
int ret = distCp.run(options.toArray(new String[] {}));
retVal.set(Integer.valueOf(ret));
}
};
distCpRunner.setDaemon(true);
distCpRunner.setName(Thread.currentThread().getName() + "-distcp-" + distCpRunner.getId());
distCpRunner.start();
long startTime = System.currentTimeMillis();
while (true) {
if (System.currentTimeMillis() - startTime > timeout) {
LOG.debug(String.format("DistCp exceeded timeout of %sms", timeout));
distCpRunner.interrupt();
break;
}
if (retVal.get() != null) {
break;
}
try {
Thread.sleep(pollInterval);
} catch (InterruptedException e) {
throw new DistCpException(e);
}
}
return retVal.get() == null ? -1 : retVal.get();
}
}
| 9,416 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/ProcessRunException.java | package com.airbnb.reair.common;
/**
* Exception related to running a (Linux) process.
*/
public class ProcessRunException extends Exception {
public ProcessRunException(String message) {
super(message);
}
public ProcessRunException(Exception exception) {
super(exception);
}
public ProcessRunException(String message, Exception exception) {
super(message, exception);
}
}
| 9,417 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/HiveParameterKeys.java | package com.airbnb.reair.common;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
/**
* Keys used in the parameters map of a Hive Thrift object for storing Airbnb metadata.
*/
public class HiveParameterKeys {
// Metadata indicating where this table was copied from
public static final String SRC_CLUSTER = "abb_source_cluster";
// Official Hive param keys
// Last modification time
public static final String TLDT = hive_metastoreConstants.DDL_TIME;
// Last modification time for some cases
public static final String TLMT = "last_modified_time";
}
| 9,418 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/RunResult.java | package com.airbnb.reair.common;
/**
* Result object passed from running a process.
*/
public class RunResult {
private final int returnCode;
private final String stdout;
public RunResult(int returnCode, String stdout) {
this.returnCode = returnCode;
this.stdout = stdout;
}
public String getStdout() {
return stdout;
}
public int getReturnCode() {
return returnCode;
}
}
| 9,419 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/StreamLogger.java | package com.airbnb.reair.common;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
/**
* Thread that reads from a stream and writes what it read to log4j.
*/
public class StreamLogger extends Thread {
private static final Log LOG = LogFactory.getLog(StreamLogger.class);
private InputStream inputStream;
private boolean saveToString;
private String streamAsString;
/**
* TODO.
*
* @param inputStream TODO
* @param saveToString whether to return the entire output from the input stream as a single
* string.
*/
public StreamLogger(String threadName, InputStream inputStream, boolean saveToString) {
this.inputStream = inputStream;
this.saveToString = saveToString;
this.streamAsString = null;
setName(threadName);
setDaemon(true);
}
@Override
public void run() {
try {
StringBuilder sb = new StringBuilder();
InputStreamReader isr = new InputStreamReader(inputStream);
BufferedReader br = new BufferedReader(isr);
String line;
while ((line = br.readLine()) != null) {
LOG.debug(line);
if (saveToString) {
sb.append(line);
}
}
streamAsString = sb.toString();
} catch (IOException e) {
LOG.error(e);
}
}
public String getStreamAsString() {
return streamAsString;
}
}
| 9,420 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/RetryingProcessRunner.java | package com.airbnb.reair.common;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.List;
/**
* Runs a process, retrying if necessary.
*/
public class RetryingProcessRunner {
private static final Log LOG = LogFactory.getLog(RetryingProcessRunner.class);
private static final int DEFAULT_NUM_RETRIES = 3;
private static final long RETRY_SLEEP_TIME = 10 * 60 * 1000;
private int retries;
public RetryingProcessRunner() {
this.retries = DEFAULT_NUM_RETRIES;
}
public RetryingProcessRunner(int retries) {
this.retries = retries;
}
/**
* Run a shell command.
*
* @param args shell arguments to call
* @return the result of running the command
*
* @throws ProcessRunException if there's an error running the process
*/
public RunResult run(List<String> args) throws ProcessRunException {
for (int i = 0; i < retries; i++) {
LOG.debug("Running: " + args);
ProcessRunner runner = new ProcessRunner(args);
RunResult result = runner.run();
if (result.getReturnCode() == 0) {
return result;
}
LOG.error("Error running command! Got return code: " + result.getReturnCode());
if (i + 1 == retries) {
return result;
}
try {
LOG.debug("Sleeping for " + RETRY_SLEEP_TIME / 1000 + "s");
Thread.sleep(RETRY_SLEEP_TIME);
} catch (InterruptedException e) {
throw new RuntimeException("Shouldn't happen!");
}
}
throw new RuntimeException("Shouldn't happen");
}
}
| 9,421 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/FsUtils.java | package com.airbnb.reair.common;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.Trash;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
/**
* Utility methods related to the file system.
*/
public class FsUtils {
private static final Log LOG = LogFactory.getLog(FsUtils.class);
/**
 * Check whether two paths live on the same filesystem, i.e. share both a URI scheme and an
 * authority (null-safe comparison).
 *
 * @param p1 first path
 * @param p2 second path
 * @return true if the scheme and authority of both paths match
 */
public static boolean sameFs(Path p1, Path p2) {
  URI u1 = p1.toUri();
  URI u2 = p2.toUri();
  return StringUtils.equals(u1.getScheme(), u2.getScheme())
      && StringUtils.equals(u1.getAuthority(), u2.getAuthority());
}
/**
 * Get the total size in bytes of all files under the specified path, including files in
 * subdirectories.
 *
 * @param conf the configuration object
 * @param path the directory to measure
 * @param filter directories rejected by this filter are skipped (with a warning)
 * @return the combined size of all files under the path, in bytes
 *
 * @throws IOException if there's an error accessing the filesystem
 */
public static long getSize(Configuration conf, Path path, Optional<PathFilter> filter)
    throws IOException {
  FileSystem fs = FileSystem.get(path.toUri(), conf);
  Queue<Path> pending = new LinkedList<>();
  pending.add(path);
  long bytes = 0;

  // Manual BFS instead of listFiles() - the latter has shown more block-location
  // related errors when used with s3n.
  while (!pending.isEmpty()) {
    Path current = pending.remove();
    if (filter.isPresent() && !filter.get().accept(current)) {
      LOG.warn("Skipping check of directory: " + current);
      continue;
    }
    for (FileStatus status : fs.listStatus(current)) {
      if (status.isDirectory()) {
        pending.add(status.getPath());
      } else {
        bytes += status.getLen();
      }
    }
  }
  return bytes;
}
/**
 * Check whether the total size of the files under a path exceeds a limit, stopping the
 * traversal as soon as the limit is passed.
 *
 * @param conf configuration object
 * @param path the directory to measure
 * @param maxSize size limit in bytes
 * @return true if the combined file size under the path is greater than maxSize
 *
 * @throws IOException if there's an error accessing the filesystem
 */
public static boolean exceedsSize(Configuration conf, Path path, long maxSize)
    throws IOException {
  FileSystem fs = FileSystem.get(path.toUri(), conf);
  Queue<Path> pending = new LinkedList<>();
  pending.add(path);
  long bytesSeen = 0;

  // Manual BFS instead of listFiles() - the latter has shown more block-location
  // related errors when used with s3n.
  while (!pending.isEmpty()) {
    Path current = pending.remove();
    for (FileStatus status : fs.listStatus(current)) {
      if (status.isDirectory()) {
        pending.add(status.getPath());
      } else {
        bytesSeen += status.getLen();
        if (bytesSeen > maxSize) {
          // Early exit - no need to finish the walk once the limit is passed.
          return true;
        }
      }
    }
  }
  return false;
}
/**
 * Collect the statuses of every file under the given path, descending into subdirectories.
 *
 * @param conf configuration object
 * @param path the directory to walk
 * @param filter directories rejected by this filter are skipped (with a warning)
 * @return the statuses of all files (not directories) under the path
 *
 * @throws IOException if there's an error accessing the filesystem
 */
public static Set<FileStatus> getFileStatusesRecursive(
    Configuration conf,
    Path path,
    Optional<PathFilter> filter) throws IOException {
  FileSystem fs = FileSystem.get(path.toUri(), conf);
  Set<FileStatus> found = new HashSet<>();
  Queue<Path> pending = new LinkedList<>();
  pending.add(path);

  // Manual BFS instead of listFiles() - the latter has shown more block-location
  // related errors when used with s3n.
  while (!pending.isEmpty()) {
    Path current = pending.remove();
    if (filter.isPresent() && !filter.get().accept(current)) {
      LOG.warn("Skipping check of directory: " + current);
      continue;
    }
    for (FileStatus status : fs.listStatus(current)) {
      if (status.isDirectory()) {
        pending.add(status.getPath());
      } else {
        found.add(status);
      }
    }
  }
  return found;
}
/**
 * Map each file's root-relative path (e.g. "a/b.txt") to its size in bytes.
 *
 * @param root the reference directory
 * @param statuses statuses of the files under the root
 * @return a map from root-relative path to file size
 */
private static Map<String, Long> getRelPathToSizes(Path root, Set<FileStatus> statuses)
    throws ArgumentException {
  Map<String, Long> sizes = new HashMap<>();
  statuses.forEach(
      status -> sizes.put(getRelativePath(root, status.getPath()), status.getLen()));
  return sizes;
}
/**
 * Map each file's root-relative path (e.g. "a/b.txt") to its modification time.
 *
 * @param root the reference directory
 * @param statuses statuses of the files under the root
 * @return a map from root-relative path to modification time
 */
private static Map<String, Long> getRelativePathToModificationTime(Path root,
    Set<FileStatus> statuses)
    throws ArgumentException {
  Map<String, Long> times = new HashMap<>();
  statuses.forEach(
      status -> times.put(getRelativePath(root, status.getPath()), status.getModificationTime()));
  return times;
}
/**
 * Return the given path with a trailing "/" appended if it doesn't already end with one.
 * A null input yields null.
 */
public static String getPathWithSlash(String path) {
  if (path == null) {
    return null;
  }
  return path.endsWith("/") ? path : path + "/";
}
/**
 * Get the path of a child relative to a root path.
 *
 * @param root the reference path
 * @param child a path under root
 * @return the relative path of the child given the root. For example, if the root is '/a' and
 *         the child is '/a/b/c.txt', the relative path is 'b/c.txt'
 */
public static String getRelativePath(Path root, Path child) {
  // TODO: Use URI.relativize()
  String prefix = getPathWithSlash(root.toString());
  String childStr = child.toString();
  if (!childStr.startsWith(prefix)) {
    throw new RuntimeException("Invalid root: " + root + " and child " + child);
  }
  return childStr.substring(prefix.length());
}
/**
 * Sum the file sizes in a relative-path-to-size map.
 *
 * @param relPathToSize a map from the relative path of a file to its size
 * @return the total size of the files described in the map
 */
private static long totalSize(Map<String, Long> relPathToSize) {
  return relPathToSize.values().stream().mapToLong(Long::longValue).sum();
}
/**
 * Check whether the destination directory contains any file (by relative path) that the
 * source directory does not. Mainly used for checking whether a distcp -update can work.
 *
 * @param conf configuration object
 * @param src source path
 * @param dest destination path
 * @param filter filter to use when traversing through the directories
 * @return true if some relative path exists under dest but not under src
 * @throws IOException if there's an error accessing the filesystem
 */
public static boolean filesExistOnDestButNotSrc(Configuration conf, Path src, Path dest,
    Optional<PathFilter> filter) throws IOException {
  Map<String, Long> srcFileSizes;
  Map<String, Long> destFileSizes;
  try {
    srcFileSizes = getRelPathToSizes(src, getFileStatusesRecursive(conf, src, filter));
    destFileSizes = getRelPathToSizes(dest, getFileStatusesRecursive(conf, dest, filter));
  } catch (ArgumentException e) {
    throw new IOException("Invalid file statuses!", e);
  }

  for (String file : destFileSizes.keySet()) {
    if (!srcFileSizes.containsKey(file)) {
      LOG.warn(String.format("%s exists on %s but not in %s", file, dest, src));
      return true;
    }
  }
  return false;
}
/**
 * Check directory equality with no path filter.
 * See {@link #equalDirs(Configuration, Path, Path, Optional)}.
 */
public static boolean equalDirs(Configuration conf, Path src, Path dest) throws IOException {
  return equalDirs(conf, src, dest, Optional.empty());
}
/**
 * Checks to see if two directories are equal. The directories are considered equal if they have
 * the same files with the same sizes in the same relative paths. Modification times are not
 * compared by this overload.
 *
 * @param conf configuration object
 * @param src source directory
 * @param dest destination directory
 * @param filter files or directories rejected by this filter are not checked
 * @return true if the files in the source and the destination are the 'same'. 'same' is defined
 *         as having the same set of files with matching sizes.
 * @throws IOException if there's an error accessing the filesystem
 */
public static boolean equalDirs(Configuration conf, Path src, Path dest,
    Optional<PathFilter> filter) throws IOException {
  return equalDirs(conf, src, dest, filter, false);
}
/**
 * Checks to see if two directories are equal. The directories are considered equal if they have
 * the same non-zero files with the same sizes in the same paths (with the same modification times
 * if applicable)
 *
 * @param conf configuration object
 * @param src source directory
 * @param dest destination directory
 * @param filter filter for excluding some files from comparison
 * @param compareModificationTimes whether to compare modification times.
 * @return true if the two directories are equal
 *
 * @throws IOException if there is an error accessing the filesystem
 */
public static boolean equalDirs(Configuration conf, Path src, Path dest,
    Optional<PathFilter> filter, boolean compareModificationTimes) throws IOException {
  // Both sides must exist for a meaningful comparison.
  boolean srcExists = src.getFileSystem(conf).exists(src);
  boolean destExists = dest.getFileSystem(conf).exists(dest);

  if (!srcExists || !destExists) {
    return false;
  }

  Set<FileStatus> srcFileStatuses = getFileStatusesRecursive(conf, src, filter);
  Set<FileStatus> destFileStatuses = getFileStatusesRecursive(conf, dest, filter);
  Map<String, Long> srcFileSizes = null;
  Map<String, Long> destFileSizes = null;
  try {
    srcFileSizes = getRelPathToSizes(src, srcFileStatuses);
    destFileSizes = getRelPathToSizes(dest, destFileStatuses);
  } catch (ArgumentException e) {
    throw new IOException("Invalid file statuses!", e);
  }
  long srcSize = totalSize(srcFileSizes);
  long destSize = totalSize(destFileSizes);

  // Size check is sort of redundant, but is a quick one to show.
  LOG.debug("Size of " + src + " is " + srcSize);
  LOG.debug("Size of " + dest + " is " + destSize);

  if (srcSize != destSize) {
    LOG.debug(String.format("Size of %s and %s do not match!", src, dest));
    return false;
  }

  if (srcFileSizes.size() != destFileSizes.size()) {
    LOG.warn(String.format("Number of files in %s (%d) and %s (%d) " + "do not match!", src,
        srcFileSizes.size(), dest, destFileSizes.size()));
    return false;
  }

  // Per-file check: every source file must exist on the destination with the same size.
  for (String file : srcFileSizes.keySet()) {
    if (!destFileSizes.containsKey(file)) {
      LOG.warn(String.format("%s missing from %s!", file, dest));
      return false;
    }
    if (!srcFileSizes.get(file).equals(destFileSizes.get(file))) {
      LOG.warn(String.format("Size mismatch between %s (%d) in %s " + "and %s (%d) in %s", file,
          srcFileSizes.get(file), src, file, destFileSizes.get(file), dest));
      return false;
    }
  }

  if (compareModificationTimes) {
    Map<String, Long> srcFileModificationTimes = null;
    Map<String, Long> destFileModificationTimes = null;
    try {
      srcFileModificationTimes = getRelativePathToModificationTime(src, srcFileStatuses);
      destFileModificationTimes = getRelativePathToModificationTime(dest, destFileStatuses);
    } catch (ArgumentException e) {
      throw new IOException("Invalid file statuses!", e);
    }
    // At this point the file sets are known to be equal (equal counts + every src key present
    // on dest), so the dest map lookup below can't miss.
    for (String file : srcFileModificationTimes.keySet()) {
      if (!srcFileModificationTimes.get(file).equals(destFileModificationTimes.get(file))) {
        LOG.warn(String.format(
            "Modification time mismatch between " + "%s (%d) in %s and %s (%d) in %s", file,
            srcFileModificationTimes.get(file), src, file, destFileModificationTimes.get(file),
            dest));
        return false;
      }
    }
  }

  LOG.debug(String.format("%s and %s are the same", src, dest));
  return true;
}
/**
 * Copy each source file's modification time onto the file at the same relative path
 * under the destination directory.
 *
 * @param conf configuration object
 * @param src source directory
 * @param dest destination directory
 * @param filter a filter for excluding some files from modification
 *
 * @throws IOException if there's an error
 */
public static void syncModificationTimes(Configuration conf, Path src, Path dest,
    Optional<PathFilter> filter) throws IOException {
  Set<FileStatus> srcStatuses = getFileStatusesRecursive(conf, src, filter);

  Map<String, Long> modificationTimes;
  try {
    modificationTimes = getRelativePathToModificationTime(src, srcStatuses);
  } catch (ArgumentException e) {
    throw new IOException("Invalid file statuses!", e);
  }

  FileSystem destFs = dest.getFileSystem(conf);
  for (Map.Entry<String, Long> entry : modificationTimes.entrySet()) {
    // -1 leaves the access time untouched.
    destFs.setTimes(new Path(dest, entry.getKey()), entry.getValue(), -1);
  }
}
/**
 * Moves the directory from the src to dest, creating the parent directory for the dest if one
 * does not exist. Source and destination must live on the same filesystem.
 *
 * @param conf configuration object
 * @param src source directory
 * @param dest destination directory
 * @throws IOException if there's an error moving the directory
 */
public static void moveDir(Configuration conf, Path src, Path dest) throws IOException {
  FileSystem srcFs = FileSystem.get(src.toUri(), conf);
  FileSystem destFs = FileSystem.get(dest.toUri(), conf);

  // A rename can only happen within a single filesystem.
  if (!srcFs.getUri().equals(destFs.getUri())) {
    throw new IOException("Source and destination filesystems " + "are different! src: "
        + srcFs.getUri() + " dest: " + destFs.getUri());
  }

  Path destParent = dest.getParent();
  if (!destFs.exists(destParent)) {
    destFs.mkdirs(destParent);
  } else if (!destFs.isDirectory(destParent)) {
    throw new IOException("File exists instead of destination " + destParent);
  } else {
    LOG.debug("Parent directory exists: " + destParent);
  }

  if (!srcFs.rename(src, dest)) {
    throw new IOException("Error while moving from " + src + " to " + dest);
  }
}
/**
 * Checks to see if a directory exists.
 *
 * @param conf configuration object
 * @param path the path to check
 * @return true if the path specifies a directory that exists
 *
 * @throws IOException if there's an error accessing the filesystem
 */
public static boolean dirExists(Configuration conf, Path path) throws IOException {
  FileSystem fs = FileSystem.get(path.toUri(), conf);
  if (!fs.exists(path)) {
    return false;
  }
  return fs.isDirectory(path);
}
/**
 * Delete the specified directory, using the trash as available.
 *
 * @param conf configuration object
 * @param path path to delete
 *
 * @throws IOException if there's an error deleting the directory.
 */
public static void deleteDirectory(Configuration conf, Path path) throws IOException {
  Trash trash = new Trash(path.getFileSystem(conf), conf);
  try {
    if (trash.isEnabled()) {
      if (trash.moveToTrash(path)) {
        LOG.debug("Moved to trash: " + path);
      } else {
        LOG.error("Item already in trash: " + path);
      }
    } else {
      // No trash configured for this filesystem: delete recursively instead.
      LOG.debug("Trash is not enabled for " + path + " so deleting instead");
      path.getFileSystem(conf).delete(path, true);
    }
  } catch (FileNotFoundException e) {
    // Nothing to delete; treat as a no-op.
    LOG.debug("Attempting to delete non-existent directory " + path);
  }
}
/**
 * Checks to see if one directory is a subdirectory of another.
 *
 * <p>Implemented via {@link java.net.URI#relativize(java.net.URI)}: when p1's URI is a
 * path-prefix of p2's, the relativized result differs from p2's original URI. Note that
 * when p1 and p2 refer to the same path, relativize yields an empty relative URI, so
 * this method also returns true for equal paths.
 *
 * @param p1 directory
 * @param p2 potential subdirectory
 * @return true if p2 is a subdirectory of p1
 */
public static boolean isSubDirectory(Path p1, Path p2) {
  URI relativizedUri = p1.toUri().relativize(p2.toUri());
  return !relativizedUri.equals(p2.toUri());
}
/**
 * Replace a directory with another directory, removing the existing destination first
 * (through the trash, when enabled).
 *
 * @param conf configuration object
 * @param src source directory
 * @param dest destination directory
 *
 * @throws IOException if there's an error with the filesystem, including a failed rename
 */
public static void replaceDirectory(Configuration conf, Path src, Path dest) throws IOException {
  FileSystem fs = dest.getFileSystem(conf);
  if (fs.exists(dest)) {
    LOG.debug("Removing " + dest + " since it exists");
    deleteDirectory(conf, dest);
  }
  LOG.debug("Renaming " + src + " to " + dest);
  // rename() reports failure through its return value rather than an exception;
  // surface it as an IOException for consistency with moveDir(). The original
  // ignored the result, silently leaving the destination missing on failure.
  if (!fs.rename(src, dest)) {
    throw new IOException("Error while moving from " + src + " to " + dest);
  }
}
/**
 * Returns true if the both files have checksums and they match. Returns false if checksums exist
 * but they do not match. Returns empty if either file does not have a checksum.
 *
 * @param conf configuration use to create the FileSystems
 * @param srcFile source file
 * @param destFile destination file
 * @throws IOException if there is an error getting the checksum for the specified files
 */
public static Optional<Boolean> checksumsMatch(Configuration conf, Path srcFile, Path destFile)
    throws IOException {
  FileChecksum srcChecksum = srcFile.getFileSystem(conf).getFileChecksum(srcFile);
  FileChecksum destChecksum = destFile.getFileSystem(conf).getFileChecksum(destFile);

  // A null checksum means the filesystem doesn't support them, so no comparison
  // is possible.
  if (srcChecksum == null || destChecksum == null) {
    return Optional.empty();
  }
  return Optional.of(Boolean.valueOf(srcChecksum.equals(destChecksum)));
}
}
| 9,422 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/HiveObjectSpec.java | package com.airbnb.reair.common;
import org.apache.hadoop.hive.metastore.api.Table;

import java.util.Objects;
/**
* Specification for a Hive object (table or partition). Used because having 3 arguments (db, table,
* partition) for every function gets old.
*/
public class HiveObjectSpec {

  // Immutable identity of the Hive object. partitionName is null for table specs.
  private final String dbName;
  private final String tableName;
  private final String partitionName;

  /**
   * Constructor using a Thrift Hive table.
   *
   * @param table Hive table
   */
  public HiveObjectSpec(Table table) {
    this(table.getDbName(), table.getTableName());
  }

  /**
   * Constructor using a Thrift Hive partition.
   *
   * @param namedPartition Hive partition
   */
  public HiveObjectSpec(NamedPartition namedPartition) {
    this(
        namedPartition.getPartition().getDbName(),
        namedPartition.getPartition().getTableName(),
        namedPartition.getName());
  }

  /**
   * Constructor for a table specification (no partition).
   *
   * @param dbName Hive database name
   * @param tableName Hive table name
   */
  public HiveObjectSpec(String dbName, String tableName) {
    this(dbName, tableName, null);
  }

  /**
   * Constructor using specified names.
   *
   * @param dbName Hive database name
   * @param tableName Hive table name
   * @param partitionName Hive partition name, or null for a table spec
   */
  public HiveObjectSpec(String dbName, String tableName, String partitionName) {
    this.dbName = dbName;
    this.tableName = tableName;
    this.partitionName = partitionName;
  }

  public String getDbName() {
    return dbName;
  }

  public String getTableName() {
    return tableName;
  }

  public String getPartitionName() {
    return partitionName;
  }

  /** Returns true if this spec refers to a partition rather than a table. */
  public boolean isPartition() {
    return this.partitionName != null;
  }

  @Override
  public String toString() {
    if (partitionName == null) {
      return String.format("%s.%s", dbName, tableName);
    } else {
      return String.format("%s.%s/%s", dbName, tableName, partitionName);
    }
  }

  /**
   * When this specifies a partition, return the specification for the table that this partition
   * resides in.
   *
   * @return specification for this partition's table
   */
  public HiveObjectSpec getTableSpec() {
    if (!isPartition()) {
      // IllegalStateException is a RuntimeException, so existing callers that
      // caught the old RuntimeException are unaffected.
      throw new IllegalStateException("Should only be called for " + "partition specs!");
    }
    return new HiveObjectSpec(dbName, tableName);
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    HiveObjectSpec that = (HiveObjectSpec) obj;
    // Objects.equals is null-safe; the previous direct .equals calls threw NPE
    // when dbName or tableName was null.
    return Objects.equals(dbName, that.dbName)
        && Objects.equals(tableName, that.tableName)
        && Objects.equals(partitionName, that.partitionName);
  }

  @Override
  public int hashCode() {
    // Same formula as before (Objects.hashCode(null) == 0 matches the old ternary),
    // so hash distribution is unchanged.
    int result = dbName.hashCode();
    result = 31 * result + tableName.hashCode();
    result = 31 * result + Objects.hashCode(partitionName);
    return result;
  }
}
| 9,423 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/common/DistCpWrapperOptions.java | package com.airbnb.reair.common;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import java.util.Collections;
import java.util.List;
import java.util.PriorityQueue;
/**
* A class to encapsulate various options required for running DistCp.
*/
public class DistCpWrapperOptions {

  private static final Log LOG = LogFactory.getLog(DistCpWrapperOptions.class);

  // The source directory to copy
  private Path srcDir;
  // The destination directory for the copy
  private Path destDir;
  // Where distcp should temporarily copy files to
  private Path distCpTmpDir;
  // The log directory for the distcp job
  private Path distCpLogDir;
  // If atomic, distCp will copy to a temporary directory first and then
  // do a directory move to the final location
  private boolean atomic = true;
  // If the destination directory exists with different data, can it be
  // deleted?
  private boolean canDeleteDest = true;
  // Whether to set the modification times to be the same for the copied files
  private boolean syncModificationTimes = true;
  // Size number of mappers for the distcp job based on the source directory
  // size and the number of files.
  private long bytesPerMapper = (long) 256e6;
  private int filesPerMapper = 100;
  // If the distCp job runs longer than this many ms, fail the job
  private long distcpJobTimeout = 1800 * 1000;
  // If the input data size is smaller than this many MB, and fewer than
  // this many files, use a local -cp command to copy the files.
  private long localCopyCountThreshold = (long) 100;
  private long localCopySizeThreshold = (long) 256e6;
  // Poll for the progress of DistCp every N ms
  private long distCpPollInterval = 2500;

  // Use a variable amount of time for distcp job timeout, depending on filesize
  // subject to a minimum and maximum
  // ceil(filesize_gb) * timeoutMsPerGb contrained to range (min, max)
  private boolean distcpDynamicJobTimeoutEnabled = false;
  // timeout in millis per GB per mapper, size will get rounded up
  private long distcpDynamicJobTimeoutMsPerGbPerMapper = 0;
  // minimum job timeout for variable timeout (ms) which accounts for overhead
  private long distcpDynamicJobTimeoutBase = distcpJobTimeout;
  // maximum job timeout for variable timeout (ms)
  private long distcpDynamicJobTimeoutMax = Long.MAX_VALUE;

  /**
   * Constructor for DistCp options.
   *
   * @param srcDir the source directory to copy from
   * @param destDir the destination directory to copy to
   * @param distCpTmpDir the temporary directory to use when copying
   * @param distCpLogDir the log directory to use when copying
   */
  public DistCpWrapperOptions(Path srcDir, Path destDir, Path distCpTmpDir, Path distCpLogDir) {
    this.srcDir = srcDir;
    this.destDir = destDir;
    this.distCpTmpDir = distCpTmpDir;
    this.distCpLogDir = distCpLogDir;
  }

  // Builder-style setters below: each returns this to allow chaining.

  public DistCpWrapperOptions setAtomic(boolean atomic) {
    this.atomic = atomic;
    return this;
  }

  public DistCpWrapperOptions setCanDeleteDest(boolean canDeleteDest) {
    this.canDeleteDest = canDeleteDest;
    return this;
  }

  public DistCpWrapperOptions setSyncModificationTimes(boolean syncModificationTimes) {
    this.syncModificationTimes = syncModificationTimes;
    return this;
  }

  public DistCpWrapperOptions setBytesPerMapper(long bytesPerMapper) {
    this.bytesPerMapper = bytesPerMapper;
    return this;
  }

  public DistCpWrapperOptions setDistCpJobTimeout(long distCpJobTimeout) {
    this.distcpJobTimeout = distCpJobTimeout;
    return this;
  }

  public DistCpWrapperOptions setLocalCopySizeThreshold(long localCopySizeThreshold) {
    this.localCopySizeThreshold = localCopySizeThreshold;
    return this;
  }

  public DistCpWrapperOptions setDistcpDynamicJobTimeoutEnabled(
      boolean distcpDynamicJobTimeoutEnabled) {
    this.distcpDynamicJobTimeoutEnabled = distcpDynamicJobTimeoutEnabled;
    return this;
  }

  public DistCpWrapperOptions setDistcpDynamicJobTimeoutMsPerGbPerMapper(
      long distcpDynamicJobTimeoutMsPerGbPerMapper) {
    this.distcpDynamicJobTimeoutMsPerGbPerMapper = distcpDynamicJobTimeoutMsPerGbPerMapper;
    return this;
  }

  public DistCpWrapperOptions setDistcpDynamicJobTimeoutBase(
      long distcpDynamicJobTimeoutBase) {
    this.distcpDynamicJobTimeoutBase = distcpDynamicJobTimeoutBase;
    return this;
  }

  public DistCpWrapperOptions setDistcpDynamicJobTimeoutMax(
      long distcpDynamicJobTimeoutMax) {
    this.distcpDynamicJobTimeoutMax = distcpDynamicJobTimeoutMax;
    return this;
  }

  public Path getSrcDir() {
    return srcDir;
  }

  public Path getDestDir() {
    return destDir;
  }

  public Path getDistCpTmpDir() {
    return distCpTmpDir;
  }

  public Path getDistCpLogDir() {
    return distCpLogDir;
  }

  public boolean getAtomic() {
    return atomic;
  }

  public boolean getCanDeleteDest() {
    return canDeleteDest;
  }

  public boolean getSyncModificationTimes() {
    return syncModificationTimes;
  }

  public long getBytesPerMapper() {
    return bytesPerMapper;
  }

  public int getFilesPerMapper() {
    return filesPerMapper;
  }

  public long getLocalCopySizeThreshold() {
    return localCopySizeThreshold;
  }

  public long getLocalCopyCountThreshold() {
    return localCopyCountThreshold;
  }

  public long getDistCpPollInterval() {
    return distCpPollInterval;
  }

  /**
   * Returns the distcp timeout in milliseconds according to options set.
   * @param fileSizes File sizes of the files to copy.
   * @param maxConcurrency The number of mappers in distcp.
   * @return The timeout in milliseconds for distcp.
   */
  public long getDistcpTimeout(List<Long> fileSizes, long maxConcurrency) {
    if (distcpDynamicJobTimeoutEnabled) {
      long bytesPerLongestMapper = computeLongestMapper(fileSizes, maxConcurrency);
      long baseTimeout = distcpDynamicJobTimeoutBase;
      long maxTimeout = distcpDynamicJobTimeoutMax;
      long msPerGb = distcpDynamicJobTimeoutMsPerGbPerMapper;
      // Round the busiest mapper's load up to whole GBs when scaling the timeout.
      long adjustment = ((long) Math.ceil(bytesPerLongestMapper / 1e9) * msPerGb);
      long timeout = Math.min(maxTimeout, baseTimeout + adjustment);
      LOG.debug(String.format("Setting dynamic timeout of %d milliseconds for max mapper size %d",
          timeout, bytesPerLongestMapper));
      return timeout;
    } else {
      return distcpJobTimeout;
    }
  }

  /**
   * Computes an estimate for how many bytes the mapper that copies the most will copy.
   * This is within 4/3 of the optimal scheduling using a heuristic for the multiprocessor
   * scheduling problem.
   * @param fileSizes A list of filesizes to copy. The list is not modified.
   * @param maxConcurrency How many parallel processes will copy the files.
   * @return An estimate of how many bytes the busiest mapper will copy.
   */
  public long computeLongestMapper(List<Long> fileSizes, long maxConcurrency) {
    // Max-heap of remaining file sizes so we can take the largest file first (the LPT
    // heuristic) without sorting -- and therefore without mutating -- the caller's list.
    // The original implementation sorted fileSizes in place, a surprising side effect.
    PriorityQueue<Long> remainingSizes = new PriorityQueue<>(Collections.reverseOrder());
    remainingSizes.addAll(fileSizes);

    // Min-heap of per-mapper loads; the head is always the least-loaded mapper.
    PriorityQueue<Long> processors = new PriorityQueue<>();
    for (int i = 0; i < maxConcurrency; i++) {
      processors.add(0L);
    }

    long maxValue = 0L;
    while (!remainingSizes.isEmpty()) {
      // Assign the largest remaining file to the least-loaded mapper.
      long newValue = processors.poll() + remainingSizes.poll();
      processors.add(newValue);
      maxValue = Math.max(maxValue, newValue);
    }
    return maxValue;
  }
}
| 9,424 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/db/DbKeyValueStore.java | package com.airbnb.reair.db;
import com.airbnb.reair.common.Container;
import com.airbnb.reair.utils.RetryableTask;
import com.airbnb.reair.utils.RetryingTaskRunner;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Optional;
/**
* A simple string key/value store using a DB.
*/
public class DbKeyValueStore {

  private static final Log LOG = LogFactory.getLog(DbKeyValueStore.class);

  private DbConnectionFactory dbConnectionFactory;
  private String dbTableName;
  private RetryingTaskRunner retryingTaskRunner = new RetryingTaskRunner();

  /**
   * Constructor.
   *
   * @param dbConnectionFactory connection factory to use for connecting to the DB
   * @param dbTableName name of the table containing the keys and values
   */
  public DbKeyValueStore(DbConnectionFactory dbConnectionFactory, String dbTableName) {
    this.dbTableName = dbTableName;
    this.dbConnectionFactory = dbConnectionFactory;
  }

  /**
   * Get the create table command for the key/value table.
   *
   * @param tableName name of the table
   * @return SQL that can be executed to create the table
   */
  public static String getCreateTableSql(String tableName) {
    return String.format("CREATE TABLE `%s` (\n"
        + "  `update_time` timestamp NOT NULL DEFAULT "
        + "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n"
        + "  `key_string` varchar(256) NOT NULL,\n"
        + "  `value_string` varchar(4000) DEFAULT NULL,\n" + "  PRIMARY KEY (`key_string`)\n"
        + ") ENGINE=InnoDB", tableName);
  }

  /**
   * Get the value of the key.
   *
   * @param key name of the key
   * @return the value associated with they key, or empty if the key is absent
   *
   * @throws SQLException if there's an error querying the DB
   */
  public Optional<String> get(String key) throws SQLException {
    Connection connection = dbConnectionFactory.getConnection();

    String query =
        String.format("SELECT value_string FROM %s " + "WHERE key_string = ? LIMIT 1", dbTableName);
    // try-with-resources closes the statement and its result set even when
    // executeQuery throws (the original finally block did not close the ResultSet).
    try (PreparedStatement ps = connection.prepareStatement(query)) {
      ps.setString(1, key);
      try (ResultSet rs = ps.executeQuery()) {
        if (rs.next()) {
          // The stored value may itself be SQL NULL.
          return Optional.ofNullable(rs.getString(1));
        }
        return Optional.empty();
      }
    }
  }

  /**
   * Sets the value for a key, retrying if necessary.
   *
   * @param key the key to set
   * @param value the value to associate with the key
   */
  public void resilientSet(final String key, final String value) {
    retryingTaskRunner.runUntilSuccessful(new RetryableTask() {
      @Override
      public void run() throws Exception {
        set(key, value);
      }
    });
  }

  /**
   * Sets the value for a key (upsert semantics).
   *
   * @param key the key to set
   * @param value the value to associate with the key
   *
   * @throws SQLException if there's an error querying the DB
   */
  public void set(String key, String value) throws SQLException {
    LOG.debug("Setting " + key + " to " + value);
    Connection connection = dbConnectionFactory.getConnection();
    String query = String.format("INSERT INTO %s (key_string, value_string) "
        + "VALUE (?, ?) ON DUPLICATE KEY UPDATE value_string = ?", dbTableName);
    try (PreparedStatement ps = connection.prepareStatement(query)) {
      ps.setString(1, key);
      ps.setString(2, value);
      ps.setString(3, value);
      ps.executeUpdate();
    }
  }
}
| 9,425 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/db/DbConnectionFactory.java | package com.airbnb.reair.db;
import java.sql.Connection;
import java.sql.SQLException;
/**
* Interface for a factory that returns connections to a DB.
*/
public interface DbConnectionFactory {
  /**
   * Get a connection to the DB. Implementations may return a cached, shared connection
   * rather than a fresh one on every call.
   *
   * @return a live JDBC connection
   * @throws SQLException if a connection cannot be established
   */
  Connection getConnection() throws SQLException;
}
| 9,426 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/db/StaticDbConnectionFactory.java | package com.airbnb.reair.db;
import com.airbnb.reair.utils.RetryableTask;
import com.airbnb.reair.utils.RetryingTaskRunner;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
* A factory that creates connections to a DB based on connection information supplied in the
* constructor.
*/
public class StaticDbConnectionFactory implements DbConnectionFactory {
  private static final Log LOG = LogFactory.getLog(StaticDbConnectionFactory.class);

  private String jdbcUrl;
  private String username;
  private String password;
  // Cached connection, reused across getConnection() calls while it stays valid.
  private Connection connection;
  private RetryingTaskRunner retryingTaskRunner;

  /**
   * Constructor using specified connection information.
   *
   * @param jdbcUrl the JDBC connection URL
   * @param username the username
   * @param password the password associated with the username
   */
  public StaticDbConnectionFactory(String jdbcUrl, String username, String password) {
    this.jdbcUrl = jdbcUrl;
    this.username = username;
    this.password = password;
    this.retryingTaskRunner = new RetryingTaskRunner();
    try {
      // Load and instantiate the MySQL JDBC driver so it self-registers with
      // DriverManager. A failure is only logged here; getConnection() will surface
      // the real error when a connection is attempted.
      Class.forName("com.mysql.jdbc.Driver").newInstance();
    } catch (ReflectiveOperationException e) {
      // Covers ClassNotFoundException, IllegalAccessException and
      // InstantiationException, which were previously three identical catches.
      LOG.error(e);
    }
  }

  @Override
  public Connection getConnection() throws SQLException {
    // NOTE(review): the cached connection field is read and written without
    // synchronization -- assumes single-threaded use; verify callers.
    retryingTaskRunner.runUntilSuccessful(new RetryableTask() {
      @Override
      public void run() throws Exception {
        // Reconnect only when there is no connection or it fails a 5s validity check.
        if (connection == null || !connection.isValid(5)) {
          LOG.debug("Connecting to " + jdbcUrl);
          connection = DriverManager.getConnection(jdbcUrl, username, password);
        }
      }
    });
    return connection;
  }
}
| 9,427 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/db/DbCredentials.java | package com.airbnb.reair.db;
import java.io.IOException;
/**
* Interface for classes that can return username / passwords for connecting to a DB.
*/
public interface DbCredentials {

  /**
   * Called if the credentials should be refreshed (e.g. re-read from file).
   *
   * @throws IOException if there's an error reading the credentials
   */
  void refreshCredsIfNecessary() throws IOException;

  /**
   * Get the username that has read and write privileges.
   *
   * @return the username that has read / write access to the DB
   *
   * @throws IOException if there's an error reading the credentials
   */
  String getReadWriteUsername() throws IOException;

  /**
   * Get the password associated with the user that has read / write access to the DB.
   *
   * @return the password for the user that has read / write access to the DB
   *
   * @throws IOException if there's an error reading the credentials
   */
  String getReadWritePassword() throws IOException;
}
| 9,428 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/db/EmbeddedMySqlDb.java | package com.airbnb.reair.db;
import com.mysql.management.MysqldResource;
import com.mysql.management.MysqldResourceI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
/**
* An embedded MySQL DB for testing.
*/
public class EmbeddedMySqlDb {

  private static final Log LOG = LogFactory.getLog(EmbeddedMySqlDb.class);

  // Filesystem location and connection parameters of the embedded instance.
  private String databaseDir;
  private String databaseName;
  private String host;
  private int port;
  private String username;
  private String password;

  private MysqldResource mysqldResource;

  /**
   * Constructor for a MySQL DB with a random database name and running on a random port.
   */
  public EmbeddedMySqlDb() {
    databaseDir = System.getProperty("java.io.tmpdir");
    // nanoTime suffix keeps concurrent test runs from colliding on disk.
    databaseName = "test_db_" + System.nanoTime();
    host = "localhost";
    // Random port in [3306, 13305]. NOTE(review): the port is not checked for
    // availability; a collision surfaces as a startup failure.
    port = new Random().nextInt(10000) + 3306;
    username = "root";
    password = "";
  }

  /**
   * Start the database.
   *
   * @throws RuntimeException if the embedded server fails to start
   */
  public void startDb() {
    Map<String, String> databaseOptions = new HashMap<>();
    databaseOptions.put(MysqldResourceI.PORT, Integer.toString(port));

    mysqldResource = new MysqldResource(new File(databaseDir, databaseName));
    mysqldResource.start("embedded-mysqld-db-thread-" + System.currentTimeMillis(),
        databaseOptions);

    if (!mysqldResource.isRunning()) {
      throw new RuntimeException("Failed to start embedded MySQL DB!");
    }
    LOG.debug("MySQL started successfully");
  }

  /**
   * Stop the database.
   */
  public void stopDb() {
    mysqldResource.shutdown();
    // Typo fix: message previously read "MySQL stoppped succcessfully".
    LOG.debug("MySQL stopped successfully");
  }

  public String getUsername() {
    return username;
  }

  public String getPassword() {
    return password;
  }

  public String getHost() {
    return host;
  }

  public int getPort() {
    return port;
  }
}
| 9,429 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/db/DbConnectionWatchdog.java | package com.airbnb.reair.db;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
/**
* Periodically checks to see if it's possible to make a connection to the DB. If not, it exits this
* process. This is to handle a case where for some reason, the MySQL JDBC driver gets in a bad
* state and is no longer able to make connections. Further debugging is pending.
*/
public class DbConnectionWatchdog extends Thread {

  private static final Log LOG = LogFactory.getLog(DbConnectionWatchdog.class);

  // How often to probe the DB, in ms.
  private static final long DB_CONNECTION_CHECK_INTERVAL = 10 * 1000;
  // Exit the process if no probe has succeeded for this long, in ms.
  // NOTE(review): 100 * 100 = 10,000 ms, which equals the check interval, so a single
  // slow or failed probe can trigger an exit. Possibly intended as 100 * 1000 -- confirm.
  private static final long WATCHDOG_TIMER_LIMIT = 100 * 100;
  private static final String TEST_QUERY = "SELECT 1";

  private long lastSuccessfulConnectionTime = 0;
  private DbConnectionFactory dbConnectionFactory;

  /**
   * Constructor.
   *
   * @param dbConnectionFactory the connection factory to use for making test connections
   */
  public DbConnectionWatchdog(DbConnectionFactory dbConnectionFactory) {
    this.dbConnectionFactory = dbConnectionFactory;
    this.setDaemon(true);
    this.setName(this.getClass().getSimpleName() + "-" + this.getId());
  }

  @Override
  public void run() {
    lastSuccessfulConnectionTime = System.currentTimeMillis();

    while (true) {
      try {
        Connection connection = dbConnectionFactory.getConnection();
        // Use the TEST_QUERY constant (previously an inlined duplicate literal) and
        // close the statement deterministically via try-with-resources.
        try (PreparedStatement ps = connection.prepareStatement(TEST_QUERY)) {
          ps.execute();
        }
        LOG.debug("Successfully executed " + TEST_QUERY);
        lastSuccessfulConnectionTime = System.currentTimeMillis();
      } catch (SQLException e) {
        LOG.error("Got an exception when executing " + TEST_QUERY, e);
      }

      // If too long has passed since a last successful query, exit the
      // server so that it can restart.
      long timeSinceLastSuccessfulConnection =
          System.currentTimeMillis() - lastSuccessfulConnectionTime;

      if (timeSinceLastSuccessfulConnection > WATCHDOG_TIMER_LIMIT) {
        LOG.error(String.format(
            "Too much time has elapsed since the " + "last successful DB connection (Elapsed: %sms "
                + "Limit: %sms). Exiting...",
            timeSinceLastSuccessfulConnection, WATCHDOG_TIMER_LIMIT));
        System.exit(-1);
      }

      try {
        Thread.sleep(DB_CONNECTION_CHECK_INTERVAL);
      } catch (InterruptedException e) {
        LOG.error("Got interrupted! Exiting...", e);
        System.exit(-1);
      }
    }
  }
}
| 9,430 |
0 | Create_ds/reair/utils/src/main/java/com/airbnb/reair | Create_ds/reair/utils/src/main/java/com/airbnb/reair/db/TestDbCredentials.java | package com.airbnb.reair.db;
import com.airbnb.reair.db.DbCredentials;
/**
* Credentials for connecting to the EmbeddedMySqlDb.
*/
public class TestDbCredentials implements DbCredentials {

  // The embedded DB requires no credential refresh, so this is a no-op.
  @Override
  public void refreshCredsIfNecessary() {
  }

  // Default root account of the embedded MySQL instance.
  @Override
  public String getReadWriteUsername() {
    return "root";
  }

  // The embedded instance runs with an empty root password.
  @Override
  public String getReadWritePassword() {
    return "";
  }
}
| 9,431 |
0 | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental/thrift/TReplicationOperation.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*
* @generated
*/
package com.airbnb.reair.incremental.thrift;
// Thrift-generated enum mapping replication operations to their IDL wire values.
// Do not edit by hand; regenerate from the Thrift IDL instead.
public enum TReplicationOperation implements org.apache.thrift.TEnum {
  COPY_UNPARTITIONED_TABLE(0), COPY_PARTITIONED_TABLE(1), COPY_PARTITION(2), COPY_PARTITIONS(
      3), DROP_TABLE(4), DROP_PARTITION(5), RENAME_TABLE(6), RENAME_PARTITION(7);

  // Wire value as defined in the Thrift IDL.
  private final int value;

  private TReplicationOperation(int value) {
    this.value = value;
  }

  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }

  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   *
   * @return null if the value is not found.
   */
  public static TReplicationOperation findByValue(int value) {
    switch (value) {
      case 0:
        return COPY_UNPARTITIONED_TABLE;
      case 1:
        return COPY_PARTITIONED_TABLE;
      case 2:
        return COPY_PARTITION;
      case 3:
        return COPY_PARTITIONS;
      case 4:
        return DROP_TABLE;
      case 5:
        return DROP_PARTITION;
      case 6:
        return RENAME_TABLE;
      case 7:
        return RENAME_PARTITION;
      default:
        return null;
    }
  }
}
| 9,432 |
0 | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental/thrift/TReplicationStatus.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*
* @generated
*/
package com.airbnb.reair.incremental.thrift;
// Thrift-generated enum for replication job states and their IDL wire values.
// Do not edit by hand; regenerate from the Thrift IDL instead.
public enum TReplicationStatus implements org.apache.thrift.TEnum {
  PENDING(0), RUNNING(1), SUCCESSFUL(2), FAILED(3), NOT_COMPLETABLE(4);

  // Wire value as defined in the Thrift IDL.
  private final int value;

  private TReplicationStatus(int value) {
    this.value = value;
  }

  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }

  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   *
   * @return null if the value is not found.
   */
  public static TReplicationStatus findByValue(int value) {
    switch (value) {
      case 0:
        return PENDING;
      case 1:
        return RUNNING;
      case 2:
        return SUCCESSFUL;
      case 3:
        return FAILED;
      case 4:
        return NOT_COMPLETABLE;
      default:
        return null;
    }
  }
}
| 9,433 |
0 | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental/thrift/TReplicationService.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*
* @generated
*/
package com.airbnb.reair.incremental.thrift;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TReplicationService {
// Thrift-generated synchronous service interface; regenerate from the IDL rather
// than editing by hand. Method semantics below are inferred from signatures --
// confirm against the server-side implementation.
public interface Iface {

  // Presumably returns up to maxJobs in-progress jobs with ids after afterId -- verify.
  public List<TReplicationJob> getActiveJobs(long afterId, int maxJobs)
      throws org.apache.thrift.TException;

  // Presumably returns up to maxJobs completed/retired jobs with ids after afterId -- verify.
  public List<TReplicationJob> getRetiredJobs(long afterId, int maxJobs)
      throws org.apache.thrift.TException;

  // Batch lookup of jobs keyed by id.
  public Map<Long, TReplicationJob> getJobs(List<Long> ids) throws org.apache.thrift.TException;

  public void pause() throws org.apache.thrift.TException;

  public void resume() throws org.apache.thrift.TException;

  public long getLag() throws org.apache.thrift.TException;
}
  /**
   * Asynchronous counterpart of {@link Iface}. Each method returns immediately and
   * delivers its result (or error) through the supplied {@code resultHandler}.
   */
  public interface AsyncIface {
    public void getActiveJobs(long afterId, int maxJobs,
        org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException;
    public void getRetiredJobs(long afterId, int maxJobs,
        org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException;
    public void getJobs(List<Long> ids, org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException;
    public void pause(org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException;
    public void resume(org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException;
    public void getLag(org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException;
  }
  /**
   * Blocking client for the replication service. Each public method serializes a
   * {@code <method>_args} struct ({@code send_*}), then blocks until the matching
   * {@code <method>_result} struct is read back ({@code recv_*}). Methods with a
   * non-void return throw {@code TApplicationException(MISSING_RESULT)} when the
   * server reply carries no success value.
   */
  public static class Client extends org.apache.thrift.TServiceClient implements Iface {
    /** Creates {@link Client} instances bound to the given protocol(s). */
    public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
      public Factory() {}
      public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
        return new Client(prot);
      }
      public Client getClient(org.apache.thrift.protocol.TProtocol iprot,
          org.apache.thrift.protocol.TProtocol oprot) {
        return new Client(iprot, oprot);
      }
    }
    // Single-protocol constructor: same protocol is used for input and output.
    public Client(org.apache.thrift.protocol.TProtocol prot) {
      super(prot, prot);
    }
    public Client(org.apache.thrift.protocol.TProtocol iprot,
        org.apache.thrift.protocol.TProtocol oprot) {
      super(iprot, oprot);
    }
    public List<TReplicationJob> getActiveJobs(long afterId, int maxJobs)
        throws org.apache.thrift.TException {
      send_getActiveJobs(afterId, maxJobs);
      return recv_getActiveJobs();
    }
    public void send_getActiveJobs(long afterId, int maxJobs) throws org.apache.thrift.TException {
      getActiveJobs_args args = new getActiveJobs_args();
      args.setAfterId(afterId);
      args.setMaxJobs(maxJobs);
      sendBase("getActiveJobs", args);
    }
    public List<TReplicationJob> recv_getActiveJobs() throws org.apache.thrift.TException {
      getActiveJobs_result result = new getActiveJobs_result();
      receiveBase(result, "getActiveJobs");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(
          org.apache.thrift.TApplicationException.MISSING_RESULT,
          "getActiveJobs failed: unknown result");
    }
    public List<TReplicationJob> getRetiredJobs(long afterId, int maxJobs)
        throws org.apache.thrift.TException {
      send_getRetiredJobs(afterId, maxJobs);
      return recv_getRetiredJobs();
    }
    public void send_getRetiredJobs(long afterId, int maxJobs) throws org.apache.thrift.TException {
      getRetiredJobs_args args = new getRetiredJobs_args();
      args.setAfterId(afterId);
      args.setMaxJobs(maxJobs);
      sendBase("getRetiredJobs", args);
    }
    public List<TReplicationJob> recv_getRetiredJobs() throws org.apache.thrift.TException {
      getRetiredJobs_result result = new getRetiredJobs_result();
      receiveBase(result, "getRetiredJobs");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(
          org.apache.thrift.TApplicationException.MISSING_RESULT,
          "getRetiredJobs failed: unknown result");
    }
    public Map<Long, TReplicationJob> getJobs(List<Long> ids) throws org.apache.thrift.TException {
      send_getJobs(ids);
      return recv_getJobs();
    }
    public void send_getJobs(List<Long> ids) throws org.apache.thrift.TException {
      getJobs_args args = new getJobs_args();
      args.setIds(ids);
      sendBase("getJobs", args);
    }
    public Map<Long, TReplicationJob> recv_getJobs() throws org.apache.thrift.TException {
      getJobs_result result = new getJobs_result();
      receiveBase(result, "getJobs");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(
          org.apache.thrift.TApplicationException.MISSING_RESULT, "getJobs failed: unknown result");
    }
    public void pause() throws org.apache.thrift.TException {
      send_pause();
      recv_pause();
    }
    public void send_pause() throws org.apache.thrift.TException {
      pause_args args = new pause_args();
      sendBase("pause", args);
    }
    public void recv_pause() throws org.apache.thrift.TException {
      pause_result result = new pause_result();
      receiveBase(result, "pause");
      return;
    }
    public void resume() throws org.apache.thrift.TException {
      send_resume();
      recv_resume();
    }
    public void send_resume() throws org.apache.thrift.TException {
      resume_args args = new resume_args();
      sendBase("resume", args);
    }
    public void recv_resume() throws org.apache.thrift.TException {
      resume_result result = new resume_result();
      receiveBase(result, "resume");
      return;
    }
    public long getLag() throws org.apache.thrift.TException {
      send_getLag();
      return recv_getLag();
    }
    public void send_getLag() throws org.apache.thrift.TException {
      getLag_args args = new getLag_args();
      sendBase("getLag", args);
    }
    public long recv_getLag() throws org.apache.thrift.TException {
      getLag_result result = new getLag_result();
      receiveBase(result, "getLag");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(
          org.apache.thrift.TApplicationException.MISSING_RESULT, "getLag failed: unknown result");
    }
  }
  /**
   * Non-blocking client for the replication service. Each public method registers a
   * {@code <method>_call} (a {@link org.apache.thrift.async.TAsyncMethodCall}) with the
   * async client manager; the call object serializes the request in {@code write_args}
   * and, once the response frame is read, deserializes the result in {@code getResult}
   * by replaying the frame buffer through a synchronous {@link Client}.
   */
  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient
      implements AsyncIface {
    public static class Factory
        implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
      private org.apache.thrift.async.TAsyncClientManager clientManager;
      private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager,
          org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
        this.clientManager = clientManager;
        this.protocolFactory = protocolFactory;
      }
      public AsyncClient getAsyncClient(
          org.apache.thrift.transport.TNonblockingTransport transport) {
        return new AsyncClient(protocolFactory, clientManager, transport);
      }
    }
    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory,
        org.apache.thrift.async.TAsyncClientManager clientManager,
        org.apache.thrift.transport.TNonblockingTransport transport) {
      super(protocolFactory, clientManager, transport);
    }
    public void getActiveJobs(long afterId, int maxJobs,
        org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException {
      checkReady();
      getActiveJobs_call method_call = new getActiveJobs_call(afterId, maxJobs, resultHandler, this,
          ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getActiveJobs_call extends org.apache.thrift.async.TAsyncMethodCall {
      private long afterId;
      private int maxJobs;
      public getActiveJobs_call(long afterId, int maxJobs,
          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          org.apache.thrift.async.TAsyncClient client,
          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          org.apache.thrift.transport.TNonblockingTransport transport)
          throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
        this.afterId = afterId;
        this.maxJobs = maxJobs;
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getActiveJobs",
            org.apache.thrift.protocol.TMessageType.CALL, 0));
        getActiveJobs_args args = new getActiveJobs_args();
        args.setAfterId(afterId);
        args.setMaxJobs(maxJobs);
        args.write(prot);
        prot.writeMessageEnd();
      }
      public List<TReplicationJob> getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot =
            client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getActiveJobs();
      }
    }
    public void getRetiredJobs(long afterId, int maxJobs,
        org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException {
      checkReady();
      getRetiredJobs_call method_call = new getRetiredJobs_call(afterId, maxJobs, resultHandler,
          this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getRetiredJobs_call extends org.apache.thrift.async.TAsyncMethodCall {
      private long afterId;
      private int maxJobs;
      public getRetiredJobs_call(long afterId, int maxJobs,
          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          org.apache.thrift.async.TAsyncClient client,
          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          org.apache.thrift.transport.TNonblockingTransport transport)
          throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
        this.afterId = afterId;
        this.maxJobs = maxJobs;
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRetiredJobs",
            org.apache.thrift.protocol.TMessageType.CALL, 0));
        getRetiredJobs_args args = new getRetiredJobs_args();
        args.setAfterId(afterId);
        args.setMaxJobs(maxJobs);
        args.write(prot);
        prot.writeMessageEnd();
      }
      public List<TReplicationJob> getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot =
            client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getRetiredJobs();
      }
    }
    public void getJobs(List<Long> ids, org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException {
      checkReady();
      getJobs_call method_call =
          new getJobs_call(ids, resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getJobs_call extends org.apache.thrift.async.TAsyncMethodCall {
      private List<Long> ids;
      public getJobs_call(List<Long> ids, org.apache.thrift.async.AsyncMethodCallback resultHandler,
          org.apache.thrift.async.TAsyncClient client,
          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          org.apache.thrift.transport.TNonblockingTransport transport)
          throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
        this.ids = ids;
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getJobs",
            org.apache.thrift.protocol.TMessageType.CALL, 0));
        getJobs_args args = new getJobs_args();
        args.setIds(ids);
        args.write(prot);
        prot.writeMessageEnd();
      }
      public Map<Long, TReplicationJob> getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot =
            client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getJobs();
      }
    }
    public void pause(org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException {
      checkReady();
      pause_call method_call =
          new pause_call(resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class pause_call extends org.apache.thrift.async.TAsyncMethodCall {
      public pause_call(org.apache.thrift.async.AsyncMethodCallback resultHandler,
          org.apache.thrift.async.TAsyncClient client,
          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          org.apache.thrift.transport.TNonblockingTransport transport)
          throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("pause",
            org.apache.thrift.protocol.TMessageType.CALL, 0));
        pause_args args = new pause_args();
        args.write(prot);
        prot.writeMessageEnd();
      }
      public void getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot =
            client.getProtocolFactory().getProtocol(memoryTransport);
        (new Client(prot)).recv_pause();
      }
    }
    public void resume(org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException {
      checkReady();
      resume_call method_call =
          new resume_call(resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class resume_call extends org.apache.thrift.async.TAsyncMethodCall {
      public resume_call(org.apache.thrift.async.AsyncMethodCallback resultHandler,
          org.apache.thrift.async.TAsyncClient client,
          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          org.apache.thrift.transport.TNonblockingTransport transport)
          throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("resume",
            org.apache.thrift.protocol.TMessageType.CALL, 0));
        resume_args args = new resume_args();
        args.write(prot);
        prot.writeMessageEnd();
      }
      public void getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot =
            client.getProtocolFactory().getProtocol(memoryTransport);
        (new Client(prot)).recv_resume();
      }
    }
    public void getLag(org.apache.thrift.async.AsyncMethodCallback resultHandler)
        throws org.apache.thrift.TException {
      checkReady();
      getLag_call method_call =
          new getLag_call(resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getLag_call extends org.apache.thrift.async.TAsyncMethodCall {
      public getLag_call(org.apache.thrift.async.AsyncMethodCallback resultHandler,
          org.apache.thrift.async.TAsyncClient client,
          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          org.apache.thrift.transport.TNonblockingTransport transport)
          throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getLag",
            org.apache.thrift.protocol.TMessageType.CALL, 0));
        getLag_args args = new getLag_args();
        args.write(prot);
        prot.writeMessageEnd();
      }
      public long getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot =
            client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getLag();
      }
    }
  }
  /**
   * Server-side synchronous processor. Dispatches incoming messages by method name to
   * a {@link org.apache.thrift.ProcessFunction} that deserializes the args struct,
   * invokes the handler ({@link Iface}), and packs the return value into the
   * corresponding result struct. All functions here are two-way (isOneway() == false).
   */
  public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I>
      implements org.apache.thrift.TProcessor {
    private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
    public Processor(I iface) {
      super(iface, getProcessMap(
          new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
    }
    protected Processor(I iface,
        Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
      super(iface, getProcessMap(processMap));
    }
    // Registers one ProcessFunction per service method, keyed by wire method name.
    private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(
        Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
      processMap.put("getActiveJobs", new getActiveJobs());
      processMap.put("getRetiredJobs", new getRetiredJobs());
      processMap.put("getJobs", new getJobs());
      processMap.put("pause", new pause());
      processMap.put("resume", new resume());
      processMap.put("getLag", new getLag());
      return processMap;
    }
    public static class getActiveJobs<I extends Iface>
        extends org.apache.thrift.ProcessFunction<I, getActiveJobs_args> {
      public getActiveJobs() {
        super("getActiveJobs");
      }
      public getActiveJobs_args getEmptyArgsInstance() {
        return new getActiveJobs_args();
      }
      protected boolean isOneway() {
        return false;
      }
      public getActiveJobs_result getResult(I iface, getActiveJobs_args args)
          throws org.apache.thrift.TException {
        getActiveJobs_result result = new getActiveJobs_result();
        result.success = iface.getActiveJobs(args.afterId, args.maxJobs);
        return result;
      }
    }
    public static class getRetiredJobs<I extends Iface>
        extends org.apache.thrift.ProcessFunction<I, getRetiredJobs_args> {
      public getRetiredJobs() {
        super("getRetiredJobs");
      }
      public getRetiredJobs_args getEmptyArgsInstance() {
        return new getRetiredJobs_args();
      }
      protected boolean isOneway() {
        return false;
      }
      public getRetiredJobs_result getResult(I iface, getRetiredJobs_args args)
          throws org.apache.thrift.TException {
        getRetiredJobs_result result = new getRetiredJobs_result();
        result.success = iface.getRetiredJobs(args.afterId, args.maxJobs);
        return result;
      }
    }
    public static class getJobs<I extends Iface>
        extends org.apache.thrift.ProcessFunction<I, getJobs_args> {
      public getJobs() {
        super("getJobs");
      }
      public getJobs_args getEmptyArgsInstance() {
        return new getJobs_args();
      }
      protected boolean isOneway() {
        return false;
      }
      public getJobs_result getResult(I iface, getJobs_args args)
          throws org.apache.thrift.TException {
        getJobs_result result = new getJobs_result();
        result.success = iface.getJobs(args.ids);
        return result;
      }
    }
    public static class pause<I extends Iface>
        extends org.apache.thrift.ProcessFunction<I, pause_args> {
      public pause() {
        super("pause");
      }
      public pause_args getEmptyArgsInstance() {
        return new pause_args();
      }
      protected boolean isOneway() {
        return false;
      }
      public pause_result getResult(I iface, pause_args args) throws org.apache.thrift.TException {
        pause_result result = new pause_result();
        iface.pause();
        return result;
      }
    }
    public static class resume<I extends Iface>
        extends org.apache.thrift.ProcessFunction<I, resume_args> {
      public resume() {
        super("resume");
      }
      public resume_args getEmptyArgsInstance() {
        return new resume_args();
      }
      protected boolean isOneway() {
        return false;
      }
      public resume_result getResult(I iface, resume_args args)
          throws org.apache.thrift.TException {
        resume_result result = new resume_result();
        iface.resume();
        return result;
      }
    }
    public static class getLag<I extends Iface>
        extends org.apache.thrift.ProcessFunction<I, getLag_args> {
      public getLag() {
        super("getLag");
      }
      public getLag_args getEmptyArgsInstance() {
        return new getLag_args();
      }
      protected boolean isOneway() {
        return false;
      }
      public getLag_result getResult(I iface, getLag_args args)
          throws org.apache.thrift.TException {
        getLag_result result = new getLag_result();
        result.success = iface.getLag();
        // Primitive (i64) result: the isSet bit must be raised explicitly.
        result.setSuccessIsSet(true);
        return result;
      }
    }
  }
  /**
   * Server-side asynchronous processor. Each {@link org.apache.thrift.AsyncProcessFunction}
   * forwards the deserialized args to the {@link AsyncIface} handler and supplies a
   * callback that, on completion, serializes the result into the response frame buffer;
   * on error it sends a TApplicationException(INTERNAL_ERROR) instead. Note: the
   * onError handlers construct a local {@code result} struct that is never sent —
   * this is an artifact of the Thrift 0.9.1 code generator for services without
   * declared exceptions.
   */
  public static class AsyncProcessor<I extends AsyncIface>
      extends org.apache.thrift.TBaseAsyncProcessor<I> {
    private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName());
    public AsyncProcessor(I iface) {
      super(iface, getProcessMap(
          new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
    }
    protected AsyncProcessor(I iface,
        Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
      super(iface, getProcessMap(processMap));
    }
    // Registers one AsyncProcessFunction per service method, keyed by wire method name.
    private static <I extends AsyncIface> Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> getProcessMap(
        Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
      processMap.put("getActiveJobs", new getActiveJobs());
      processMap.put("getRetiredJobs", new getRetiredJobs());
      processMap.put("getJobs", new getJobs());
      processMap.put("pause", new pause());
      processMap.put("resume", new resume());
      processMap.put("getLag", new getLag());
      return processMap;
    }
    public static class getActiveJobs<I extends AsyncIface> extends
        org.apache.thrift.AsyncProcessFunction<I, getActiveJobs_args, List<TReplicationJob>> {
      public getActiveJobs() {
        super("getActiveJobs");
      }
      public getActiveJobs_args getEmptyArgsInstance() {
        return new getActiveJobs_args();
      }
      public AsyncMethodCallback<List<TReplicationJob>> getResultHandler(final AsyncFrameBuffer fb,
          final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<List<TReplicationJob>>() {
          public void onComplete(List<TReplicationJob> o) {
            getActiveJobs_result result = new getActiveJobs_result();
            result.success = o;
            try {
              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getActiveJobs_result result = new getActiveJobs_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase) new org.apache.thrift.TApplicationException(
                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb, msg, msgType, seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, getActiveJobs_args args,
          org.apache.thrift.async.AsyncMethodCallback<List<TReplicationJob>> resultHandler)
          throws TException {
        iface.getActiveJobs(args.afterId, args.maxJobs, resultHandler);
      }
    }
    public static class getRetiredJobs<I extends AsyncIface> extends
        org.apache.thrift.AsyncProcessFunction<I, getRetiredJobs_args, List<TReplicationJob>> {
      public getRetiredJobs() {
        super("getRetiredJobs");
      }
      public getRetiredJobs_args getEmptyArgsInstance() {
        return new getRetiredJobs_args();
      }
      public AsyncMethodCallback<List<TReplicationJob>> getResultHandler(final AsyncFrameBuffer fb,
          final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<List<TReplicationJob>>() {
          public void onComplete(List<TReplicationJob> o) {
            getRetiredJobs_result result = new getRetiredJobs_result();
            result.success = o;
            try {
              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getRetiredJobs_result result = new getRetiredJobs_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase) new org.apache.thrift.TApplicationException(
                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb, msg, msgType, seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, getRetiredJobs_args args,
          org.apache.thrift.async.AsyncMethodCallback<List<TReplicationJob>> resultHandler)
          throws TException {
        iface.getRetiredJobs(args.afterId, args.maxJobs, resultHandler);
      }
    }
    public static class getJobs<I extends AsyncIface> extends
        org.apache.thrift.AsyncProcessFunction<I, getJobs_args, Map<Long, TReplicationJob>> {
      public getJobs() {
        super("getJobs");
      }
      public getJobs_args getEmptyArgsInstance() {
        return new getJobs_args();
      }
      public AsyncMethodCallback<Map<Long, TReplicationJob>> getResultHandler(
          final AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Map<Long, TReplicationJob>>() {
          public void onComplete(Map<Long, TReplicationJob> o) {
            getJobs_result result = new getJobs_result();
            result.success = o;
            try {
              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getJobs_result result = new getJobs_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase) new org.apache.thrift.TApplicationException(
                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb, msg, msgType, seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, getJobs_args args,
          org.apache.thrift.async.AsyncMethodCallback<Map<Long, TReplicationJob>> resultHandler)
          throws TException {
        iface.getJobs(args.ids, resultHandler);
      }
    }
    public static class pause<I extends AsyncIface>
        extends org.apache.thrift.AsyncProcessFunction<I, pause_args, Void> {
      public pause() {
        super("pause");
      }
      public pause_args getEmptyArgsInstance() {
        return new pause_args();
      }
      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb,
          final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Void>() {
          public void onComplete(Void o) {
            pause_result result = new pause_result();
            try {
              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            pause_result result = new pause_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase) new org.apache.thrift.TApplicationException(
                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb, msg, msgType, seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, pause_args args,
          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
        iface.pause(resultHandler);
      }
    }
    public static class resume<I extends AsyncIface>
        extends org.apache.thrift.AsyncProcessFunction<I, resume_args, Void> {
      public resume() {
        super("resume");
      }
      public resume_args getEmptyArgsInstance() {
        return new resume_args();
      }
      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb,
          final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Void>() {
          public void onComplete(Void o) {
            resume_result result = new resume_result();
            try {
              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            resume_result result = new resume_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase) new org.apache.thrift.TApplicationException(
                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb, msg, msgType, seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, resume_args args,
          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
        iface.resume(resultHandler);
      }
    }
    public static class getLag<I extends AsyncIface>
        extends org.apache.thrift.AsyncProcessFunction<I, getLag_args, Long> {
      public getLag() {
        super("getLag");
      }
      public getLag_args getEmptyArgsInstance() {
        return new getLag_args();
      }
      public AsyncMethodCallback<Long> getResultHandler(final AsyncFrameBuffer fb,
          final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Long>() {
          public void onComplete(Long o) {
            getLag_result result = new getLag_result();
            result.success = o;
            result.setSuccessIsSet(true);
            try {
              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getLag_result result = new getLag_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase) new org.apache.thrift.TApplicationException(
                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb, msg, msgType, seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, getLag_args args,
          org.apache.thrift.async.AsyncMethodCallback<Long> resultHandler) throws TException {
        iface.getLag(resultHandler);
      }
    }
  }
public static class getActiveJobs_args
implements org.apache.thrift.TBase<getActiveJobs_args, getActiveJobs_args._Fields>,
java.io.Serializable, Cloneable, Comparable<getActiveJobs_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("getActiveJobs_args");
private static final org.apache.thrift.protocol.TField AFTER_ID_FIELD_DESC =
new org.apache.thrift.protocol.TField("afterId", org.apache.thrift.protocol.TType.I64,
(short) 1);
private static final org.apache.thrift.protocol.TField MAX_JOBS_FIELD_DESC =
new org.apache.thrift.protocol.TField("maxJobs", org.apache.thrift.protocol.TType.I32,
(short) 2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getActiveJobs_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getActiveJobs_argsTupleSchemeFactory());
}
public long afterId; // required
public int maxJobs; // required
    /**
     * The set of fields this struct contains, along with convenience methods for finding and
     * manipulating them.
     */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      AFTER_ID((short) 1, "afterId"), MAX_JOBS((short) 2, "maxJobs");
      // Lookup table from IDL field name to constant, populated once at class load.
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if it is not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch (fieldId) {
          case 1: // AFTER_ID
            return AFTER_ID;
          case 2: // MAX_JOBS
            return MAX_JOBS;
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null)
          throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if it is not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
// isset id assignments
private static final int __AFTERID_ISSET_ID = 0;
private static final int __MAXJOBS_ISSET_ID = 1;
private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.AFTER_ID,
new org.apache.thrift.meta_data.FieldMetaData("afterId",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.MAX_JOBS,
new org.apache.thrift.meta_data.FieldMetaData("maxJobs",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I32)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getActiveJobs_args.class,
metaDataMap);
}
public getActiveJobs_args() {}
public getActiveJobs_args(long afterId, int maxJobs) {
this();
this.afterId = afterId;
setAfterIdIsSet(true);
this.maxJobs = maxJobs;
setMaxJobsIsSet(true);
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getActiveJobs_args(getActiveJobs_args other) {
__isset_bitfield = other.__isset_bitfield;
this.afterId = other.afterId;
this.maxJobs = other.maxJobs;
}
public getActiveJobs_args deepCopy() {
return new getActiveJobs_args(this);
}
@Override
public void clear() {
setAfterIdIsSet(false);
this.afterId = 0;
setMaxJobsIsSet(false);
this.maxJobs = 0;
}
public long getAfterId() {
return this.afterId;
}
public getActiveJobs_args setAfterId(long afterId) {
this.afterId = afterId;
setAfterIdIsSet(true);
return this;
}
public void unsetAfterId() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AFTERID_ISSET_ID);
}
/** Returns true if field afterId is set (has been assigned a value) and false otherwise */
public boolean isSetAfterId() {
return EncodingUtils.testBit(__isset_bitfield, __AFTERID_ISSET_ID);
}
public void setAfterIdIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AFTERID_ISSET_ID, value);
}
public int getMaxJobs() {
return this.maxJobs;
}
public getActiveJobs_args setMaxJobs(int maxJobs) {
this.maxJobs = maxJobs;
setMaxJobsIsSet(true);
return this;
}
public void unsetMaxJobs() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
}
/** Returns true if field maxJobs is set (has been assigned a value) and false otherwise */
public boolean isSetMaxJobs() {
return EncodingUtils.testBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
}
public void setMaxJobsIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXJOBS_ISSET_ID, value);
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case AFTER_ID:
if (value == null) {
unsetAfterId();
} else {
setAfterId((Long) value);
}
break;
case MAX_JOBS:
if (value == null) {
unsetMaxJobs();
} else {
setMaxJobs((Integer) value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case AFTER_ID:
return Long.valueOf(getAfterId());
case MAX_JOBS:
return Integer.valueOf(getMaxJobs());
}
throw new IllegalStateException();
}
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case AFTER_ID:
return isSetAfterId();
case MAX_JOBS:
return isSetMaxJobs();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getActiveJobs_args)
return this.equals((getActiveJobs_args) that);
return false;
}
public boolean equals(getActiveJobs_args that) {
if (that == null)
return false;
boolean this_present_afterId = true;
boolean that_present_afterId = true;
if (this_present_afterId || that_present_afterId) {
if (!(this_present_afterId && that_present_afterId))
return false;
if (this.afterId != that.afterId)
return false;
}
boolean this_present_maxJobs = true;
boolean that_present_maxJobs = true;
if (this_present_maxJobs || that_present_maxJobs) {
if (!(this_present_maxJobs && that_present_maxJobs))
return false;
if (this.maxJobs != that.maxJobs)
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getActiveJobs_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetAfterId()).compareTo(other.isSetAfterId());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetAfterId()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.afterId, other.afterId);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetMaxJobs()).compareTo(other.isSetMaxJobs());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetMaxJobs()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxJobs, other.maxJobs);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot)
throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getActiveJobs_args(");
boolean first = true;
sb.append("afterId:");
sb.append(this.afterId);
first = false;
if (!first)
sb.append(", ");
sb.append("maxJobs:");
sb.append(this.maxJobs);
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in)
throws java.io.IOException, ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and
// doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getActiveJobs_argsStandardSchemeFactory implements SchemeFactory {
public getActiveJobs_argsStandardScheme getScheme() {
return new getActiveJobs_argsStandardScheme();
}
}
private static class getActiveJobs_argsStandardScheme
extends StandardScheme<getActiveJobs_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getActiveJobs_args struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // AFTER_ID
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.afterId = iprot.readI64();
struct.setAfterIdIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
case 2: // MAX_JOBS
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.maxJobs = iprot.readI32();
struct.setMaxJobsIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate
// method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getActiveJobs_args struct)
throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldBegin(AFTER_ID_FIELD_DESC);
oprot.writeI64(struct.afterId);
oprot.writeFieldEnd();
oprot.writeFieldBegin(MAX_JOBS_FIELD_DESC);
oprot.writeI32(struct.maxJobs);
oprot.writeFieldEnd();
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getActiveJobs_argsTupleSchemeFactory implements SchemeFactory {
public getActiveJobs_argsTupleScheme getScheme() {
return new getActiveJobs_argsTupleScheme();
}
}
private static class getActiveJobs_argsTupleScheme extends TupleScheme<getActiveJobs_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_args struct)
throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetAfterId()) {
optionals.set(0);
}
if (struct.isSetMaxJobs()) {
optionals.set(1);
}
oprot.writeBitSet(optionals, 2);
if (struct.isSetAfterId()) {
oprot.writeI64(struct.afterId);
}
if (struct.isSetMaxJobs()) {
oprot.writeI32(struct.maxJobs);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_args struct)
throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
struct.afterId = iprot.readI64();
struct.setAfterIdIsSet(true);
}
if (incoming.get(1)) {
struct.maxJobs = iprot.readI32();
struct.setMaxJobsIsSet(true);
}
}
}
}
public static class getActiveJobs_result
implements org.apache.thrift.TBase<getActiveJobs_result, getActiveJobs_result._Fields>,
java.io.Serializable, Cloneable, Comparable<getActiveJobs_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("getActiveJobs_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
(short) 0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getActiveJobs_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getActiveJobs_resultTupleSchemeFactory());
}
public List<TReplicationJob> success; // required
/**
* The set of fields this struct contains, along with convenience methods for finding and
* manipulating them.
*/
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short) 0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS,
new org.apache.thrift.meta_data.FieldMetaData("success",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(
org.apache.thrift.protocol.TType.STRUCT, TReplicationJob.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getActiveJobs_result.class,
metaDataMap);
}
public getActiveJobs_result() {}
public getActiveJobs_result(List<TReplicationJob> success) {
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getActiveJobs_result(getActiveJobs_result other) {
if (other.isSetSuccess()) {
List<TReplicationJob> __this__success =
new ArrayList<TReplicationJob>(other.success.size());
for (TReplicationJob other_element : other.success) {
__this__success.add(new TReplicationJob(other_element));
}
this.success = __this__success;
}
}
public getActiveJobs_result deepCopy() {
return new getActiveJobs_result(this);
}
@Override
public void clear() {
this.success = null;
}
public int getSuccessSize() {
return (this.success == null) ? 0 : this.success.size();
}
public java.util.Iterator<TReplicationJob> getSuccessIterator() {
return (this.success == null) ? null : this.success.iterator();
}
public void addToSuccess(TReplicationJob elem) {
if (this.success == null) {
this.success = new ArrayList<TReplicationJob>();
}
this.success.add(elem);
}
public List<TReplicationJob> getSuccess() {
return this.success;
}
public getActiveJobs_result setSuccess(List<TReplicationJob> success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((List<TReplicationJob>) value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getActiveJobs_result)
return this.equals((getActiveJobs_result) that);
return false;
}
public boolean equals(getActiveJobs_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getActiveJobs_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot)
throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getActiveJobs_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in)
throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getActiveJobs_resultStandardSchemeFactory implements SchemeFactory {
public getActiveJobs_resultStandardScheme getScheme() {
return new getActiveJobs_resultStandardScheme();
}
}
private static class getActiveJobs_resultStandardScheme
extends StandardScheme<getActiveJobs_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getActiveJobs_result struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list26 = iprot.readListBegin();
struct.success = new ArrayList<TReplicationJob>(_list26.size);
for (int _i27 = 0; _i27 < _list26.size; ++_i27) {
TReplicationJob _elem28;
_elem28 = new TReplicationJob();
_elem28.read(iprot);
struct.success.add(_elem28);
}
iprot.readListEnd();
}
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate
// method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getActiveJobs_result struct)
throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.success != null) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
for (TReplicationJob _iter29 : struct.success) {
_iter29.write(oprot);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getActiveJobs_resultTupleSchemeFactory implements SchemeFactory {
public getActiveJobs_resultTupleScheme getScheme() {
return new getActiveJobs_resultTupleScheme();
}
}
private static class getActiveJobs_resultTupleScheme extends TupleScheme<getActiveJobs_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_result struct)
throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
for (TReplicationJob _iter30 : struct.success) {
_iter30.write(oprot);
}
}
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_result struct)
throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list31 = new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
struct.success = new ArrayList<TReplicationJob>(_list31.size);
for (int _i32 = 0; _i32 < _list31.size; ++_i32) {
TReplicationJob _elem33;
_elem33 = new TReplicationJob();
_elem33.read(iprot);
struct.success.add(_elem33);
}
}
struct.setSuccessIsSet(true);
}
}
}
}
public static class getRetiredJobs_args
implements org.apache.thrift.TBase<getRetiredJobs_args, getRetiredJobs_args._Fields>,
java.io.Serializable, Cloneable, Comparable<getRetiredJobs_args> {
    // Wire-level Thrift descriptors for this struct and its fields
    // (field id 1 = afterId as I64, field id 2 = maxJobs as I32).
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
        new org.apache.thrift.protocol.TStruct("getRetiredJobs_args");
    private static final org.apache.thrift.protocol.TField AFTER_ID_FIELD_DESC =
        new org.apache.thrift.protocol.TField("afterId", org.apache.thrift.protocol.TType.I64,
            (short) 1);
    private static final org.apache.thrift.protocol.TField MAX_JOBS_FIELD_DESC =
        new org.apache.thrift.protocol.TField("maxJobs", org.apache.thrift.protocol.TType.I32,
            (short) 2);
    // Maps each serialization scheme class to the factory producing its
    // reader/writer; populated once at class-load time.
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
        new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new getRetiredJobs_argsStandardSchemeFactory());
      schemes.put(TupleScheme.class, new getRetiredJobs_argsTupleSchemeFactory());
    }
    public long afterId; // required
    public int maxJobs; // required
    /**
     * The set of fields this struct contains, along with convenience methods for finding and
     * manipulating them.
     */
    // Generated field-id enum: one constant per Thrift field, supporting
    // lookup by numeric id or by field name.
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      AFTER_ID((short) 1, "afterId"), MAX_JOBS((short) 2, "maxJobs");
      // Name -> constant index, built once from all enum values.
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch (fieldId) {
          case 1: // AFTER_ID
            return AFTER_ID;
          case 2: // MAX_JOBS
            return MAX_JOBS;
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null)
          throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    // isset id assignments
    // Bit positions in __isset_bitfield tracking whether each primitive
    // field has been explicitly assigned (primitives can't be null).
    private static final int __AFTERID_ISSET_ID = 0;
    private static final int __MAXJOBS_ISSET_ID = 1;
    private byte __isset_bitfield = 0;
    // Reflection-style metadata for the struct's fields, registered with the
    // global Thrift metadata registry at class-load time.
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      tmpMap.put(_Fields.AFTER_ID,
          new org.apache.thrift.meta_data.FieldMetaData("afterId",
              org.apache.thrift.TFieldRequirementType.DEFAULT,
              new org.apache.thrift.meta_data.FieldValueMetaData(
                  org.apache.thrift.protocol.TType.I64)));
      tmpMap.put(_Fields.MAX_JOBS,
          new org.apache.thrift.meta_data.FieldMetaData("maxJobs",
              org.apache.thrift.TFieldRequirementType.DEFAULT,
              new org.apache.thrift.meta_data.FieldValueMetaData(
                  org.apache.thrift.protocol.TType.I32)));
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRetiredJobs_args.class,
          metaDataMap);
    }
    // No-arg constructor required by the Thrift serialization framework.
    public getRetiredJobs_args() {}
    // Convenience constructor; marks both primitive fields as explicitly set.
    public getRetiredJobs_args(long afterId, int maxJobs) {
      this();
      this.afterId = afterId;
      setAfterIdIsSet(true);
      this.maxJobs = maxJobs;
      setMaxJobsIsSet(true);
    }
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public getRetiredJobs_args(getRetiredJobs_args other) {
      // Primitives copy by value; the isset bitfield is copied wholesale so
      // "set" state is preserved.
      __isset_bitfield = other.__isset_bitfield;
      this.afterId = other.afterId;
      this.maxJobs = other.maxJobs;
    }
    // Returns an independent copy of this struct (delegates to copy ctor).
    public getRetiredJobs_args deepCopy() {
      return new getRetiredJobs_args(this);
    }
    @Override
    public void clear() {
      // Reset both fields to their defaults and mark them unset.
      setAfterIdIsSet(false);
      this.afterId = 0;
      setMaxJobsIsSet(false);
      this.maxJobs = 0;
    }
    // Accessors for afterId. Setters return `this` for call chaining; the
    // unset/isSet/setIsSet trio manipulates the isset bitfield because a
    // primitive field cannot represent "absent" on its own.
    public long getAfterId() {
      return this.afterId;
    }
    public getRetiredJobs_args setAfterId(long afterId) {
      this.afterId = afterId;
      setAfterIdIsSet(true);
      return this;
    }
    public void unsetAfterId() {
      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AFTERID_ISSET_ID);
    }
    /** Returns true if field afterId is set (has been assigned a value) and false otherwise */
    public boolean isSetAfterId() {
      return EncodingUtils.testBit(__isset_bitfield, __AFTERID_ISSET_ID);
    }
    public void setAfterIdIsSet(boolean value) {
      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AFTERID_ISSET_ID, value);
    }
    // Accessors for maxJobs — same pattern as afterId above.
    public int getMaxJobs() {
      return this.maxJobs;
    }
    public getRetiredJobs_args setMaxJobs(int maxJobs) {
      this.maxJobs = maxJobs;
      setMaxJobsIsSet(true);
      return this;
    }
    public void unsetMaxJobs() {
      __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
    }
    /** Returns true if field maxJobs is set (has been assigned a value) and false otherwise */
    public boolean isSetMaxJobs() {
      return EncodingUtils.testBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
    }
    public void setMaxJobsIsSet(boolean value) {
      __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXJOBS_ISSET_ID, value);
    }
    // Generic field mutator used by the Thrift framework; a null value means
    // "unset the field", otherwise the boxed value is unwrapped and stored.
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
        case AFTER_ID:
          if (value == null) {
            unsetAfterId();
          } else {
            setAfterId((Long) value);
          }
          break;
        case MAX_JOBS:
          if (value == null) {
            unsetMaxJobs();
          } else {
            setMaxJobs((Integer) value);
          }
          break;
      }
    }
    // Generic field getter; primitives are returned boxed.
    public Object getFieldValue(_Fields field) {
      switch (field) {
        case AFTER_ID:
          return Long.valueOf(getAfterId());
        case MAX_JOBS:
          return Integer.valueOf(getMaxJobs());
      }
      throw new IllegalStateException();
    }
    /**
     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
     * otherwise
     */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
        case AFTER_ID:
          return isSetAfterId();
        case MAX_JOBS:
          return isSetMaxJobs();
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof getRetiredJobs_args)
        return this.equals((getRetiredJobs_args) that);
      return false;
    }
    // Typed equality: both fields are primitives, so the generated
    // "present" flags are constant true and this reduces to value equality
    // on afterId and maxJobs.
    public boolean equals(getRetiredJobs_args that) {
      if (that == null)
        return false;
      boolean this_present_afterId = true;
      boolean that_present_afterId = true;
      if (this_present_afterId || that_present_afterId) {
        if (!(this_present_afterId && that_present_afterId))
          return false;
        if (this.afterId != that.afterId)
          return false;
      }
      boolean this_present_maxJobs = true;
      boolean that_present_maxJobs = true;
      if (this_present_maxJobs || that_present_maxJobs) {
        if (!(this_present_maxJobs && that_present_maxJobs))
          return false;
        if (this.maxJobs != that.maxJobs)
          return false;
      }
      return true;
    }
@Override
public int hashCode() {
return 0;
}
    @Override
    public int compareTo(getRetiredJobs_args other) {
      // Standard Thrift ordering: different runtime classes sort by class
      // name; otherwise compare isSet flags first, then field values, in
      // field-id order.
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0;
      lastComparison = Boolean.valueOf(isSetAfterId()).compareTo(other.isSetAfterId());
      if (lastComparison != 0) {
        return lastComparison;
      }
      if (isSetAfterId()) {
        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.afterId, other.afterId);
        if (lastComparison != 0) {
          return lastComparison;
        }
      }
      lastComparison = Boolean.valueOf(isSetMaxJobs()).compareTo(other.isSetMaxJobs());
      if (lastComparison != 0) {
        return lastComparison;
      }
      if (isSetMaxJobs()) {
        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxJobs, other.maxJobs);
        if (lastComparison != 0) {
          return lastComparison;
        }
      }
      return 0;
    }
    // Resolve a numeric Thrift field id to its _Fields constant (or null).
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    // Deserialize this struct from the protocol, dispatching to the scheme
    // (standard or tuple) registered for the protocol's scheme class.
    public void read(org.apache.thrift.protocol.TProtocol iprot)
        throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    // Serialize this struct to the protocol via the matching scheme.
    public void write(org.apache.thrift.protocol.TProtocol oprot)
        throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      // Produces "getRetiredJobs_args(afterId:X, maxJobs:Y)". The `first`
      // flag is generator boilerplate; it is false before the only check,
      // so the ", " separator is always emitted between the two fields.
      StringBuilder sb = new StringBuilder("getRetiredJobs_args(");
      boolean first = true;
      sb.append("afterId:");
      sb.append(this.afterId);
      first = false;
      if (!first)
        sb.append(", ");
      sb.append("maxJobs:");
      sb.append(this.maxJobs);
      first = false;
      sb.append(")");
      return sb.toString();
    }
    // No required fields or sub-structs to validate for this struct.
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    // Java serialization hook: delegates to Thrift compact-protocol
    // encoding so serialized form matches the wire format.
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    // Java deserialization hook; mirrors writeObject above.
    private void readObject(java.io.ObjectInputStream in)
        throws java.io.IOException, ClassNotFoundException {
      try {
        // it doesn't seem like you should have to do this, but java serialization is wacky, and
        // doesn't call the default constructor.
        __isset_bitfield = 0;
        read(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class getRetiredJobs_argsStandardSchemeFactory implements SchemeFactory {
      public getRetiredJobs_argsStandardScheme getScheme() {
        return new getRetiredJobs_argsStandardScheme();
      }
    }
    // Standard (field-tagged) wire encoding: each field is written with its
    // id/type header, so fields may arrive in any order and unknown or
    // mistyped fields are skipped.
    private static class getRetiredJobs_argsStandardScheme
        extends StandardScheme<getRetiredJobs_args> {
      public void read(org.apache.thrift.protocol.TProtocol iprot, getRetiredJobs_args struct)
          throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true) {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            case 1: // AFTER_ID
              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                struct.afterId = iprot.readI64();
                struct.setAfterIdIsSet(true);
              } else {
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            case 2: // MAX_JOBS
              if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                struct.maxJobs = iprot.readI32();
                struct.setMaxJobsIsSet(true);
              } else {
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate
        // method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, getRetiredJobs_args struct)
          throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        oprot.writeFieldBegin(AFTER_ID_FIELD_DESC);
        oprot.writeI64(struct.afterId);
        oprot.writeFieldEnd();
        oprot.writeFieldBegin(MAX_JOBS_FIELD_DESC);
        oprot.writeI32(struct.maxJobs);
        oprot.writeFieldEnd();
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
private static class getRetiredJobs_argsTupleSchemeFactory implements SchemeFactory {
public getRetiredJobs_argsTupleScheme getScheme() {
return new getRetiredJobs_argsTupleScheme();
}
}
private static class getRetiredJobs_argsTupleScheme extends TupleScheme<getRetiredJobs_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_args struct)
throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetAfterId()) {
optionals.set(0);
}
if (struct.isSetMaxJobs()) {
optionals.set(1);
}
oprot.writeBitSet(optionals, 2);
if (struct.isSetAfterId()) {
oprot.writeI64(struct.afterId);
}
if (struct.isSetMaxJobs()) {
oprot.writeI32(struct.maxJobs);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_args struct)
throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
struct.afterId = iprot.readI64();
struct.setAfterIdIsSet(true);
}
if (incoming.get(1)) {
struct.maxJobs = iprot.readI32();
struct.setMaxJobsIsSet(true);
}
}
}
}
public static class getRetiredJobs_result
implements org.apache.thrift.TBase<getRetiredJobs_result, getRetiredJobs_result._Fields>,
java.io.Serializable, Cloneable, Comparable<getRetiredJobs_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("getRetiredJobs_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
(short) 0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getRetiredJobs_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getRetiredJobs_resultTupleSchemeFactory());
}
public List<TReplicationJob> success; // required
/**
* The set of fields this struct contains, along with convenience methods for finding and
* manipulating them.
*/
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short) 0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS,
new org.apache.thrift.meta_data.FieldMetaData("success",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(
org.apache.thrift.protocol.TType.STRUCT, TReplicationJob.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRetiredJobs_result.class,
metaDataMap);
}
public getRetiredJobs_result() {}
public getRetiredJobs_result(List<TReplicationJob> success) {
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getRetiredJobs_result(getRetiredJobs_result other) {
if (other.isSetSuccess()) {
List<TReplicationJob> __this__success =
new ArrayList<TReplicationJob>(other.success.size());
for (TReplicationJob other_element : other.success) {
__this__success.add(new TReplicationJob(other_element));
}
this.success = __this__success;
}
}
public getRetiredJobs_result deepCopy() {
return new getRetiredJobs_result(this);
}
@Override
public void clear() {
this.success = null;
}
public int getSuccessSize() {
return (this.success == null) ? 0 : this.success.size();
}
public java.util.Iterator<TReplicationJob> getSuccessIterator() {
return (this.success == null) ? null : this.success.iterator();
}
public void addToSuccess(TReplicationJob elem) {
if (this.success == null) {
this.success = new ArrayList<TReplicationJob>();
}
this.success.add(elem);
}
public List<TReplicationJob> getSuccess() {
return this.success;
}
public getRetiredJobs_result setSuccess(List<TReplicationJob> success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((List<TReplicationJob>) value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getRetiredJobs_result)
return this.equals((getRetiredJobs_result) that);
return false;
}
public boolean equals(getRetiredJobs_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getRetiredJobs_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot)
throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getRetiredJobs_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in)
throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getRetiredJobs_resultStandardSchemeFactory implements SchemeFactory {
public getRetiredJobs_resultStandardScheme getScheme() {
return new getRetiredJobs_resultStandardScheme();
}
}
private static class getRetiredJobs_resultStandardScheme
extends StandardScheme<getRetiredJobs_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getRetiredJobs_result struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list34 = iprot.readListBegin();
struct.success = new ArrayList<TReplicationJob>(_list34.size);
for (int _i35 = 0; _i35 < _list34.size; ++_i35) {
TReplicationJob _elem36;
_elem36 = new TReplicationJob();
_elem36.read(iprot);
struct.success.add(_elem36);
}
iprot.readListEnd();
}
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate
// method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getRetiredJobs_result struct)
throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.success != null) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
for (TReplicationJob _iter37 : struct.success) {
_iter37.write(oprot);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getRetiredJobs_resultTupleSchemeFactory implements SchemeFactory {
public getRetiredJobs_resultTupleScheme getScheme() {
return new getRetiredJobs_resultTupleScheme();
}
}
private static class getRetiredJobs_resultTupleScheme
extends TupleScheme<getRetiredJobs_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_result struct)
throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
for (TReplicationJob _iter38 : struct.success) {
_iter38.write(oprot);
}
}
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_result struct)
throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list39 = new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
struct.success = new ArrayList<TReplicationJob>(_list39.size);
for (int _i40 = 0; _i40 < _list39.size; ++_i40) {
TReplicationJob _elem41;
_elem41 = new TReplicationJob();
_elem41.read(iprot);
struct.success.add(_elem41);
}
}
struct.setSuccessIsSet(true);
}
}
}
}
public static class getJobs_args
implements org.apache.thrift.TBase<getJobs_args, getJobs_args._Fields>, java.io.Serializable,
Cloneable, Comparable<getJobs_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("getJobs_args");
private static final org.apache.thrift.protocol.TField IDS_FIELD_DESC =
new org.apache.thrift.protocol.TField("ids", org.apache.thrift.protocol.TType.LIST,
(short) 1);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getJobs_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getJobs_argsTupleSchemeFactory());
}
public List<Long> ids; // required
/**
* The set of fields this struct contains, along with convenience methods for finding and
* manipulating them.
*/
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
IDS((short) 1, "ids");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1: // IDS
return IDS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.IDS,
new org.apache.thrift.meta_data.FieldMetaData("ids",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I64))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getJobs_args.class,
metaDataMap);
}
public getJobs_args() {}
public getJobs_args(List<Long> ids) {
this();
this.ids = ids;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getJobs_args(getJobs_args other) {
if (other.isSetIds()) {
List<Long> __this__ids = new ArrayList<Long>(other.ids);
this.ids = __this__ids;
}
}
public getJobs_args deepCopy() {
return new getJobs_args(this);
}
@Override
public void clear() {
this.ids = null;
}
public int getIdsSize() {
return (this.ids == null) ? 0 : this.ids.size();
}
public java.util.Iterator<Long> getIdsIterator() {
return (this.ids == null) ? null : this.ids.iterator();
}
public void addToIds(long elem) {
if (this.ids == null) {
this.ids = new ArrayList<Long>();
}
this.ids.add(elem);
}
public List<Long> getIds() {
return this.ids;
}
public getJobs_args setIds(List<Long> ids) {
this.ids = ids;
return this;
}
public void unsetIds() {
this.ids = null;
}
/** Returns true if field ids is set (has been assigned a value) and false otherwise */
public boolean isSetIds() {
return this.ids != null;
}
public void setIdsIsSet(boolean value) {
if (!value) {
this.ids = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case IDS:
if (value == null) {
unsetIds();
} else {
setIds((List<Long>) value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case IDS:
return getIds();
}
throw new IllegalStateException();
}
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case IDS:
return isSetIds();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getJobs_args)
return this.equals((getJobs_args) that);
return false;
}
public boolean equals(getJobs_args that) {
if (that == null)
return false;
boolean this_present_ids = true && this.isSetIds();
boolean that_present_ids = true && that.isSetIds();
if (this_present_ids || that_present_ids) {
if (!(this_present_ids && that_present_ids))
return false;
if (!this.ids.equals(that.ids))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getJobs_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetIds()).compareTo(other.isSetIds());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetIds()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ids, other.ids);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot)
throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getJobs_args(");
boolean first = true;
sb.append("ids:");
if (this.ids == null) {
sb.append("null");
} else {
sb.append(this.ids);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in)
throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getJobs_argsStandardSchemeFactory implements SchemeFactory {
public getJobs_argsStandardScheme getScheme() {
return new getJobs_argsStandardScheme();
}
}
private static class getJobs_argsStandardScheme extends StandardScheme<getJobs_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getJobs_args struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 1: // IDS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list42 = iprot.readListBegin();
struct.ids = new ArrayList<Long>(_list42.size);
for (int _i43 = 0; _i43 < _list42.size; ++_i43) {
long _elem44;
_elem44 = iprot.readI64();
struct.ids.add(_elem44);
}
iprot.readListEnd();
}
struct.setIdsIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate
// method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getJobs_args struct)
throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.ids != null) {
oprot.writeFieldBegin(IDS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.I64, struct.ids.size()));
for (long _iter45 : struct.ids) {
oprot.writeI64(_iter45);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getJobs_argsTupleSchemeFactory implements SchemeFactory {
public getJobs_argsTupleScheme getScheme() {
return new getJobs_argsTupleScheme();
}
}
private static class getJobs_argsTupleScheme extends TupleScheme<getJobs_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getJobs_args struct)
throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetIds()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetIds()) {
{
oprot.writeI32(struct.ids.size());
for (long _iter46 : struct.ids) {
oprot.writeI64(_iter46);
}
}
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getJobs_args struct)
throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list47 = new org.apache.thrift.protocol.TList(
org.apache.thrift.protocol.TType.I64, iprot.readI32());
struct.ids = new ArrayList<Long>(_list47.size);
for (int _i48 = 0; _i48 < _list47.size; ++_i48) {
long _elem49;
_elem49 = iprot.readI64();
struct.ids.add(_elem49);
}
}
struct.setIdsIsSet(true);
}
}
}
}
public static class getJobs_result
implements org.apache.thrift.TBase<getJobs_result, getJobs_result._Fields>,
java.io.Serializable, Cloneable, Comparable<getJobs_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("getJobs_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP,
(short) 0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getJobs_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getJobs_resultTupleSchemeFactory());
}
public Map<Long, TReplicationJob> success; // required
/**
* The set of fields this struct contains, along with convenience methods for finding and
* manipulating them.
*/
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short) 0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS,
new org.apache.thrift.meta_data.FieldMetaData("success",
org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
new org.apache.thrift.meta_data.FieldValueMetaData(
org.apache.thrift.protocol.TType.I64),
new org.apache.thrift.meta_data.StructMetaData(
org.apache.thrift.protocol.TType.STRUCT, TReplicationJob.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getJobs_result.class,
metaDataMap);
}
public getJobs_result() {}
public getJobs_result(Map<Long, TReplicationJob> success) {
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getJobs_result(getJobs_result other) {
if (other.isSetSuccess()) {
Map<Long, TReplicationJob> __this__success =
new HashMap<Long, TReplicationJob>(other.success.size());
for (Map.Entry<Long, TReplicationJob> other_element : other.success.entrySet()) {
Long other_element_key = other_element.getKey();
TReplicationJob other_element_value = other_element.getValue();
Long __this__success_copy_key = other_element_key;
TReplicationJob __this__success_copy_value = new TReplicationJob(other_element_value);
__this__success.put(__this__success_copy_key, __this__success_copy_value);
}
this.success = __this__success;
}
}
public getJobs_result deepCopy() {
return new getJobs_result(this);
}
@Override
public void clear() {
this.success = null;
}
public int getSuccessSize() {
return (this.success == null) ? 0 : this.success.size();
}
public void putToSuccess(long key, TReplicationJob val) {
if (this.success == null) {
this.success = new HashMap<Long, TReplicationJob>();
}
this.success.put(key, val);
}
public Map<Long, TReplicationJob> getSuccess() {
return this.success;
}
public getJobs_result setSuccess(Map<Long, TReplicationJob> success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((Map<Long, TReplicationJob>) value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getJobs_result)
return this.equals((getJobs_result) that);
return false;
}
public boolean equals(getJobs_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getJobs_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot)
throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getJobs_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in)
throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(
new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getJobs_resultStandardSchemeFactory implements SchemeFactory {
public getJobs_resultStandardScheme getScheme() {
return new getJobs_resultStandardScheme();
}
}
    /** Standard (field-tagged) wire encoding for getJobs_result. */
    private static class getJobs_resultStandardScheme extends StandardScheme<getJobs_result> {
      /** Reads the success map (field id 0); unknown or mistyped fields are skipped. */
      public void read(org.apache.thrift.protocol.TProtocol iprot, getJobs_result struct)
          throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true) {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                {
                  org.apache.thrift.protocol.TMap _map50 = iprot.readMapBegin();
                  // Presize to 2x the declared entry count to avoid rehashing.
                  struct.success = new HashMap<Long, TReplicationJob>(2 * _map50.size);
                  for (int _i51 = 0; _i51 < _map50.size; ++_i51) {
                    long _key52;
                    TReplicationJob _val53;
                    _key52 = iprot.readI64();
                    _val53 = new TReplicationJob();
                    _val53.read(iprot);
                    struct.success.put(_key52, _val53);
                  }
                  iprot.readMapEnd();
                }
                struct.setSuccessIsSet(true);
              } else {
                // Wire type does not match the expected MAP; discard the value.
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate
        // method
        struct.validate();
      }
      /** Writes the success map as a Thrift MAP of i64 -> TReplicationJob, if non-null. */
      public void write(org.apache.thrift.protocol.TProtocol oprot, getJobs_result struct)
          throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        if (struct.success != null) {
          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
          {
            oprot.writeMapBegin(
                new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64,
                    org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
            for (Map.Entry<Long, TReplicationJob> _iter54 : struct.success.entrySet()) {
              oprot.writeI64(_iter54.getKey());
              _iter54.getValue().write(oprot);
            }
            oprot.writeMapEnd();
          }
          oprot.writeFieldEnd();
        }
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    /** Factory producing fresh tuple-protocol scheme instances for getJobs_result. */
    private static class getJobs_resultTupleSchemeFactory implements SchemeFactory {
      public getJobs_resultTupleScheme getScheme() {
        return new getJobs_resultTupleScheme();
      }
    }
    /** Compact tuple wire encoding: a presence bitset followed by the set fields. */
    private static class getJobs_resultTupleScheme extends TupleScheme<getJobs_result> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, getJobs_result struct)
          throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        // Single optional bit: whether 'success' is present.
        BitSet optionals = new BitSet();
        if (struct.isSetSuccess()) {
          optionals.set(0);
        }
        oprot.writeBitSet(optionals, 1);
        if (struct.isSetSuccess()) {
          {
            oprot.writeI32(struct.success.size());
            for (Map.Entry<Long, TReplicationJob> _iter55 : struct.success.entrySet()) {
              oprot.writeI64(_iter55.getKey());
              _iter55.getValue().write(oprot);
            }
          }
        }
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, getJobs_result struct)
          throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          {
            // Tuple encoding carries only the entry count; key/value types are implied.
            org.apache.thrift.protocol.TMap _map56 =
                new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64,
                    org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
            struct.success = new HashMap<Long, TReplicationJob>(2 * _map56.size);
            for (int _i57 = 0; _i57 < _map56.size; ++_i57) {
              long _key58;
              TReplicationJob _val59;
              _key58 = iprot.readI64();
              _val59 = new TReplicationJob();
              _val59.read(iprot);
              struct.success.put(_key58, _val59);
            }
          }
          struct.setSuccessIsSet(true);
        }
      }
    }
}
  // Thrift-generated argument struct for the zero-argument pause() RPC.
  // NOTE(review): this looks like thrift-compiler output; prefer regenerating from
  // the .thrift IDL rather than editing by hand.
  public static class pause_args implements org.apache.thrift.TBase<pause_args, pause_args._Fields>,
      java.io.Serializable, Cloneable, Comparable<pause_args> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
        new org.apache.thrift.protocol.TStruct("pause_args");
    // Registry of wire-format schemes, keyed by the protocol's scheme class.
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
        new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new pause_argsStandardSchemeFactory());
      schemes.put(TupleScheme.class, new pause_argsTupleSchemeFactory());
    }
    /**
     * The set of fields this struct contains, along with convenience methods for finding and
     * manipulating them.
     */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      ;
      // pause() takes no arguments, so this enum intentionally has no constants.
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch (fieldId) {
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null)
          throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(pause_args.class, metaDataMap);
    }
    public pause_args() {}
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public pause_args(pause_args other) {}
    public pause_args deepCopy() {
      return new pause_args(this);
    }
    @Override
    public void clear() {}
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      }
    }
    public Object getFieldValue(_Fields field) {
      switch (field) {
      }
      throw new IllegalStateException();
    }
    /**
     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
     * otherwise
     */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof pause_args)
        return this.equals((pause_args) that);
      return false;
    }
    // With no fields to compare, any two non-null pause_args instances are equal.
    public boolean equals(pause_args that) {
      if (that == null)
        return false;
      return true;
    }
    @Override
    public int hashCode() {
      return 0;
    }
    @Override
    public int compareTo(pause_args other) {
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0; // generator artifact: unused for a field-less struct
      return 0;
    }
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    public void read(org.apache.thrift.protocol.TProtocol iprot)
        throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot)
        throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("pause_args(");
      boolean first = true;
      sb.append(")");
      return sb.toString();
    }
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    // Java serialization is bridged through the Thrift compact protocol.
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private void readObject(java.io.ObjectInputStream in)
        throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class pause_argsStandardSchemeFactory implements SchemeFactory {
      public pause_argsStandardScheme getScheme() {
        return new pause_argsStandardScheme();
      }
    }
    private static class pause_argsStandardScheme extends StandardScheme<pause_args> {
      // Reads (and discards) any unexpected fields until STOP is seen.
      public void read(org.apache.thrift.protocol.TProtocol iprot, pause_args struct)
          throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true) {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate
        // method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, pause_args struct)
          throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    private static class pause_argsTupleSchemeFactory implements SchemeFactory {
      public pause_argsTupleScheme getScheme() {
        return new pause_argsTupleScheme();
      }
    }
    // Tuple scheme: a field-less struct serializes to nothing.
    private static class pause_argsTupleScheme extends TupleScheme<pause_args> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, pause_args struct)
          throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, pause_args struct)
          throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
      }
    }
  }
  // Thrift-generated result struct for pause(); the method returns void and declares
  // no exceptions, so the struct has no fields.
  // NOTE(review): this looks like thrift-compiler output; prefer regenerating from
  // the .thrift IDL rather than editing by hand.
  public static class pause_result
      implements org.apache.thrift.TBase<pause_result, pause_result._Fields>, java.io.Serializable,
      Cloneable, Comparable<pause_result> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
        new org.apache.thrift.protocol.TStruct("pause_result");
    // Registry of wire-format schemes, keyed by the protocol's scheme class.
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
        new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new pause_resultStandardSchemeFactory());
      schemes.put(TupleScheme.class, new pause_resultTupleSchemeFactory());
    }
    /**
     * The set of fields this struct contains, along with convenience methods for finding and
     * manipulating them.
     */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      ;
      // No result fields, so this enum intentionally has no constants.
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch (fieldId) {
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null)
          throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(pause_result.class,
          metaDataMap);
    }
    public pause_result() {}
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public pause_result(pause_result other) {}
    public pause_result deepCopy() {
      return new pause_result(this);
    }
    @Override
    public void clear() {}
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      }
    }
    public Object getFieldValue(_Fields field) {
      switch (field) {
      }
      throw new IllegalStateException();
    }
    /**
     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
     * otherwise
     */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof pause_result)
        return this.equals((pause_result) that);
      return false;
    }
    // With no fields to compare, any two non-null pause_result instances are equal.
    public boolean equals(pause_result that) {
      if (that == null)
        return false;
      return true;
    }
    @Override
    public int hashCode() {
      return 0;
    }
    @Override
    public int compareTo(pause_result other) {
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0; // generator artifact: unused for a field-less struct
      return 0;
    }
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    public void read(org.apache.thrift.protocol.TProtocol iprot)
        throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot)
        throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("pause_result(");
      boolean first = true;
      sb.append(")");
      return sb.toString();
    }
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    // Java serialization is bridged through the Thrift compact protocol.
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private void readObject(java.io.ObjectInputStream in)
        throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class pause_resultStandardSchemeFactory implements SchemeFactory {
      public pause_resultStandardScheme getScheme() {
        return new pause_resultStandardScheme();
      }
    }
    private static class pause_resultStandardScheme extends StandardScheme<pause_result> {
      // Reads (and discards) any unexpected fields until STOP is seen.
      public void read(org.apache.thrift.protocol.TProtocol iprot, pause_result struct)
          throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true) {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate
        // method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, pause_result struct)
          throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    private static class pause_resultTupleSchemeFactory implements SchemeFactory {
      public pause_resultTupleScheme getScheme() {
        return new pause_resultTupleScheme();
      }
    }
    // Tuple scheme: a field-less struct serializes to nothing.
    private static class pause_resultTupleScheme extends TupleScheme<pause_result> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, pause_result struct)
          throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, pause_result struct)
          throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
      }
    }
  }
  // Thrift-generated argument struct for the zero-argument resume() RPC.
  // NOTE(review): this looks like thrift-compiler output; prefer regenerating from
  // the .thrift IDL rather than editing by hand.
  public static class resume_args
      implements org.apache.thrift.TBase<resume_args, resume_args._Fields>, java.io.Serializable,
      Cloneable, Comparable<resume_args> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
        new org.apache.thrift.protocol.TStruct("resume_args");
    // Registry of wire-format schemes, keyed by the protocol's scheme class.
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
        new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new resume_argsStandardSchemeFactory());
      schemes.put(TupleScheme.class, new resume_argsTupleSchemeFactory());
    }
    /**
     * The set of fields this struct contains, along with convenience methods for finding and
     * manipulating them.
     */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      ;
      // resume() takes no arguments, so this enum intentionally has no constants.
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch (fieldId) {
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null)
          throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(resume_args.class,
          metaDataMap);
    }
    public resume_args() {}
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public resume_args(resume_args other) {}
    public resume_args deepCopy() {
      return new resume_args(this);
    }
    @Override
    public void clear() {}
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      }
    }
    public Object getFieldValue(_Fields field) {
      switch (field) {
      }
      throw new IllegalStateException();
    }
    /**
     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
     * otherwise
     */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof resume_args)
        return this.equals((resume_args) that);
      return false;
    }
    // With no fields to compare, any two non-null resume_args instances are equal.
    public boolean equals(resume_args that) {
      if (that == null)
        return false;
      return true;
    }
    @Override
    public int hashCode() {
      return 0;
    }
    @Override
    public int compareTo(resume_args other) {
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0; // generator artifact: unused for a field-less struct
      return 0;
    }
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    public void read(org.apache.thrift.protocol.TProtocol iprot)
        throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot)
        throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("resume_args(");
      boolean first = true;
      sb.append(")");
      return sb.toString();
    }
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    // Java serialization is bridged through the Thrift compact protocol.
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private void readObject(java.io.ObjectInputStream in)
        throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class resume_argsStandardSchemeFactory implements SchemeFactory {
      public resume_argsStandardScheme getScheme() {
        return new resume_argsStandardScheme();
      }
    }
    private static class resume_argsStandardScheme extends StandardScheme<resume_args> {
      // Reads (and discards) any unexpected fields until STOP is seen.
      public void read(org.apache.thrift.protocol.TProtocol iprot, resume_args struct)
          throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true) {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate
        // method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, resume_args struct)
          throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    private static class resume_argsTupleSchemeFactory implements SchemeFactory {
      public resume_argsTupleScheme getScheme() {
        return new resume_argsTupleScheme();
      }
    }
    // Tuple scheme: a field-less struct serializes to nothing.
    private static class resume_argsTupleScheme extends TupleScheme<resume_args> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, resume_args struct)
          throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, resume_args struct)
          throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
      }
    }
  }
  // Thrift-generated result struct for resume(); the method returns void and declares
  // no exceptions, so the struct has no fields.
  // NOTE(review): this looks like thrift-compiler output; prefer regenerating from
  // the .thrift IDL rather than editing by hand.
  public static class resume_result
      implements org.apache.thrift.TBase<resume_result, resume_result._Fields>,
      java.io.Serializable, Cloneable, Comparable<resume_result> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
        new org.apache.thrift.protocol.TStruct("resume_result");
    // Registry of wire-format schemes, keyed by the protocol's scheme class.
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
        new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
      schemes.put(StandardScheme.class, new resume_resultStandardSchemeFactory());
      schemes.put(TupleScheme.class, new resume_resultTupleSchemeFactory());
    }
    /**
     * The set of fields this struct contains, along with convenience methods for finding and
     * manipulating them.
     */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
      ;
      // No result fields, so this enum intentionally has no constants.
      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
      static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
          byName.put(field.getFieldName(), field);
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, or null if its not found.
       */
      public static _Fields findByThriftId(int fieldId) {
        switch (fieldId) {
          default:
            return null;
        }
      }
      /**
       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
       */
      public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null)
          throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
      }
      /**
       * Find the _Fields constant that matches name, or null if its not found.
       */
      public static _Fields findByName(String name) {
        return byName.get(name);
      }
      private final short _thriftId;
      private final String _fieldName;
      _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
      }
      public short getThriftFieldId() {
        return _thriftId;
      }
      public String getFieldName() {
        return _fieldName;
      }
    }
    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      metaDataMap = Collections.unmodifiableMap(tmpMap);
      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(resume_result.class,
          metaDataMap);
    }
    public resume_result() {}
    /**
     * Performs a deep copy on <i>other</i>.
     */
    public resume_result(resume_result other) {}
    public resume_result deepCopy() {
      return new resume_result(this);
    }
    @Override
    public void clear() {}
    public void setFieldValue(_Fields field, Object value) {
      switch (field) {
      }
    }
    public Object getFieldValue(_Fields field) {
      switch (field) {
      }
      throw new IllegalStateException();
    }
    /**
     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
     * otherwise
     */
    public boolean isSet(_Fields field) {
      if (field == null) {
        throw new IllegalArgumentException();
      }
      switch (field) {
      }
      throw new IllegalStateException();
    }
    @Override
    public boolean equals(Object that) {
      if (that == null)
        return false;
      if (that instanceof resume_result)
        return this.equals((resume_result) that);
      return false;
    }
    // With no fields to compare, any two non-null resume_result instances are equal.
    public boolean equals(resume_result that) {
      if (that == null)
        return false;
      return true;
    }
    @Override
    public int hashCode() {
      return 0;
    }
    @Override
    public int compareTo(resume_result other) {
      if (!getClass().equals(other.getClass())) {
        return getClass().getName().compareTo(other.getClass().getName());
      }
      int lastComparison = 0; // generator artifact: unused for a field-less struct
      return 0;
    }
    public _Fields fieldForId(int fieldId) {
      return _Fields.findByThriftId(fieldId);
    }
    public void read(org.apache.thrift.protocol.TProtocol iprot)
        throws org.apache.thrift.TException {
      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot)
        throws org.apache.thrift.TException {
      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
    }
    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder("resume_result(");
      boolean first = true;
      sb.append(")");
      return sb.toString();
    }
    public void validate() throws org.apache.thrift.TException {
      // check for required fields
      // check for sub-struct validity
    }
    // Java serialization is bridged through the Thrift compact protocol.
    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
      try {
        write(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(out)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private void readObject(java.io.ObjectInputStream in)
        throws java.io.IOException, ClassNotFoundException {
      try {
        read(new org.apache.thrift.protocol.TCompactProtocol(
            new org.apache.thrift.transport.TIOStreamTransport(in)));
      } catch (org.apache.thrift.TException te) {
        throw new java.io.IOException(te);
      }
    }
    private static class resume_resultStandardSchemeFactory implements SchemeFactory {
      public resume_resultStandardScheme getScheme() {
        return new resume_resultStandardScheme();
      }
    }
    private static class resume_resultStandardScheme extends StandardScheme<resume_result> {
      // Reads (and discards) any unexpected fields until STOP is seen.
      public void read(org.apache.thrift.protocol.TProtocol iprot, resume_result struct)
          throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true) {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate
        // method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, resume_result struct)
          throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
    private static class resume_resultTupleSchemeFactory implements SchemeFactory {
      public resume_resultTupleScheme getScheme() {
        return new resume_resultTupleScheme();
      }
    }
    // Tuple scheme: a field-less struct serializes to nothing.
    private static class resume_resultTupleScheme extends TupleScheme<resume_result> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, resume_result struct)
          throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, resume_result struct)
          throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
      }
    }
  }
public static class getLag_args
implements org.apache.thrift.TBase<getLag_args, getLag_args._Fields>, java.io.Serializable,
Cloneable, Comparable<getLag_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
new org.apache.thrift.protocol.TStruct("getLag_args");
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getLag_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getLag_argsTupleSchemeFactory());
}
/**
* The set of fields this struct contains, along with convenience methods for finding and
* manipulating them.
*/
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getLag_args.class,
metaDataMap);
}
public getLag_args() {}
/**
 * Performs a deep copy on <i>other</i>. (No field state to copy for this struct.)
 */
public getLag_args(getLag_args other) {}
public getLag_args deepCopy() {
  return new getLag_args(this);
}
@Override
public void clear() {}
// The struct has no fields, so the switches below are intentionally empty.
public void setFieldValue(_Fields field, Object value) {
  switch (field) {
  }
}
public Object getFieldValue(_Fields field) {
  switch (field) {
  }
  throw new IllegalStateException();
}
/**
 * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
 * otherwise
 */
public boolean isSet(_Fields field) {
  if (field == null) {
    throw new IllegalArgumentException();
  }
  switch (field) {
  }
  throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
  if (that == null)
    return false;
  if (that instanceof getLag_args)
    return this.equals((getLag_args) that);
  return false;
}
// Any two getLag_args instances are equal: there is no field state to compare.
public boolean equals(getLag_args that) {
  if (that == null)
    return false;
  return true;
}
@Override
public int hashCode() {
  // NOTE(review): a constant hash satisfies the equals/hashCode contract (all instances
  // are equal) but is what the Thrift 0.9.1 generator emits for every struct; improve by
  // regenerating with a newer compiler rather than hand-editing generated code.
  return 0;
}
@Override
public int compareTo(getLag_args other) {
  if (!getClass().equals(other.getClass())) {
    return getClass().getName().compareTo(other.getClass().getName());
  }
  int lastComparison = 0; // generated artifact; unused because there are no fields
  return 0;
}
public _Fields fieldForId(int fieldId) {
  return _Fields.findByThriftId(fieldId);
}
// Serialization is delegated to the scheme selected by the protocol in use.
public void read(org.apache.thrift.protocol.TProtocol iprot)
    throws org.apache.thrift.TException {
  schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot)
    throws org.apache.thrift.TException {
  schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("getLag_args(");
  boolean first = true; // generated artifact; no fields are ever appended
  sb.append(")");
  return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
  // check for required fields
  // check for sub-struct validity
}
// Java serialization is bridged through the Thrift compact protocol.
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
  try {
    write(new org.apache.thrift.protocol.TCompactProtocol(
        new org.apache.thrift.transport.TIOStreamTransport(out)));
  } catch (org.apache.thrift.TException te) {
    throw new java.io.IOException(te);
  }
}
private void readObject(java.io.ObjectInputStream in)
    throws java.io.IOException, ClassNotFoundException {
  try {
    read(new org.apache.thrift.protocol.TCompactProtocol(
        new org.apache.thrift.transport.TIOStreamTransport(in)));
  } catch (org.apache.thrift.TException te) {
    throw new java.io.IOException(te);
  }
}
private static class getLag_argsStandardSchemeFactory implements SchemeFactory {
public getLag_argsStandardScheme getScheme() {
return new getLag_argsStandardScheme();
}
}
/**
 * Standard (field-tagged) protocol encoding for getLag_args. Because the struct has no
 * fields, reading just skips everything on the wire and writing emits an empty struct.
 */
private static class getLag_argsStandardScheme extends StandardScheme<getLag_args> {
  public void read(org.apache.thrift.protocol.TProtocol iprot, getLag_args struct)
      throws org.apache.thrift.TException {
    org.apache.thrift.protocol.TField schemeField;
    iprot.readStructBegin();
    while (true) {
      schemeField = iprot.readFieldBegin();
      if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
        break;
      }
      switch (schemeField.id) {
        default:
          // Unknown field: skip its payload to stay aligned with the stream.
          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
    // check for required fields of primitive type, which can't be checked in the validate
    // method
    struct.validate();
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot, getLag_args struct)
      throws org.apache.thrift.TException {
    struct.validate();
    oprot.writeStructBegin(STRUCT_DESC);
    oprot.writeFieldStop();
    oprot.writeStructEnd();
  }
}
private static class getLag_argsTupleSchemeFactory implements SchemeFactory {
public getLag_argsTupleScheme getScheme() {
return new getLag_argsTupleScheme();
}
}
/**
 * Tuple (compact, positional) protocol encoding for getLag_args. With no fields declared,
 * both directions are no-ops beyond the protocol cast.
 */
private static class getLag_argsTupleScheme extends TupleScheme<getLag_args> {
  @Override
  public void write(org.apache.thrift.protocol.TProtocol prot, getLag_args struct)
      throws org.apache.thrift.TException {
    TTupleProtocol oprot = (TTupleProtocol) prot;
  }
  @Override
  public void read(org.apache.thrift.protocol.TProtocol prot, getLag_args struct)
      throws org.apache.thrift.TException {
    TTupleProtocol iprot = (TTupleProtocol) prot;
  }
}
}
/**
 * Thrift-generated result wrapper for the service method {@code getLag}. Carries a single
 * i64 {@code success} value (field id 0) plus the standard generated plumbing: field
 * metadata, isset tracking for the primitive field, and standard/tuple wire schemes.
 */
public static class getLag_result
    implements org.apache.thrift.TBase<getLag_result, getLag_result._Fields>,
    java.io.Serializable, Cloneable, Comparable<getLag_result> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
      new org.apache.thrift.protocol.TStruct("getLag_result");
  private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
      new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64,
          (short) 0);
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
      new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new getLag_resultStandardSchemeFactory());
    schemes.put(TupleScheme.class, new getLag_resultTupleSchemeFactory());
  }
  public long success; // required
  /**
   * The set of fields this struct contains, along with convenience methods for finding and
   * manipulating them.
   */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    SUCCESS((short) 0, "success");
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch (fieldId) {
        case 0: // SUCCESS
          return SUCCESS;
        default:
          return null;
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null)
        throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }
    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }
    private final short _thriftId;
    private final String _fieldName;
    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }
    public short getThriftFieldId() {
      return _thriftId;
    }
    public String getFieldName() {
      return _fieldName;
    }
  }
  // isset id assignments
  // Bit positions in __isset_bitfield; needed because a primitive long cannot be null.
  private static final int __SUCCESS_ISSET_ID = 0;
  private byte __isset_bitfield = 0;
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
        new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.SUCCESS,
        new org.apache.thrift.meta_data.FieldMetaData("success",
            org.apache.thrift.TFieldRequirementType.DEFAULT,
            new org.apache.thrift.meta_data.FieldValueMetaData(
                org.apache.thrift.protocol.TType.I64)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getLag_result.class,
        metaDataMap);
  }
  public getLag_result() {}
  public getLag_result(long success) {
    this();
    this.success = success;
    setSuccessIsSet(true);
  }
  /**
   * Performs a deep copy on <i>other</i>.
   */
  public getLag_result(getLag_result other) {
    __isset_bitfield = other.__isset_bitfield;
    this.success = other.success;
  }
  public getLag_result deepCopy() {
    return new getLag_result(this);
  }
  @Override
  public void clear() {
    setSuccessIsSet(false);
    this.success = 0;
  }
  public long getSuccess() {
    return this.success;
  }
  // Fluent setter; also marks the field as set in the isset bitfield.
  public getLag_result setSuccess(long success) {
    this.success = success;
    setSuccessIsSet(true);
    return this;
  }
  public void unsetSuccess() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
  }
  /** Returns true if field success is set (has been assigned a value) and false otherwise */
  public boolean isSetSuccess() {
    return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
  }
  public void setSuccessIsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
  }
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
      case SUCCESS:
        if (value == null) {
          unsetSuccess();
        } else {
          setSuccess((Long) value);
        }
        break;
    }
  }
  public Object getFieldValue(_Fields field) {
    switch (field) {
      case SUCCESS:
        return Long.valueOf(getSuccess());
    }
    throw new IllegalStateException();
  }
  /**
   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
   * otherwise
   */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
      case SUCCESS:
        return isSetSuccess();
    }
    throw new IllegalStateException();
  }
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof getLag_result)
      return this.equals((getLag_result) that);
    return false;
  }
  public boolean equals(getLag_result that) {
    if (that == null)
      return false;
    // success is a primitive, so it is always "present"; compare values directly.
    boolean this_present_success = true;
    boolean that_present_success = true;
    if (this_present_success || that_present_success) {
      if (!(this_present_success && that_present_success))
        return false;
      if (this.success != that.success)
        return false;
    }
    return true;
  }
  @Override
  public int hashCode() {
    // NOTE(review): constant hash is contract-valid but degrades hash-based collections;
    // this is what the Thrift 0.9.1 generator emits — regenerate rather than hand-edit.
    return 0;
  }
  @Override
  public int compareTo(getLag_result other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    // Unset sorts before set; then compare by value.
    lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSuccess()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  public void read(org.apache.thrift.protocol.TProtocol iprot)
      throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot)
      throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("getLag_result(");
    boolean first = true;
    sb.append("success:");
    sb.append(this.success);
    first = false;
    sb.append(")");
    return sb.toString();
  }
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }
  // Java serialization is bridged through the Thrift compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private void readObject(java.io.ObjectInputStream in)
      throws java.io.IOException, ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and
      // doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private static class getLag_resultStandardSchemeFactory implements SchemeFactory {
    public getLag_resultStandardScheme getScheme() {
      return new getLag_resultStandardScheme();
    }
  }
  /** Standard (field-tagged) encoding: reads/writes the single i64 success field. */
  private static class getLag_resultStandardScheme extends StandardScheme<getLag_result> {
    public void read(org.apache.thrift.protocol.TProtocol iprot, getLag_result struct)
        throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true) {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 0: // SUCCESS
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.success = iprot.readI64();
              struct.setSuccessIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      // check for required fields of primitive type, which can't be checked in the validate
      // method
      struct.validate();
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot, getLag_result struct)
        throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.isSetSuccess()) {
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        oprot.writeI64(struct.success);
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
  }
  private static class getLag_resultTupleSchemeFactory implements SchemeFactory {
    public getLag_resultTupleScheme getScheme() {
      return new getLag_resultTupleScheme();
    }
  }
  /** Tuple encoding: a 1-bit optionals bitset followed by success when present. */
  private static class getLag_resultTupleScheme extends TupleScheme<getLag_result> {
    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, getLag_result struct)
        throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      BitSet optionals = new BitSet();
      if (struct.isSetSuccess()) {
        optionals.set(0);
      }
      oprot.writeBitSet(optionals, 1);
      if (struct.isSetSuccess()) {
        oprot.writeI64(struct.success);
      }
    }
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, getLag_result struct)
        throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        struct.success = iprot.readI64();
        struct.setSuccessIsSet(true);
      }
    }
  }
}
| 9,434 |
0 | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/thrift/src/main/java/com/airbnb/reair/incremental/thrift/TReplicationJob.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*
* @generated
*/
package com.airbnb.reair.incremental.thrift;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.EncodingUtils;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
public class TReplicationJob
implements org.apache.thrift.TBase<TReplicationJob, TReplicationJob._Fields>,
java.io.Serializable, Cloneable, Comparable<TReplicationJob> {
// Wire descriptors: one TField per Thrift field (name, wire type, field id 1..16).
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
    new org.apache.thrift.protocol.TStruct("TReplicationJob");
private static final org.apache.thrift.protocol.TField ID_FIELD_DESC =
    new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short) 1);
private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC =
    new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I64,
        (short) 2);
private static final org.apache.thrift.protocol.TField UPDATE_TIME_FIELD_DESC =
    new org.apache.thrift.protocol.TField("updateTime", org.apache.thrift.protocol.TType.I64,
        (short) 3);
private static final org.apache.thrift.protocol.TField OPERATION_FIELD_DESC =
    new org.apache.thrift.protocol.TField("operation", org.apache.thrift.protocol.TType.I32,
        (short) 4);
private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC =
    new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.I32,
        (short) 5);
private static final org.apache.thrift.protocol.TField SRC_PATH_FIELD_DESC =
    new org.apache.thrift.protocol.TField("srcPath", org.apache.thrift.protocol.TType.STRING,
        (short) 6);
private static final org.apache.thrift.protocol.TField SRC_CLUSTER_FIELD_DESC =
    new org.apache.thrift.protocol.TField("srcCluster", org.apache.thrift.protocol.TType.STRING,
        (short) 7);
private static final org.apache.thrift.protocol.TField SRC_DB_FIELD_DESC =
    new org.apache.thrift.protocol.TField("srcDb", org.apache.thrift.protocol.TType.STRING,
        (short) 8);
private static final org.apache.thrift.protocol.TField SRC_TABLE_FIELD_DESC =
    new org.apache.thrift.protocol.TField("srcTable", org.apache.thrift.protocol.TType.STRING,
        (short) 9);
private static final org.apache.thrift.protocol.TField SRC_PARTITIONS_FIELD_DESC =
    new org.apache.thrift.protocol.TField("srcPartitions", org.apache.thrift.protocol.TType.LIST,
        (short) 10);
private static final org.apache.thrift.protocol.TField SRC_MODIFIED_TIME_FIELD_DESC =
    new org.apache.thrift.protocol.TField("srcModifiedTime",
        org.apache.thrift.protocol.TType.STRING, (short) 11);
private static final org.apache.thrift.protocol.TField RENAME_TO_DB_FIELD_DESC =
    new org.apache.thrift.protocol.TField("renameToDb", org.apache.thrift.protocol.TType.STRING,
        (short) 12);
private static final org.apache.thrift.protocol.TField RENAME_TO_TABLE_FIELD_DESC =
    new org.apache.thrift.protocol.TField("renameToTable",
        org.apache.thrift.protocol.TType.STRING, (short) 13);
private static final org.apache.thrift.protocol.TField RENAME_TO_PATH_FIELD_DESC =
    new org.apache.thrift.protocol.TField("renameToPath", org.apache.thrift.protocol.TType.STRING,
        (short) 14);
private static final org.apache.thrift.protocol.TField EXTRAS_FIELD_DESC =
    new org.apache.thrift.protocol.TField("extras", org.apache.thrift.protocol.TType.MAP,
        (short) 15);
private static final org.apache.thrift.protocol.TField WAITING_ON_JOBS_FIELD_DESC =
    new org.apache.thrift.protocol.TField("waitingOnJobs", org.apache.thrift.protocol.TType.LIST,
        (short) 16);
// Serialization schemes keyed by scheme family (standard vs tuple).
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
    new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
  schemes.put(StandardScheme.class, new TReplicationJobStandardSchemeFactory());
  schemes.put(TupleScheme.class, new TReplicationJobTupleSchemeFactory());
}
// Public fields (Thrift-generated style: direct field access plus fluent accessors below).
public long id; // required
public long createTime; // required
public long updateTime; // required
/**
 *
 * @see TReplicationOperation
 */
public TReplicationOperation operation; // required
/**
 *
 * @see TReplicationStatus
 */
public TReplicationStatus status; // required
public String srcPath; // required
public String srcCluster; // required
public String srcDb; // required
public String srcTable; // required
public List<String> srcPartitions; // required
public String srcModifiedTime; // required
public String renameToDb; // required
public String renameToTable; // required
public String renameToPath; // required
public Map<String, String> extras; // required
public List<Long> waitingOnJobs; // required
/**
 * The set of fields this struct contains, along with convenience methods for finding and
 * manipulating them.
 */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
  ID((short) 1, "id"), CREATE_TIME((short) 2, "createTime"), UPDATE_TIME((short) 3, "updateTime"),
  /**
   *
   * @see TReplicationOperation
   */
  OPERATION((short) 4, "operation"),
  /**
   *
   * @see TReplicationStatus
   */
  STATUS((short) 5, "status"), SRC_PATH((short) 6, "srcPath"), SRC_CLUSTER((short) 7,
      "srcCluster"), SRC_DB((short) 8, "srcDb"), SRC_TABLE((short) 9,
          "srcTable"), SRC_PARTITIONS((short) 10, "srcPartitions"), SRC_MODIFIED_TIME((short) 11,
              "srcModifiedTime"), RENAME_TO_DB((short) 12, "renameToDb"), RENAME_TO_TABLE(
                  (short) 13,
                  "renameToTable"), RENAME_TO_PATH((short) 14, "renameToPath"), EXTRAS((short) 15,
                      "extras"), WAITING_ON_JOBS((short) 16, "waitingOnJobs");
  // Reverse lookup from wire field name to constant, populated once at class load.
  private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
  static {
    for (_Fields field : EnumSet.allOf(_Fields.class)) {
      byName.put(field.getFieldName(), field);
    }
  }
  /**
   * Find the _Fields constant that matches fieldId, or null if its not found.
   */
  public static _Fields findByThriftId(int fieldId) {
    switch (fieldId) {
      case 1: // ID
        return ID;
      case 2: // CREATE_TIME
        return CREATE_TIME;
      case 3: // UPDATE_TIME
        return UPDATE_TIME;
      case 4: // OPERATION
        return OPERATION;
      case 5: // STATUS
        return STATUS;
      case 6: // SRC_PATH
        return SRC_PATH;
      case 7: // SRC_CLUSTER
        return SRC_CLUSTER;
      case 8: // SRC_DB
        return SRC_DB;
      case 9: // SRC_TABLE
        return SRC_TABLE;
      case 10: // SRC_PARTITIONS
        return SRC_PARTITIONS;
      case 11: // SRC_MODIFIED_TIME
        return SRC_MODIFIED_TIME;
      case 12: // RENAME_TO_DB
        return RENAME_TO_DB;
      case 13: // RENAME_TO_TABLE
        return RENAME_TO_TABLE;
      case 14: // RENAME_TO_PATH
        return RENAME_TO_PATH;
      case 15: // EXTRAS
        return EXTRAS;
      case 16: // WAITING_ON_JOBS
        return WAITING_ON_JOBS;
      default:
        return null;
    }
  }
  /**
   * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
   */
  public static _Fields findByThriftIdOrThrow(int fieldId) {
    _Fields fields = findByThriftId(fieldId);
    if (fields == null)
      throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
    return fields;
  }
  /**
   * Find the _Fields constant that matches name, or null if its not found.
   */
  public static _Fields findByName(String name) {
    return byName.get(name);
  }
  private final short _thriftId;
  private final String _fieldName;
  _Fields(short thriftId, String fieldName) {
    _thriftId = thriftId;
    _fieldName = fieldName;
  }
  public short getThriftFieldId() {
    return _thriftId;
  }
  public String getFieldName() {
    return _fieldName;
  }
}
// isset id assignments
// Bit positions in __isset_bitfield for the three primitive long fields (a primitive
// cannot be null, so presence must be tracked out-of-band).
private static final int __ID_ISSET_ID = 0;
private static final int __CREATETIME_ISSET_ID = 1;
private static final int __UPDATETIME_ISSET_ID = 2;
private byte __isset_bitfield = 0;
// Per-field Thrift metadata, registered with the global FieldMetaData registry.
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
  Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
      new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
  tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id",
      org.apache.thrift.TFieldRequirementType.DEFAULT,
      new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
  tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime",
      org.apache.thrift.TFieldRequirementType.DEFAULT,
      new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))
      ;
  tmpMap.put(_Fields.UPDATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("updateTime",
      org.apache.thrift.TFieldRequirementType.DEFAULT,
      new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
  tmpMap.put(_Fields.OPERATION,
      new org.apache.thrift.meta_data.FieldMetaData("operation",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
              TReplicationOperation.class)));
  tmpMap.put(_Fields.STATUS,
      new org.apache.thrift.meta_data.FieldMetaData("status",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
              TReplicationStatus.class)));
  tmpMap.put(_Fields.SRC_PATH,
      new org.apache.thrift.meta_data.FieldMetaData("srcPath",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.SRC_CLUSTER,
      new org.apache.thrift.meta_data.FieldMetaData("srcCluster",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.SRC_DB,
      new org.apache.thrift.meta_data.FieldMetaData("srcDb",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.SRC_TABLE,
      new org.apache.thrift.meta_data.FieldMetaData("srcTable",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.SRC_PARTITIONS,
      new org.apache.thrift.meta_data.FieldMetaData("srcPartitions",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
              new org.apache.thrift.meta_data.FieldValueMetaData(
                  org.apache.thrift.protocol.TType.STRING))));
  tmpMap.put(_Fields.SRC_MODIFIED_TIME,
      new org.apache.thrift.meta_data.FieldMetaData("srcModifiedTime",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.RENAME_TO_DB,
      new org.apache.thrift.meta_data.FieldMetaData("renameToDb",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.RENAME_TO_TABLE,
      new org.apache.thrift.meta_data.FieldMetaData("renameToTable",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.RENAME_TO_PATH,
      new org.apache.thrift.meta_data.FieldMetaData("renameToPath",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.FieldValueMetaData(
              org.apache.thrift.protocol.TType.STRING)));
  tmpMap.put(_Fields.EXTRAS,
      new org.apache.thrift.meta_data.FieldMetaData("extras",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
              new org.apache.thrift.meta_data.FieldValueMetaData(
                  org.apache.thrift.protocol.TType.STRING),
              new org.apache.thrift.meta_data.FieldValueMetaData(
                  org.apache.thrift.protocol.TType.STRING))));
  tmpMap.put(_Fields.WAITING_ON_JOBS,
      new org.apache.thrift.meta_data.FieldMetaData("waitingOnJobs",
          org.apache.thrift.TFieldRequirementType.DEFAULT,
          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
              new org.apache.thrift.meta_data.FieldValueMetaData(
                  org.apache.thrift.protocol.TType.I64))));
  metaDataMap = Collections.unmodifiableMap(tmpMap);
  org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TReplicationJob.class,
      metaDataMap);
}
public TReplicationJob() {}
// All-fields constructor: marks the three primitive longs as set via the isset bitfield.
public TReplicationJob(long id, long createTime, long updateTime, TReplicationOperation operation,
    TReplicationStatus status, String srcPath, String srcCluster, String srcDb, String srcTable,
    List<String> srcPartitions, String srcModifiedTime, String renameToDb, String renameToTable,
    String renameToPath, Map<String, String> extras, List<Long> waitingOnJobs) {
  this();
  this.id = id;
  setIdIsSet(true);
  this.createTime = createTime;
  setCreateTimeIsSet(true);
  this.updateTime = updateTime;
  setUpdateTimeIsSet(true);
  this.operation = operation;
  this.status = status;
  this.srcPath = srcPath;
  this.srcCluster = srcCluster;
  this.srcDb = srcDb;
  this.srcTable = srcTable;
  this.srcPartitions = srcPartitions;
  this.srcModifiedTime = srcModifiedTime;
  this.renameToDb = renameToDb;
  this.renameToTable = renameToTable;
  this.renameToPath = renameToPath;
  this.extras = extras;
  this.waitingOnJobs = waitingOnJobs;
}
/**
 * Performs a deep copy on <i>other</i>. Collections get fresh containers (element
 * references are shared); Strings and enums are immutable and copied by reference.
 */
public TReplicationJob(TReplicationJob other) {
  __isset_bitfield = other.__isset_bitfield;
  this.id = other.id;
  this.createTime = other.createTime;
  this.updateTime = other.updateTime;
  if (other.isSetOperation()) {
    this.operation = other.operation;
  }
  if (other.isSetStatus()) {
    this.status = other.status;
  }
  if (other.isSetSrcPath()) {
    this.srcPath = other.srcPath;
  }
  if (other.isSetSrcCluster()) {
    this.srcCluster = other.srcCluster;
  }
  if (other.isSetSrcDb()) {
    this.srcDb = other.srcDb;
  }
  if (other.isSetSrcTable()) {
    this.srcTable = other.srcTable;
  }
  if (other.isSetSrcPartitions()) {
    List<String> __this__srcPartitions = new ArrayList<String>(other.srcPartitions);
    this.srcPartitions = __this__srcPartitions;
  }
  if (other.isSetSrcModifiedTime()) {
    this.srcModifiedTime = other.srcModifiedTime;
  }
  if (other.isSetRenameToDb()) {
    this.renameToDb = other.renameToDb;
  }
  if (other.isSetRenameToTable()) {
    this.renameToTable = other.renameToTable;
  }
  if (other.isSetRenameToPath()) {
    this.renameToPath = other.renameToPath;
  }
  if (other.isSetExtras()) {
    Map<String, String> __this__extras = new HashMap<String, String>(other.extras);
    this.extras = __this__extras;
  }
  if (other.isSetWaitingOnJobs()) {
    List<Long> __this__waitingOnJobs = new ArrayList<Long>(other.waitingOnJobs);
    this.waitingOnJobs = __this__waitingOnJobs;
  }
}
public TReplicationJob deepCopy() {
  return new TReplicationJob(this);
}
@Override
public void clear() {
  // Reset primitives to 0 and mark them unset; null out all object fields.
  setIdIsSet(false);
  this.id = 0;
  setCreateTimeIsSet(false);
  this.createTime = 0;
  setUpdateTimeIsSet(false);
  this.updateTime = 0;
  this.operation = null;
  this.status = null;
  this.srcPath = null;
  this.srcCluster = null;
  this.srcDb = null;
  this.srcTable = null;
  this.srcPartitions = null;
  this.srcModifiedTime = null;
  this.renameToDb = null;
  this.renameToTable = null;
  this.renameToPath = null;
  this.extras = null;
  this.waitingOnJobs = null;
}
// ---- Accessors for the primitive long fields: fluent setters flip the isset bit. ----
public long getId() {
  return this.id;
}
public TReplicationJob setId(long id) {
  this.id = id;
  setIdIsSet(true);
  return this;
}
public void unsetId() {
  __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID_ISSET_ID);
}
/** Returns true if field id is set (has been assigned a value) and false otherwise */
public boolean isSetId() {
  return EncodingUtils.testBit(__isset_bitfield, __ID_ISSET_ID);
}
public void setIdIsSet(boolean value) {
  __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
}
public long getCreateTime() {
  return this.createTime;
}
public TReplicationJob setCreateTime(long createTime) {
  this.createTime = createTime;
  setCreateTimeIsSet(true);
  return this;
}
public void unsetCreateTime() {
  __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
}
/** Returns true if field createTime is set (has been assigned a value) and false otherwise */
public boolean isSetCreateTime() {
  return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
}
public void setCreateTimeIsSet(boolean value) {
  __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
}
public long getUpdateTime() {
  return this.updateTime;
}
public TReplicationJob setUpdateTime(long updateTime) {
  this.updateTime = updateTime;
  setUpdateTimeIsSet(true);
  return this;
}
public void unsetUpdateTime() {
  __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPDATETIME_ISSET_ID);
}
/** Returns true if field updateTime is set (has been assigned a value) and false otherwise */
public boolean isSetUpdateTime() {
  return EncodingUtils.testBit(__isset_bitfield, __UPDATETIME_ISSET_ID);
}
public void setUpdateTimeIsSet(boolean value) {
  __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPDATETIME_ISSET_ID, value);
}
// ---- Accessors for object fields: null means unset, so no bitfield is involved. ----
/**
 *
 * @see TReplicationOperation
 */
public TReplicationOperation getOperation() {
  return this.operation;
}
/**
 *
 * @see TReplicationOperation
 */
public TReplicationJob setOperation(TReplicationOperation operation) {
  this.operation = operation;
  return this;
}
public void unsetOperation() {
  this.operation = null;
}
/** Returns true if field operation is set (has been assigned a value) and false otherwise */
public boolean isSetOperation() {
  return this.operation != null;
}
public void setOperationIsSet(boolean value) {
  if (!value) {
    this.operation = null;
  }
}
/**
 *
 * @see TReplicationStatus
 */
public TReplicationStatus getStatus() {
  return this.status;
}
/**
 *
 * @see TReplicationStatus
 */
public TReplicationJob setStatus(TReplicationStatus status) {
  this.status = status;
  return this;
}
public void unsetStatus() {
  this.status = null;
}
/** Returns true if field status is set (has been assigned a value) and false otherwise */
public boolean isSetStatus() {
  return this.status != null;
}
public void setStatusIsSet(boolean value) {
  if (!value) {
    this.status = null;
  }
}
public String getSrcPath() {
  return this.srcPath;
}
public TReplicationJob setSrcPath(String srcPath) {
  this.srcPath = srcPath;
  return this;
}
public void unsetSrcPath() {
  this.srcPath = null;
}
/** Returns true if field srcPath is set (has been assigned a value) and false otherwise */
public boolean isSetSrcPath() {
  return this.srcPath != null;
}
public void setSrcPathIsSet(boolean value) {
  if (!value) {
    this.srcPath = null;
  }
}
public String getSrcCluster() {
  return this.srcCluster;
}
public TReplicationJob setSrcCluster(String srcCluster) {
  this.srcCluster = srcCluster;
  return this;
}
public void unsetSrcCluster() {
  this.srcCluster = null;
}
/** Returns true if field srcCluster is set (has been assigned a value) and false otherwise */
public boolean isSetSrcCluster() {
  return this.srcCluster != null;
}
public void setSrcClusterIsSet(boolean value) {
  if (!value) {
    this.srcCluster = null;
  }
}
public String getSrcDb() {
  return this.srcDb;
}
public TReplicationJob setSrcDb(String srcDb) {
  this.srcDb = srcDb;
  return this;
}
public void unsetSrcDb() {
  this.srcDb = null;
}
/** Returns true if field srcDb is set (has been assigned a value) and false otherwise */
public boolean isSetSrcDb() {
  return this.srcDb != null;
}
public void setSrcDbIsSet(boolean value) {
  if (!value) {
    this.srcDb = null;
  }
}
public String getSrcTable() {
  return this.srcTable;
}
public TReplicationJob setSrcTable(String srcTable) {
  this.srcTable = srcTable;
  return this;
}
public void unsetSrcTable() {
  this.srcTable = null;
}
/** Returns true if field srcTable is set (has been assigned a value) and false otherwise */
public boolean isSetSrcTable() {
  return this.srcTable != null;
}
public void setSrcTableIsSet(boolean value) {
  if (!value) {
    this.srcTable = null;
  }
}
  // srcPartitions: lazily-created list; the size/iterator helpers are
  // null-safe so callers need not check isSetSrcPartitions() first.
  public int getSrcPartitionsSize() {
    return (this.srcPartitions == null) ? 0 : this.srcPartitions.size();
  }
  public java.util.Iterator<String> getSrcPartitionsIterator() {
    return (this.srcPartitions == null) ? null : this.srcPartitions.iterator();
  }
  public void addToSrcPartitions(String elem) {
    if (this.srcPartitions == null) {
      this.srcPartitions = new ArrayList<String>();
    }
    this.srcPartitions.add(elem);
  }
  public List<String> getSrcPartitions() {
    return this.srcPartitions;
  }
  public TReplicationJob setSrcPartitions(List<String> srcPartitions) {
    this.srcPartitions = srcPartitions;
    return this;
  }
  public void unsetSrcPartitions() {
    this.srcPartitions = null;
  }
  /** Returns true if field srcPartitions is set (has been assigned a value) and false otherwise */
  public boolean isSetSrcPartitions() {
    return this.srcPartitions != null;
  }
  public void setSrcPartitionsIsSet(boolean value) {
    if (!value) {
      this.srcPartitions = null;
    }
  }
  // --- srcModifiedTime accessors (stored as a String, not a numeric epoch) ---
  public String getSrcModifiedTime() {
    return this.srcModifiedTime;
  }
  public TReplicationJob setSrcModifiedTime(String srcModifiedTime) {
    this.srcModifiedTime = srcModifiedTime;
    return this;
  }
  public void unsetSrcModifiedTime() {
    this.srcModifiedTime = null;
  }
  /**
   * Returns true if field srcModifiedTime is set (has been assigned a value) and false otherwise
   */
  public boolean isSetSrcModifiedTime() {
    return this.srcModifiedTime != null;
  }
  public void setSrcModifiedTimeIsSet(boolean value) {
    if (!value) {
      this.srcModifiedTime = null;
    }
  }
  // --- renameToDb accessors ---
  public String getRenameToDb() {
    return this.renameToDb;
  }
  public TReplicationJob setRenameToDb(String renameToDb) {
    this.renameToDb = renameToDb;
    return this;
  }
  public void unsetRenameToDb() {
    this.renameToDb = null;
  }
  /** Returns true if field renameToDb is set (has been assigned a value) and false otherwise */
  public boolean isSetRenameToDb() {
    return this.renameToDb != null;
  }
  public void setRenameToDbIsSet(boolean value) {
    if (!value) {
      this.renameToDb = null;
    }
  }
  // --- renameToTable accessors ---
  public String getRenameToTable() {
    return this.renameToTable;
  }
  public TReplicationJob setRenameToTable(String renameToTable) {
    this.renameToTable = renameToTable;
    return this;
  }
  public void unsetRenameToTable() {
    this.renameToTable = null;
  }
  /** Returns true if field renameToTable is set (has been assigned a value) and false otherwise */
  public boolean isSetRenameToTable() {
    return this.renameToTable != null;
  }
  public void setRenameToTableIsSet(boolean value) {
    if (!value) {
      this.renameToTable = null;
    }
  }
  // --- renameToPath accessors ---
  public String getRenameToPath() {
    return this.renameToPath;
  }
  public TReplicationJob setRenameToPath(String renameToPath) {
    this.renameToPath = renameToPath;
    return this;
  }
  public void unsetRenameToPath() {
    this.renameToPath = null;
  }
  /** Returns true if field renameToPath is set (has been assigned a value) and false otherwise */
  public boolean isSetRenameToPath() {
    return this.renameToPath != null;
  }
  public void setRenameToPathIsSet(boolean value) {
    if (!value) {
      this.renameToPath = null;
    }
  }
  // extras: lazily-created String->String map of auxiliary metadata.
  public int getExtrasSize() {
    return (this.extras == null) ? 0 : this.extras.size();
  }
  public void putToExtras(String key, String val) {
    if (this.extras == null) {
      this.extras = new HashMap<String, String>();
    }
    this.extras.put(key, val);
  }
  public Map<String, String> getExtras() {
    return this.extras;
  }
  public TReplicationJob setExtras(Map<String, String> extras) {
    this.extras = extras;
    return this;
  }
  public void unsetExtras() {
    this.extras = null;
  }
  /** Returns true if field extras is set (has been assigned a value) and false otherwise */
  public boolean isSetExtras() {
    return this.extras != null;
  }
  public void setExtrasIsSet(boolean value) {
    if (!value) {
      this.extras = null;
    }
  }
  // waitingOnJobs: lazily-created list of job ids (presumably jobs this one
  // must wait for, judging by the name -- confirm against callers).
  public int getWaitingOnJobsSize() {
    return (this.waitingOnJobs == null) ? 0 : this.waitingOnJobs.size();
  }
  public java.util.Iterator<Long> getWaitingOnJobsIterator() {
    return (this.waitingOnJobs == null) ? null : this.waitingOnJobs.iterator();
  }
  public void addToWaitingOnJobs(long elem) {
    if (this.waitingOnJobs == null) {
      this.waitingOnJobs = new ArrayList<Long>();
    }
    this.waitingOnJobs.add(elem);
  }
  public List<Long> getWaitingOnJobs() {
    return this.waitingOnJobs;
  }
  public TReplicationJob setWaitingOnJobs(List<Long> waitingOnJobs) {
    this.waitingOnJobs = waitingOnJobs;
    return this;
  }
  public void unsetWaitingOnJobs() {
    this.waitingOnJobs = null;
  }
  /** Returns true if field waitingOnJobs is set (has been assigned a value) and false otherwise */
  public boolean isSetWaitingOnJobs() {
    return this.waitingOnJobs != null;
  }
  public void setWaitingOnJobsIsSet(boolean value) {
    if (!value) {
      this.waitingOnJobs = null;
    }
  }
  // Generic, metadata-driven setter used by the Thrift runtime: a null value
  // clears the field; otherwise the value is cast to the field's declared
  // type (a ClassCastException here means the caller passed the wrong type).
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
      case ID:
        if (value == null) {
          unsetId();
        } else {
          setId((Long) value);
        }
        break;
      case CREATE_TIME:
        if (value == null) {
          unsetCreateTime();
        } else {
          setCreateTime((Long) value);
        }
        break;
      case UPDATE_TIME:
        if (value == null) {
          unsetUpdateTime();
        } else {
          setUpdateTime((Long) value);
        }
        break;
      case OPERATION:
        if (value == null) {
          unsetOperation();
        } else {
          setOperation((TReplicationOperation) value);
        }
        break;
      case STATUS:
        if (value == null) {
          unsetStatus();
        } else {
          setStatus((TReplicationStatus) value);
        }
        break;
      case SRC_PATH:
        if (value == null) {
          unsetSrcPath();
        } else {
          setSrcPath((String) value);
        }
        break;
      case SRC_CLUSTER:
        if (value == null) {
          unsetSrcCluster();
        } else {
          setSrcCluster((String) value);
        }
        break;
      case SRC_DB:
        if (value == null) {
          unsetSrcDb();
        } else {
          setSrcDb((String) value);
        }
        break;
      case SRC_TABLE:
        if (value == null) {
          unsetSrcTable();
        } else {
          setSrcTable((String) value);
        }
        break;
      case SRC_PARTITIONS:
        if (value == null) {
          unsetSrcPartitions();
        } else {
          setSrcPartitions((List<String>) value);
        }
        break;
      case SRC_MODIFIED_TIME:
        if (value == null) {
          unsetSrcModifiedTime();
        } else {
          setSrcModifiedTime((String) value);
        }
        break;
      case RENAME_TO_DB:
        if (value == null) {
          unsetRenameToDb();
        } else {
          setRenameToDb((String) value);
        }
        break;
      case RENAME_TO_TABLE:
        if (value == null) {
          unsetRenameToTable();
        } else {
          setRenameToTable((String) value);
        }
        break;
      case RENAME_TO_PATH:
        if (value == null) {
          unsetRenameToPath();
        } else {
          setRenameToPath((String) value);
        }
        break;
      case EXTRAS:
        if (value == null) {
          unsetExtras();
        } else {
          setExtras((Map<String, String>) value);
        }
        break;
      case WAITING_ON_JOBS:
        if (value == null) {
          unsetWaitingOnJobs();
        } else {
          setWaitingOnJobs((List<Long>) value);
        }
        break;
    }
  }
  // Generic getter counterpart of setFieldValue(); primitive (i64) fields are
  // boxed explicitly via Long.valueOf. Reaching the throw means an enum value
  // was added to _Fields without a matching case here.
  public Object getFieldValue(_Fields field) {
    switch (field) {
      case ID:
        return Long.valueOf(getId());
      case CREATE_TIME:
        return Long.valueOf(getCreateTime());
      case UPDATE_TIME:
        return Long.valueOf(getUpdateTime());
      case OPERATION:
        return getOperation();
      case STATUS:
        return getStatus();
      case SRC_PATH:
        return getSrcPath();
      case SRC_CLUSTER:
        return getSrcCluster();
      case SRC_DB:
        return getSrcDb();
      case SRC_TABLE:
        return getSrcTable();
      case SRC_PARTITIONS:
        return getSrcPartitions();
      case SRC_MODIFIED_TIME:
        return getSrcModifiedTime();
      case RENAME_TO_DB:
        return getRenameToDb();
      case RENAME_TO_TABLE:
        return getRenameToTable();
      case RENAME_TO_PATH:
        return getRenameToPath();
      case EXTRAS:
        return getExtras();
      case WAITING_ON_JOBS:
        return getWaitingOnJobs();
    }
    throw new IllegalStateException();
  }
  /**
   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
   * otherwise
   */
  // Dispatches to the per-field isSetX() helpers; the primitive fields' state
  // is tracked elsewhere (presumably the __isset_bitfield used in readObject).
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
      case ID:
        return isSetId();
      case CREATE_TIME:
        return isSetCreateTime();
      case UPDATE_TIME:
        return isSetUpdateTime();
      case OPERATION:
        return isSetOperation();
      case STATUS:
        return isSetStatus();
      case SRC_PATH:
        return isSetSrcPath();
      case SRC_CLUSTER:
        return isSetSrcCluster();
      case SRC_DB:
        return isSetSrcDb();
      case SRC_TABLE:
        return isSetSrcTable();
      case SRC_PARTITIONS:
        return isSetSrcPartitions();
      case SRC_MODIFIED_TIME:
        return isSetSrcModifiedTime();
      case RENAME_TO_DB:
        return isSetRenameToDb();
      case RENAME_TO_TABLE:
        return isSetRenameToTable();
      case RENAME_TO_PATH:
        return isSetRenameToPath();
      case EXTRAS:
        return isSetExtras();
      case WAITING_ON_JOBS:
        return isSetWaitingOnJobs();
    }
    throw new IllegalStateException();
  }
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof TReplicationJob)
return this.equals((TReplicationJob) that);
return false;
}
public boolean equals(TReplicationJob that) {
if (that == null)
return false;
boolean this_present_id = true;
boolean that_present_id = true;
if (this_present_id || that_present_id) {
if (!(this_present_id && that_present_id))
return false;
if (this.id != that.id)
return false;
}
boolean this_present_createTime = true;
boolean that_present_createTime = true;
if (this_present_createTime || that_present_createTime) {
if (!(this_present_createTime && that_present_createTime))
return false;
if (this.createTime != that.createTime)
return false;
}
boolean this_present_updateTime = true;
boolean that_present_updateTime = true;
if (this_present_updateTime || that_present_updateTime) {
if (!(this_present_updateTime && that_present_updateTime))
return false;
if (this.updateTime != that.updateTime)
return false;
}
boolean this_present_operation = true && this.isSetOperation();
boolean that_present_operation = true && that.isSetOperation();
if (this_present_operation || that_present_operation) {
if (!(this_present_operation && that_present_operation))
return false;
if (!this.operation.equals(that.operation))
return false;
}
boolean this_present_status = true && this.isSetStatus();
boolean that_present_status = true && that.isSetStatus();
if (this_present_status || that_present_status) {
if (!(this_present_status && that_present_status))
return false;
if (!this.status.equals(that.status))
return false;
}
boolean this_present_srcPath = true && this.isSetSrcPath();
boolean that_present_srcPath = true && that.isSetSrcPath();
if (this_present_srcPath || that_present_srcPath) {
if (!(this_present_srcPath && that_present_srcPath))
return false;
if (!this.srcPath.equals(that.srcPath))
return false;
}
boolean this_present_srcCluster = true && this.isSetSrcCluster();
boolean that_present_srcCluster = true && that.isSetSrcCluster();
if (this_present_srcCluster || that_present_srcCluster) {
if (!(this_present_srcCluster && that_present_srcCluster))
return false;
if (!this.srcCluster.equals(that.srcCluster))
return false;
}
boolean this_present_srcDb = true && this.isSetSrcDb();
boolean that_present_srcDb = true && that.isSetSrcDb();
if (this_present_srcDb || that_present_srcDb) {
if (!(this_present_srcDb && that_present_srcDb))
return false;
if (!this.srcDb.equals(that.srcDb))
return false;
}
boolean this_present_srcTable = true && this.isSetSrcTable();
boolean that_present_srcTable = true && that.isSetSrcTable();
if (this_present_srcTable || that_present_srcTable) {
if (!(this_present_srcTable && that_present_srcTable))
return false;
if (!this.srcTable.equals(that.srcTable))
return false;
}
boolean this_present_srcPartitions = true && this.isSetSrcPartitions();
boolean that_present_srcPartitions = true && that.isSetSrcPartitions();
if (this_present_srcPartitions || that_present_srcPartitions) {
if (!(this_present_srcPartitions && that_present_srcPartitions))
return false;
if (!this.srcPartitions.equals(that.srcPartitions))
return false;
}
boolean this_present_srcModifiedTime = true && this.isSetSrcModifiedTime();
boolean that_present_srcModifiedTime = true && that.isSetSrcModifiedTime();
if (this_present_srcModifiedTime || that_present_srcModifiedTime) {
if (!(this_present_srcModifiedTime && that_present_srcModifiedTime))
return false;
if (!this.srcModifiedTime.equals(that.srcModifiedTime))
return false;
}
boolean this_present_renameToDb = true && this.isSetRenameToDb();
boolean that_present_renameToDb = true && that.isSetRenameToDb();
if (this_present_renameToDb || that_present_renameToDb) {
if (!(this_present_renameToDb && that_present_renameToDb))
return false;
if (!this.renameToDb.equals(that.renameToDb))
return false;
}
boolean this_present_renameToTable = true && this.isSetRenameToTable();
boolean that_present_renameToTable = true && that.isSetRenameToTable();
if (this_present_renameToTable || that_present_renameToTable) {
if (!(this_present_renameToTable && that_present_renameToTable))
return false;
if (!this.renameToTable.equals(that.renameToTable))
return false;
}
boolean this_present_renameToPath = true && this.isSetRenameToPath();
boolean that_present_renameToPath = true && that.isSetRenameToPath();
if (this_present_renameToPath || that_present_renameToPath) {
if (!(this_present_renameToPath && that_present_renameToPath))
return false;
if (!this.renameToPath.equals(that.renameToPath))
return false;
}
boolean this_present_extras = true && this.isSetExtras();
boolean that_present_extras = true && that.isSetExtras();
if (this_present_extras || that_present_extras) {
if (!(this_present_extras && that_present_extras))
return false;
if (!this.extras.equals(that.extras))
return false;
}
boolean this_present_waitingOnJobs = true && this.isSetWaitingOnJobs();
boolean that_present_waitingOnJobs = true && that.isSetWaitingOnJobs();
if (this_present_waitingOnJobs || that_present_waitingOnJobs) {
if (!(this_present_waitingOnJobs && that_present_waitingOnJobs))
return false;
if (!this.waitingOnJobs.equals(that.waitingOnJobs))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
  // Total order used for sorting jobs: fields are compared in declaration
  // order, and for each field an unset value sorts before a set one
  // (Boolean.compareTo: false < true); values themselves are compared via
  // TBaseHelper only when this side is set.
  @Override
  public int compareTo(TReplicationJob other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    lastComparison = Boolean.valueOf(isSetId()).compareTo(other.isSetId());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetId()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetCreateTime()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetUpdateTime()).compareTo(other.isSetUpdateTime());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetUpdateTime()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.updateTime, other.updateTime);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetOperation()).compareTo(other.isSetOperation());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetOperation()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operation, other.operation);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetStatus()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcPath()).compareTo(other.isSetSrcPath());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcPath()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcPath, other.srcPath);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcCluster()).compareTo(other.isSetSrcCluster());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcCluster()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcCluster, other.srcCluster);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcDb()).compareTo(other.isSetSrcDb());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcDb()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcDb, other.srcDb);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcTable()).compareTo(other.isSetSrcTable());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcTable()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcTable, other.srcTable);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcPartitions()).compareTo(other.isSetSrcPartitions());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcPartitions()) {
      lastComparison =
          org.apache.thrift.TBaseHelper.compareTo(this.srcPartitions, other.srcPartitions);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison =
        Boolean.valueOf(isSetSrcModifiedTime()).compareTo(other.isSetSrcModifiedTime());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcModifiedTime()) {
      lastComparison =
          org.apache.thrift.TBaseHelper.compareTo(this.srcModifiedTime, other.srcModifiedTime);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetRenameToDb()).compareTo(other.isSetRenameToDb());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetRenameToDb()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.renameToDb, other.renameToDb);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetRenameToTable()).compareTo(other.isSetRenameToTable());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetRenameToTable()) {
      lastComparison =
          org.apache.thrift.TBaseHelper.compareTo(this.renameToTable, other.renameToTable);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetRenameToPath()).compareTo(other.isSetRenameToPath());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetRenameToPath()) {
      lastComparison =
          org.apache.thrift.TBaseHelper.compareTo(this.renameToPath, other.renameToPath);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetExtras()).compareTo(other.isSetExtras());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetExtras()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.extras, other.extras);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetWaitingOnJobs()).compareTo(other.isSetWaitingOnJobs());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetWaitingOnJobs()) {
      lastComparison =
          org.apache.thrift.TBaseHelper.compareTo(this.waitingOnJobs, other.waitingOnJobs);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  // Maps a numeric Thrift field id back to its _Fields enum constant.
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  // (De)serialization entry points: dispatch to whichever scheme (standard or
  // tuple) is registered for the protocol in use.
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot)
      throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
  // Human-readable dump of all fields for logging/debugging. The generated
  // "first" flag is effectively constant after the first field, so every
  // subsequent field is prefixed with ", "; unset object fields print "null".
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("TReplicationJob(");
    boolean first = true;
    sb.append("id:");
    sb.append(this.id);
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("createTime:");
    sb.append(this.createTime);
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("updateTime:");
    sb.append(this.updateTime);
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("operation:");
    if (this.operation == null) {
      sb.append("null");
    } else {
      sb.append(this.operation);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("status:");
    if (this.status == null) {
      sb.append("null");
    } else {
      sb.append(this.status);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("srcPath:");
    if (this.srcPath == null) {
      sb.append("null");
    } else {
      sb.append(this.srcPath);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("srcCluster:");
    if (this.srcCluster == null) {
      sb.append("null");
    } else {
      sb.append(this.srcCluster);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("srcDb:");
    if (this.srcDb == null) {
      sb.append("null");
    } else {
      sb.append(this.srcDb);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("srcTable:");
    if (this.srcTable == null) {
      sb.append("null");
    } else {
      sb.append(this.srcTable);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("srcPartitions:");
    if (this.srcPartitions == null) {
      sb.append("null");
    } else {
      sb.append(this.srcPartitions);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("srcModifiedTime:");
    if (this.srcModifiedTime == null) {
      sb.append("null");
    } else {
      sb.append(this.srcModifiedTime);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("renameToDb:");
    if (this.renameToDb == null) {
      sb.append("null");
    } else {
      sb.append(this.renameToDb);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("renameToTable:");
    if (this.renameToTable == null) {
      sb.append("null");
    } else {
      sb.append(this.renameToTable);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("renameToPath:");
    if (this.renameToPath == null) {
      sb.append("null");
    } else {
      sb.append(this.renameToPath);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("extras:");
    if (this.extras == null) {
      sb.append("null");
    } else {
      sb.append(this.extras);
    }
    first = false;
    if (!first)
      sb.append(", ");
    sb.append("waitingOnJobs:");
    if (this.waitingOnJobs == null) {
      sb.append("null");
    } else {
      sb.append(this.waitingOnJobs);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }
  // No required fields are declared for this struct, so there is nothing to
  // validate; kept because the schemes call it on every read/write.
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }
  // Java serialization hooks: bridge to the Thrift compact protocol so
  // ObjectOutputStream/ObjectInputStream round-trips reuse the wire format.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private void readObject(java.io.ObjectInputStream in)
      throws java.io.IOException, ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and
      // doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  // Factory registered with the Thrift runtime that supplies the standard
  // (field-id tagged) wire encoding for this struct.
  private static class TReplicationJobStandardSchemeFactory implements SchemeFactory {
    public TReplicationJobStandardScheme getScheme() {
      return new TReplicationJobStandardScheme();
    }
  }
  // Standard-protocol (field-id tagged) serializer for TReplicationJob. The
  // statement order in write() and the case structure in read() define the
  // wire format, so they must mirror the IDL field ids exactly.
  private static class TReplicationJobStandardScheme extends StandardScheme<TReplicationJob> {
    // Reads fields in any order until a STOP marker; fields with an
    // unexpected type or an unknown id are skipped, which keeps old readers
    // compatible with newer writers.
    public void read(org.apache.thrift.protocol.TProtocol iprot, TReplicationJob struct)
        throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true) {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // ID
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.id = iprot.readI64();
              struct.setIdIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // CREATE_TIME
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.createTime = iprot.readI64();
              struct.setCreateTimeIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 3: // UPDATE_TIME
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.updateTime = iprot.readI64();
              struct.setUpdateTimeIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 4: // OPERATION
            // Enums travel as i32; findByValue returns null for values this
            // build does not know about.
            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
              struct.operation = TReplicationOperation.findByValue(iprot.readI32());
              struct.setOperationIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 5: // STATUS
            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
              struct.status = TReplicationStatus.findByValue(iprot.readI32());
              struct.setStatusIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 6: // SRC_PATH
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcPath = iprot.readString();
              struct.setSrcPathIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 7: // SRC_CLUSTER
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcCluster = iprot.readString();
              struct.setSrcClusterIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 8: // SRC_DB
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcDb = iprot.readString();
              struct.setSrcDbIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 9: // SRC_TABLE
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcTable = iprot.readString();
              struct.setSrcTableIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 10: // SRC_PARTITIONS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                // List header carries the element count, used to presize.
                org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
                struct.srcPartitions = new ArrayList<String>(_list0.size);
                for (int _i1 = 0; _i1 < _list0.size; ++_i1) {
                  String _elem2;
                  _elem2 = iprot.readString();
                  struct.srcPartitions.add(_elem2);
                }
                iprot.readListEnd();
              }
              struct.setSrcPartitionsIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 11: // SRC_MODIFIED_TIME
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcModifiedTime = iprot.readString();
              struct.setSrcModifiedTimeIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 12: // RENAME_TO_DB
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.renameToDb = iprot.readString();
              struct.setRenameToDbIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 13: // RENAME_TO_TABLE
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.renameToTable = iprot.readString();
              struct.setRenameToTableIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 14: // RENAME_TO_PATH
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.renameToPath = iprot.readString();
              struct.setRenameToPathIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 15: // EXTRAS
            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
                // 2 * size presizing approximates HashMap's load factor.
                org.apache.thrift.protocol.TMap _map3 = iprot.readMapBegin();
                struct.extras = new HashMap<String, String>(2 * _map3.size);
                for (int _i4 = 0; _i4 < _map3.size; ++_i4) {
                  String _key5;
                  String _val6;
                  _key5 = iprot.readString();
                  _val6 = iprot.readString();
                  struct.extras.put(_key5, _val6);
                }
                iprot.readMapEnd();
              }
              struct.setExtrasIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 16: // WAITING_ON_JOBS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list7 = iprot.readListBegin();
                struct.waitingOnJobs = new ArrayList<Long>(_list7.size);
                for (int _i8 = 0; _i8 < _list7.size; ++_i8) {
                  long _elem9;
                  _elem9 = iprot.readI64();
                  struct.waitingOnJobs.add(_elem9);
                }
                iprot.readListEnd();
              }
              struct.setWaitingOnJobsIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }
    // Writes fields in field-id order; the three primitive i64 fields are
    // always emitted, object fields only when non-null (i.e. set).
    public void write(org.apache.thrift.protocol.TProtocol oprot, TReplicationJob struct)
        throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      oprot.writeFieldBegin(ID_FIELD_DESC);
      oprot.writeI64(struct.id);
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
      oprot.writeI64(struct.createTime);
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(UPDATE_TIME_FIELD_DESC);
      oprot.writeI64(struct.updateTime);
      oprot.writeFieldEnd();
      if (struct.operation != null) {
        oprot.writeFieldBegin(OPERATION_FIELD_DESC);
        oprot.writeI32(struct.operation.getValue());
        oprot.writeFieldEnd();
      }
      if (struct.status != null) {
        oprot.writeFieldBegin(STATUS_FIELD_DESC);
        oprot.writeI32(struct.status.getValue());
        oprot.writeFieldEnd();
      }
      if (struct.srcPath != null) {
        oprot.writeFieldBegin(SRC_PATH_FIELD_DESC);
        oprot.writeString(struct.srcPath);
        oprot.writeFieldEnd();
      }
      if (struct.srcCluster != null) {
        oprot.writeFieldBegin(SRC_CLUSTER_FIELD_DESC);
        oprot.writeString(struct.srcCluster);
        oprot.writeFieldEnd();
      }
      if (struct.srcDb != null) {
        oprot.writeFieldBegin(SRC_DB_FIELD_DESC);
        oprot.writeString(struct.srcDb);
        oprot.writeFieldEnd();
      }
      if (struct.srcTable != null) {
        oprot.writeFieldBegin(SRC_TABLE_FIELD_DESC);
        oprot.writeString(struct.srcTable);
        oprot.writeFieldEnd();
      }
      if (struct.srcPartitions != null) {
        oprot.writeFieldBegin(SRC_PARTITIONS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
              org.apache.thrift.protocol.TType.STRING, struct.srcPartitions.size()));
          for (String _iter10 : struct.srcPartitions) {
            oprot.writeString(_iter10);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      if (struct.srcModifiedTime != null) {
        oprot.writeFieldBegin(SRC_MODIFIED_TIME_FIELD_DESC);
        oprot.writeString(struct.srcModifiedTime);
        oprot.writeFieldEnd();
      }
      if (struct.renameToDb != null) {
        oprot.writeFieldBegin(RENAME_TO_DB_FIELD_DESC);
        oprot.writeString(struct.renameToDb);
        oprot.writeFieldEnd();
      }
      if (struct.renameToTable != null) {
        oprot.writeFieldBegin(RENAME_TO_TABLE_FIELD_DESC);
        oprot.writeString(struct.renameToTable);
        oprot.writeFieldEnd();
      }
      if (struct.renameToPath != null) {
        oprot.writeFieldBegin(RENAME_TO_PATH_FIELD_DESC);
        oprot.writeString(struct.renameToPath);
        oprot.writeFieldEnd();
      }
      if (struct.extras != null) {
        oprot.writeFieldBegin(EXTRAS_FIELD_DESC);
        {
          oprot.writeMapBegin(
              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
                  org.apache.thrift.protocol.TType.STRING, struct.extras.size()));
          for (Map.Entry<String, String> _iter11 : struct.extras.entrySet()) {
            oprot.writeString(_iter11.getKey());
            oprot.writeString(_iter11.getValue());
          }
          oprot.writeMapEnd();
        }
        oprot.writeFieldEnd();
      }
      if (struct.waitingOnJobs != null) {
        oprot.writeFieldBegin(WAITING_ON_JOBS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
              org.apache.thrift.protocol.TType.I64, struct.waitingOnJobs.size()));
          for (long _iter12 : struct.waitingOnJobs) {
            oprot.writeI64(_iter12);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
  }
  // Factory that supplies the tuple (bitset-prefixed, more compact) encoding
  // scheme for this struct.
  private static class TReplicationJobTupleSchemeFactory implements SchemeFactory {
    public TReplicationJobTupleScheme getScheme() {
      return new TReplicationJobTupleScheme();
    }
  }
  // Thrift-generated tuple-protocol scheme for TReplicationJob. The tuple
  // encoding first writes a 16-bit presence set (one bit per optional field),
  // then the present field values in bit order. The bit positions assigned in
  // write() must stay in exact lockstep with the positions tested in read() —
  // do not reorder any of the branches below.
  private static class TReplicationJobTupleScheme extends TupleScheme<TReplicationJob> {
    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, TReplicationJob struct)
        throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      // Flag which of the 16 fields are set; one bit per field, in field order.
      BitSet optionals = new BitSet();
      if (struct.isSetId()) {
        optionals.set(0);
      }
      if (struct.isSetCreateTime()) {
        optionals.set(1);
      }
      if (struct.isSetUpdateTime()) {
        optionals.set(2);
      }
      if (struct.isSetOperation()) {
        optionals.set(3);
      }
      if (struct.isSetStatus()) {
        optionals.set(4);
      }
      if (struct.isSetSrcPath()) {
        optionals.set(5);
      }
      if (struct.isSetSrcCluster()) {
        optionals.set(6);
      }
      if (struct.isSetSrcDb()) {
        optionals.set(7);
      }
      if (struct.isSetSrcTable()) {
        optionals.set(8);
      }
      if (struct.isSetSrcPartitions()) {
        optionals.set(9);
      }
      if (struct.isSetSrcModifiedTime()) {
        optionals.set(10);
      }
      if (struct.isSetRenameToDb()) {
        optionals.set(11);
      }
      if (struct.isSetRenameToTable()) {
        optionals.set(12);
      }
      if (struct.isSetRenameToPath()) {
        optionals.set(13);
      }
      if (struct.isSetExtras()) {
        optionals.set(14);
      }
      if (struct.isSetWaitingOnJobs()) {
        optionals.set(15);
      }
      oprot.writeBitSet(optionals, 16);
      // Write only the fields flagged above. Collections are written as a
      // raw element count followed by the elements (no list/map headers).
      if (struct.isSetId()) {
        oprot.writeI64(struct.id);
      }
      if (struct.isSetCreateTime()) {
        oprot.writeI64(struct.createTime);
      }
      if (struct.isSetUpdateTime()) {
        oprot.writeI64(struct.updateTime);
      }
      if (struct.isSetOperation()) {
        oprot.writeI32(struct.operation.getValue());
      }
      if (struct.isSetStatus()) {
        oprot.writeI32(struct.status.getValue());
      }
      if (struct.isSetSrcPath()) {
        oprot.writeString(struct.srcPath);
      }
      if (struct.isSetSrcCluster()) {
        oprot.writeString(struct.srcCluster);
      }
      if (struct.isSetSrcDb()) {
        oprot.writeString(struct.srcDb);
      }
      if (struct.isSetSrcTable()) {
        oprot.writeString(struct.srcTable);
      }
      if (struct.isSetSrcPartitions()) {
        {
          oprot.writeI32(struct.srcPartitions.size());
          for (String _iter13 : struct.srcPartitions) {
            oprot.writeString(_iter13);
          }
        }
      }
      if (struct.isSetSrcModifiedTime()) {
        oprot.writeString(struct.srcModifiedTime);
      }
      if (struct.isSetRenameToDb()) {
        oprot.writeString(struct.renameToDb);
      }
      if (struct.isSetRenameToTable()) {
        oprot.writeString(struct.renameToTable);
      }
      if (struct.isSetRenameToPath()) {
        oprot.writeString(struct.renameToPath);
      }
      if (struct.isSetExtras()) {
        {
          oprot.writeI32(struct.extras.size());
          for (Map.Entry<String, String> _iter14 : struct.extras.entrySet()) {
            oprot.writeString(_iter14.getKey());
            oprot.writeString(_iter14.getValue());
          }
        }
      }
      if (struct.isSetWaitingOnJobs()) {
        {
          oprot.writeI32(struct.waitingOnJobs.size());
          for (long _iter15 : struct.waitingOnJobs) {
            oprot.writeI64(_iter15);
          }
        }
      }
    }
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, TReplicationJob struct)
        throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      // Mirror of write(): read the presence bit set, then each flagged field
      // in the same order, marking the struct's isSet flags as we go.
      BitSet incoming = iprot.readBitSet(16);
      if (incoming.get(0)) {
        struct.id = iprot.readI64();
        struct.setIdIsSet(true);
      }
      if (incoming.get(1)) {
        struct.createTime = iprot.readI64();
        struct.setCreateTimeIsSet(true);
      }
      if (incoming.get(2)) {
        struct.updateTime = iprot.readI64();
        struct.setUpdateTimeIsSet(true);
      }
      if (incoming.get(3)) {
        struct.operation = TReplicationOperation.findByValue(iprot.readI32());
        struct.setOperationIsSet(true);
      }
      if (incoming.get(4)) {
        struct.status = TReplicationStatus.findByValue(iprot.readI32());
        struct.setStatusIsSet(true);
      }
      if (incoming.get(5)) {
        struct.srcPath = iprot.readString();
        struct.setSrcPathIsSet(true);
      }
      if (incoming.get(6)) {
        struct.srcCluster = iprot.readString();
        struct.setSrcClusterIsSet(true);
      }
      if (incoming.get(7)) {
        struct.srcDb = iprot.readString();
        struct.setSrcDbIsSet(true);
      }
      if (incoming.get(8)) {
        struct.srcTable = iprot.readString();
        struct.setSrcTableIsSet(true);
      }
      if (incoming.get(9)) {
        {
          // Tuple lists carry only the size; the element type is implied.
          org.apache.thrift.protocol.TList _list16 = new org.apache.thrift.protocol.TList(
              org.apache.thrift.protocol.TType.STRING, iprot.readI32());
          struct.srcPartitions = new ArrayList<String>(_list16.size);
          for (int _i17 = 0; _i17 < _list16.size; ++_i17) {
            String _elem18;
            _elem18 = iprot.readString();
            struct.srcPartitions.add(_elem18);
          }
        }
        struct.setSrcPartitionsIsSet(true);
      }
      if (incoming.get(10)) {
        struct.srcModifiedTime = iprot.readString();
        struct.setSrcModifiedTimeIsSet(true);
      }
      if (incoming.get(11)) {
        struct.renameToDb = iprot.readString();
        struct.setRenameToDbIsSet(true);
      }
      if (incoming.get(12)) {
        struct.renameToTable = iprot.readString();
        struct.setRenameToTableIsSet(true);
      }
      if (incoming.get(13)) {
        struct.renameToPath = iprot.readString();
        struct.setRenameToPathIsSet(true);
      }
      if (incoming.get(14)) {
        {
          org.apache.thrift.protocol.TMap _map19 =
              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
                  org.apache.thrift.protocol.TType.STRING, iprot.readI32());
          // Presized at 2x the entry count (generated code's load-factor slack).
          struct.extras = new HashMap<String, String>(2 * _map19.size);
          for (int _i20 = 0; _i20 < _map19.size; ++_i20) {
            String _key21;
            String _val22;
            _key21 = iprot.readString();
            _val22 = iprot.readString();
            struct.extras.put(_key21, _val22);
          }
        }
        struct.setExtrasIsSet(true);
      }
      if (incoming.get(15)) {
        {
          org.apache.thrift.protocol.TList _list23 = new org.apache.thrift.protocol.TList(
              org.apache.thrift.protocol.TType.I64, iprot.readI32());
          struct.waitingOnJobs = new ArrayList<Long>(_list23.size);
          for (int _i24 = 0; _i24 < _list23.size; ++_i24) {
            long _elem25;
            _elem25 = iprot.readI64();
            struct.waitingOnJobs.add(_elem25);
          }
        }
        struct.setWaitingOnJobsIsSet(true);
      }
    }
  }
}

package com.airbnb.hive;
import static org.junit.Assert.assertEquals;
import com.google.common.collect.Lists;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.EmbeddedMySqlDb;
import com.airbnb.reair.db.StaticDbConnectionFactory;
import com.airbnb.reair.db.TestDbCredentials;
import com.airbnb.reair.hive.hooks.AuditLogHookUtils;
import com.airbnb.reair.hive.hooks.CliAuditLogHook;
import com.airbnb.reair.hive.hooks.HiveOperation;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.MapRedStats;
import org.apache.hadoop.mapred.Counters;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Integration tests for {@link CliAuditLogHook}: each test fires a simulated
 * Hive CLI query through {@code AuditLogHookUtils.insertAuditLogEntry} against
 * an embedded MySQL instance and then asserts the exact rows written to the
 * audit log, output-objects, and map-reduce-stats tables.
 */
public class CliAuditLogHookTest {
  private static final Log LOG = LogFactory.getLog(
      CliAuditLogHookTest.class);
  // Embedded MySQL instance shared by all tests in this class; started once
  // in setupClass().
  protected static EmbeddedMySqlDb embeddedMySqlDb;
  protected static final String DB_NAME = "audit_log_db";
  protected static final String AUDIT_LOG_TABLE_NAME = "audit_log";
  protected static final String OUTPUT_OBJECTS_TABLE_NAME = "audit_objects";
  protected static final String MAP_RED_STATS_TABLE_NAME = "mapred_stats";
  protected static final String DEFAULT_QUERY_STRING = "Example query string";
  @BeforeClass
  public static void setupClass() {
    // NOTE(review): the embedded DB is never stopped in this class — confirm
    // teardown is handled elsewhere or by process exit.
    embeddedMySqlDb = new EmbeddedMySqlDb();
    embeddedMySqlDb.startDb();
  }
  /**
   * Generates a database connection factory for use in testing.
   *
   * @return the database connection factory
   *
   * @throws SQLException if there's an error insert into the DB
   */
  public static DbConnectionFactory getDbConnectionFactory()
      throws SQLException {
    TestDbCredentials testDbCredentials = new TestDbCredentials();
    return new StaticDbConnectionFactory(
        ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb),
        testDbCredentials.getReadWriteUsername(),
        testDbCredentials.getReadWritePassword());
  }
  /**
   * Resets the testing database.
   *
   * @throws SQLException if there's an error inserting to the DB
   */
  public static void resetState() throws SQLException {
    DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    // Drop then recreate so each test starts from empty tables.
    ReplicationTestUtils.dropDatabase(dbConnectionFactory, DB_NAME);
    AuditLogHookUtils.setupAuditLogTables(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        MAP_RED_STATS_TABLE_NAME);
  }
  /**
   * Verifies that a query with one input table and one output table produces
   * the expected audit-log row, the expected serialized output-table object,
   * and the expected per-stage map-reduce statistics row.
   */
  @Test
  public void testAuditLogTable() throws Exception {
    // Setup the audit log DB
    resetState();
    TestDbCredentials testDbCredentials = new TestDbCredentials();
    final DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    final CliAuditLogHook cliAuditLogHook =
        new CliAuditLogHook(testDbCredentials);
    // Set up the source
    org.apache.hadoop.hive.ql.metadata.Table inputTable =
        new org.apache.hadoop.hive.ql.metadata.Table(
            "test_db",
            "test_source_table");
    List<org.apache.hadoop.hive.ql.metadata.Table> inputTables =
        new ArrayList<>();
    inputTables.add(inputTable);
    org.apache.hadoop.hive.ql.metadata.Table outputTable =
        new org.apache.hadoop.hive.ql.metadata.Table(
            "test_db",
            "test_output_table");
    // Fix mutable metadata so the serialized blob asserted below is stable.
    outputTable.setCreateTime(0);
    outputTable.setOwner("table.owner");
    List<org.apache.hadoop.hive.ql.metadata.Table> outputTables =
        new ArrayList<>();
    outputTables.add(outputTable);
    Map<String, MapRedStats> mapRedStatsPerStage = new HashMap<>();
    MapRedStats stageOneStats = new MapRedStats(2, 3, 2500, true, "fakeJobId");
    Counters counters = new Counters();
    counters.incrCounter("SomeCounterGroupName", "SomeCounterName", 3);
    stageOneStats.setCounters(counters);
    mapRedStatsPerStage.put("Stage-1", stageOneStats);
    HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
        embeddedMySqlDb,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        MAP_RED_STATS_TABLE_NAME);
    AuditLogHookUtils.insertAuditLogEntry(
        cliAuditLogHook,
        HiveOperation.QUERY,
        DEFAULT_QUERY_STRING,
        inputTables,
        new ArrayList<>(),
        outputTables,
        new ArrayList<>(),
        mapRedStatsPerStage,
        hiveConf);
    // Check the query audit log
    List<String> auditCoreLogColumnsToCheck = Lists.newArrayList(
        "command_type", "command", "inputs", "outputs");
    List<String> auditCoreLogRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        auditCoreLogColumnsToCheck,
        null);
    List<String> expectedDbRow = Lists.newArrayList(
        "QUERY",
        DEFAULT_QUERY_STRING,
        "{\"tables\":[\"test_db.test_source_table\"]}",
        "{\"tables\":[\"test_db.test_output_table\"]}");
    assertEquals(expectedDbRow, auditCoreLogRow);
    // Check the output objects audit log
    List<String> outputObjectsColumnsToCheck = Lists.newArrayList(
        "name", "type", "serialized_object");
    List<String> outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        null);
    // The serialized_object column is an exact snapshot of the Thrift-JSON
    // encoding of the table; brittle by design to catch serializer changes.
    expectedDbRow = Lists.newArrayList(
        "test_db.test_output_table",
        "TABLE",
        "{\"1\":{\"str\":\"test_"
            + "output_table\"},\"2\":{\"str\":\"test_db\"},"
            + "\"3\":{\"str\":\"table.owner\"},"
            + "\"4\":{\"i32\":0},\"5\":{\"i32\":0},\"6\":{\"i3"
            + "2\":0},\"7\":{\"rec\":{\"1\":{\"lst\":[\"rec\",0]}"
            + ",\"3\":{\"str\":\"org.apache.hadoop.mapred.Sequenc"
            + "eFileInputFormat\"},\"4\":{\"str\":\"org.apache.ha"
            + "doop.hive.ql.io.HiveSequenceFileOutputFormat\"},\""
            + "5\":{\"tf\":0},\"6\":{\"i32\":-1},\"7\":{\"rec\":{"
            + "\"2\":{\"str\":\"org.apache.hadoop.hive.serde2.Met"
            + "adataTypedColumnsetSerDe\"},\"3\":{\"map\":[\"str\""
            + ",\"str\",1,{\"serialization.format\":\"1\"}]}}},\""
            + "8\":{\"lst\":[\"str\",0]},\"9\":{\"lst\":[\"rec\","
            + "0]},\"10\":{\"map\":[\"str\",\"str\",0,{}]},\"11\""
            + ":{\"rec\":{\"1\":{\"lst\":[\"str\",0]},\"2\":{\"ls"
            + "t\":[\"lst\",0]},\"3\":{\"map\":[\"lst\",\"str\",0"
            + ",{}]}}}}},\"8\":{\"lst\":[\"rec\",0]},\"9\":{\"map"
            + "\":[\"str\",\"str\",0,{}]},\"12\":{\"str\":\"MANAG"
            + "ED_TABLE\"}}");
    assertEquals(expectedDbRow, outputObjectsRow);
    // Check the map reduce stats audit log
    List<String> mapRedStatsColumnsToCheck = Lists.newArrayList(
        "stage", "mappers", "reducers", "cpu_time", "counters");
    List<String> mapRedStatsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        MAP_RED_STATS_TABLE_NAME,
        mapRedStatsColumnsToCheck,
        null);
    expectedDbRow = Lists.newArrayList(
        "Stage-1",
        "2",
        "3",
        "2500",
        "[{\"groupName\":\"SomeCounterGroupName\",\"counters\":[{\"counterNam"
            + "e\":\"SomeCounterName\",\"value\":3}]}]");
    assertEquals(expectedDbRow, mapRedStatsRow);
  }
  /**
   * Verifies that a query writing to a partition logs the partition in the
   * audit-log outputs and records serialized objects for both the partition
   * and its parent table.
   */
  @Test
  public void testAuditLogPartition() throws Exception {
    // Setup the audit log DB
    resetState();
    final TestDbCredentials testDbCredentials = new TestDbCredentials();
    final DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    final CliAuditLogHook cliAuditLogHook =
        new CliAuditLogHook(testDbCredentials);
    // Make a partitioned output table
    org.apache.hadoop.hive.ql.metadata.Table qlTable =
        new org.apache.hadoop.hive.ql.metadata.Table(
            "test_db",
            "test_output_table");
    List<FieldSchema> partitionCols = new ArrayList<>();
    partitionCols.add(new FieldSchema("ds", null, null));
    qlTable.setPartCols(partitionCols);
    qlTable.setDataLocation(new Path("file://a/b/c"));
    qlTable.setCreateTime(0);
    // Make the actual partition
    Map<String, String> partitionKeyValue = new HashMap<>();
    partitionKeyValue.put("ds", "1");
    org.apache.hadoop.hive.ql.metadata.Partition outputPartition =
        new org.apache.hadoop.hive.ql.metadata.Partition(qlTable,
            partitionKeyValue, null);
    outputPartition.setLocation("file://a/b/c");
    List<org.apache.hadoop.hive.ql.metadata.Partition> outputPartitions =
        new ArrayList<>();
    outputPartitions.add(outputPartition);
    HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
        embeddedMySqlDb,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        MAP_RED_STATS_TABLE_NAME);
    // No input objects or map-reduce stats for this scenario — only the
    // output partition matters.
    AuditLogHookUtils.insertAuditLogEntry(
        cliAuditLogHook,
        HiveOperation.QUERY,
        DEFAULT_QUERY_STRING,
        new ArrayList<>(),
        new ArrayList<>(),
        new ArrayList<>(),
        outputPartitions,
        new HashMap<>(),
        hiveConf);
    // Check the query audit log
    List<String> auditCoreLogColumnsToCheck = Lists.newArrayList(
        "command_type", "command", "inputs", "outputs");
    List<String> auditCoreLogRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        auditCoreLogColumnsToCheck,
        null);
    List<String> expectedDbRow = Lists.newArrayList(
        "QUERY",
        DEFAULT_QUERY_STRING,
        "{}",
        "{\"partitions\":"
            + "[\"test_db.test_output_table/ds=1\"]}");
    assertEquals(expectedDbRow, auditCoreLogRow);
    // Check the output objects audit log: first the partition row...
    List<String> outputObjectsColumnsToCheck = Lists.newArrayList(
        "name", "type", "serialized_object");
    List<String> outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        "name = 'test_db.test_output_table/ds=1'");
    expectedDbRow = Lists.newArrayList(
        "test_db.test_output_table/ds=1",
        "PARTITION",
        "{\"1\":{\"lst\":[\"str\",1,\"1\"]},\"2\":{\"str"
            + "\":\"test_db\"},\"3\":{\"str\":\"test_output_table"
            + "\"},\"4\":{\"i32\":0},\"5\":{\"i32\":0},\"6\":{\"rec"
            + "\":{\"1\":{\"lst\":[\"rec\",0]},\"2\":{\"str\":\""
            + "file://a/b/c\"},\"3\":{\"str\":\"org.apache.hadoop."
            + "mapred.SequenceFileInputFormat\"},\"4\":{\"str\":\""
            + "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutput"
            + "Format\"},\"5\":{\"tf\":0},\"6\":{\"i32\":-1},\"7\""
            + ":{\"rec\":{\"2\":{\"str\":\"org.apache.hadoop.hive."
            + "serde2.MetadataTypedColumnsetSerDe\"},\"3\":{\"map\""
            + ":[\"str\",\"str\",1,{\"serialization.format\":\"1\""
            + "}]}}},\"8\":{\"lst\":[\"str\",0]},\"9\":{\"lst\":[\""
            + "rec\",0]},\"10\":{\"map\":[\"str\",\"str\",0,{}]},\""
            + "11\":{\"rec\":{\"1\":{\"lst\":[\"str\",0]},\"2\":{\""
            + "lst\":[\"lst\",0]},\"3\":{\"map\":[\"lst\",\"str\",0"
            + ",{}]}}}}}}");
    assertEquals(expectedDbRow, outputObjectsRow);
    // ...then the parent table row, logged alongside the partition.
    outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        "name = 'test_db.test_output_table'");
    expectedDbRow = Lists.newArrayList(
        "test_db.test_output_table",
        "TABLE",
        "{\"1\":{\"str\":\"test_output_table\"},\"2\":{\"str\":\""
            + "test_db\"},\"4\":{\"i32\":0},\"5\":{\"i3"
            + "2\":0},\"6\":{\"i32\":0},\"7\":{\"rec\":{\"1\":{\""
            + "lst\":[\"rec\",0]},\"2\":{\"str\":\"file://a/b/c\""
            + "},\"3\":{\"str\":\"org.apache.hadoop.mapred.Seque"
            + "nceFileInputFormat\"},\"4\":{\"str\":\"org.apache"
            + ".hadoop.hive.ql.io.HiveSequenceFileOutputFormat\""
            + "},\"5\":{\"tf\":0},\"6\":{\"i32\":-1},\"7\":{\"re"
            + "c\":{\"2\":{\"str\":\"org.apache.hadoop.hive.serd"
            + "e2.MetadataTypedColumnsetSerDe\"},\"3\":{\"map\":"
            + "[\"str\",\"str\",1,{\"serialization.format\":\"1\""
            + "}]}}},\"8\":{\"lst\":[\"str\",0]},\"9\":{\"lst\":["
            + "\"rec\",0]},\"10\":{\"map\":[\"str\",\"str\",0,{}]"
            + "},\"11\":{\"rec\":{\"1\":{\"lst\":[\"str\",0]},\"2"
            + "\":{\"lst\":[\"lst\",0]},\"3\":{\"map\":[\"lst\",\""
            + "str\",0,{}]}}}}},\"8\":{\"lst\":[\"rec\",1,{\"1\":"
            + "{\"str\":\"ds\"}}]},\"9\":{\"map\":[\"str\",\"str\""
            + ",0,{}]},\"12\":{\"str\":\"MANAGED_TABLE\"}}");
    assertEquals(expectedDbRow, outputObjectsRow);
  }
}

package com.airbnb.hive;
import static org.junit.Assert.assertEquals;
import com.google.common.collect.Lists;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.EmbeddedMySqlDb;
import com.airbnb.reair.db.StaticDbConnectionFactory;
import com.airbnb.reair.db.TestDbCredentials;
import com.airbnb.reair.hive.hooks.AuditCoreLogModule;
import com.airbnb.reair.hive.hooks.AuditLogHookUtils;
import com.airbnb.reair.hive.hooks.HiveOperation;
import com.airbnb.reair.hive.hooks.MetastoreAuditLogListener;
import com.airbnb.reair.hive.hooks.ObjectLogModule;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
import org.apache.hadoop.hive.metastore.events.DropTableEvent;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class MetastoreAuditLogListenerTest {
  private static final Log LOG = LogFactory.getLog(
      MetastoreAuditLogListenerTest.class
  );
  // Embedded MySQL instance shared by every test in this class.
  protected static EmbeddedMySqlDb embeddedMySqlDb;
  protected static final String DB_NAME = "audit_log_db";
  protected static final String AUDIT_LOG_TABLE_NAME = "audit_log";
  // Input and output objects share one physical table; the tests below
  // distinguish rows via the `category` column ('INPUT' vs 'OUTPUT').
  protected static final String INPUT_OBJECTS_TABLE_NAME = "audit_objects";
  protected static final String OUTPUT_OBJECTS_TABLE_NAME = "audit_objects";
  protected static final String MAP_RED_STATS_TABLE_NAME = "mapred_stats";
  // Starts the embedded MySQL instance once for the whole test class.
  // NOTE(review): no matching @AfterClass teardown is visible in this chunk —
  // confirm the DB is stopped elsewhere or reclaimed at process exit.
  @BeforeClass
  public static void setupClass() {
    embeddedMySqlDb = new EmbeddedMySqlDb();
    embeddedMySqlDb.startDb();
  }
/**
* Generates a database connection factory for use in testing.
*
* @return The database connection factory
* @throws SQLException If there's an error insert into the DB
*/
public static DbConnectionFactory getDbConnectionFactory()
throws SQLException {
TestDbCredentials testDbCredentials = new TestDbCredentials();
return new StaticDbConnectionFactory(
ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb),
testDbCredentials.getReadWriteUsername(),
testDbCredentials.getReadWritePassword()
);
}
/**
* Resets the testing database.
*
* @throws SQLException If there's an error inserting to the DB
*/
public static void resetState() throws SQLException {
DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
ReplicationTestUtils.dropDatabase(dbConnectionFactory, DB_NAME);
AuditLogHookUtils.setupAuditLogTables(
dbConnectionFactory,
DB_NAME,
AUDIT_LOG_TABLE_NAME,
OUTPUT_OBJECTS_TABLE_NAME,
MAP_RED_STATS_TABLE_NAME
);
}
/**
* Generates a Hive configuration using the test database credentials.
*
* @return The Hive configuration
*/
public HiveConf getHiveConfig() {
final TestDbCredentials testDbCredentials = new TestDbCredentials();
HiveConf hiveConf = new HiveConf();
hiveConf.set(
MetastoreAuditLogListener.JDBC_URL_KEY,
ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb, DB_NAME)
);
hiveConf.set(AuditCoreLogModule.TABLE_NAME_KEY, AUDIT_LOG_TABLE_NAME);
hiveConf.set(ObjectLogModule.TABLE_NAME_KEY, OUTPUT_OBJECTS_TABLE_NAME);
hiveConf.set(
MetastoreAuditLogListener.DB_USERNAME,
testDbCredentials.getReadWriteUsername()
);
hiveConf.set(
MetastoreAuditLogListener.DB_PASSWORD,
testDbCredentials.getReadWritePassword()
);
return hiveConf;
}
  /**
   * Fires a CreateDatabaseEvent at the listener and verifies: the audit-log
   * row records a THRIFT_CREATE_DATABASE with the DB as an output, no INPUT
   * object row is written, and the OUTPUT row holds the exact Thrift-JSON
   * serialization of the Database object.
   */
  @Test
  public void testCreateDatabase() throws Exception {
    // Setup the audit log DB.
    resetState();
    final DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    final HiveConf hiveConf = getHiveConfig();
    final MetastoreAuditLogListener metastoreAuditLogListener =
        new MetastoreAuditLogListener(hiveConf);
    // Set up the source.
    Map<String, String> parameters = new HashMap<>();
    parameters.put("owner", "root");
    Database database = new Database(
        "test_db",
        "test database",
        "hdfs://dummy",
        parameters
    );
    CreateDatabaseEvent event = new CreateDatabaseEvent(database, true, null);
    metastoreAuditLogListener.onCreateDatabase(event);
    // Check the query audit log.
    List<String> auditCoreLogColumnsToCheck = Lists.newArrayList(
        "command_type",
        "command",
        "inputs",
        "outputs"
    );
    List<String> auditCoreLogRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        auditCoreLogColumnsToCheck,
        null
    );
    List<String> expectedDbRow = Lists.newArrayList(
        HiveOperation.THRIFT_CREATE_DATABASE.name(),
        "THRIFT_API",
        "{}",
        "{\"databases\":[\"test_db\"]}"
    );
    assertEquals(expectedDbRow, auditCoreLogRow);
    // Check the input objects audit log.
    List<String> inputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> inputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        INPUT_OBJECTS_TABLE_NAME,
        inputObjectsColumnsToCheck,
        "category = 'INPUT'"
    );
    // A database creation has no inputs, so no INPUT row should exist.
    assertEquals(null, inputObjectsRow);
    // Check the output objects audit log.
    List<String> outputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        "category = 'OUTPUT'"
    );
    expectedDbRow = Lists.newArrayList(
        "test_db",
        "DATABASE",
        "{\"1\":{\"str\":\"test_db\"},\"2\":{\"str\":\"test database\"},\"3\":{"
            + "\"str\":\"hdfs://dummy\"},\"4\":{\"map\":[\"str\",\"str\",1,{\"own"
            + "er\":\"root\"}]}}"
    );
    assertEquals(expectedDbRow, outputObjectsRow);
  }
  /**
   * Fires a DropDatabaseEvent at the listener and verifies the mirror image
   * of the create case: the dropped DB appears as an input (with its full
   * serialized form) and no OUTPUT object row is written.
   */
  @Test
  public void testDropDatabase() throws Exception {
    // Setup the audit log DB.
    resetState();
    final DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    final HiveConf hiveConf = getHiveConfig();
    final MetastoreAuditLogListener metastoreAuditLogListener =
        new MetastoreAuditLogListener(hiveConf);
    // Set up the source.
    Map<String, String> parameters = new HashMap<>();
    parameters.put("owner", "root");
    Database database = new Database(
        "test_db",
        "test database",
        "hdfs://dummy",
        parameters
    );
    DropDatabaseEvent event = new DropDatabaseEvent(database, true, null);
    metastoreAuditLogListener.onDropDatabase(event);
    // Check the query audit log.
    List<String> auditCoreLogColumnsToCheck = Lists.newArrayList(
        "command_type",
        "command",
        "inputs",
        "outputs"
    );
    List<String> auditCoreLogRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        auditCoreLogColumnsToCheck,
        null
    );
    List<String> expectedDbRow = Lists.newArrayList(
        HiveOperation.THRIFT_DROP_DATABASE.name(),
        "THRIFT_API",
        "{\"databases\":[\"test_db\"]}",
        "{}"
    );
    assertEquals(expectedDbRow, auditCoreLogRow);
    // Check the input objects audit log.
    List<String> inputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> inputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        INPUT_OBJECTS_TABLE_NAME,
        inputObjectsColumnsToCheck,
        "category = 'INPUT'"
    );
    expectedDbRow = Lists.newArrayList(
        "test_db",
        "DATABASE",
        "{\"1\":{\"str\":\"test_db\"},\"2\":{\"str\":\"test database\"},\"3\":{"
            + "\"str\":\"hdfs://dummy\"},\"4\":{\"map\":[\"str\",\"str\",1,{\"own"
            + "er\":\"root\"}]}}"
    );
    assertEquals(expectedDbRow, inputObjectsRow);
    // Check the output objects audit log.
    List<String> outputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        "category = 'OUTPUT'"
    );
    // Dropping a database produces no outputs.
    assertEquals(null, outputObjectsRow);
  }
  /**
   * Fires a CreateTableEvent at the listener and verifies the audit-log row
   * (table as output, no inputs), the absence of an INPUT object row, and the
   * exact Thrift-JSON serialization stored in the OUTPUT object row.
   */
  @Test
  public void testCreateTable() throws Exception {
    // Setup the audit log DB.
    resetState();
    final DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    final HiveConf hiveConf = getHiveConfig();
    final MetastoreAuditLogListener metastoreAuditLogListener =
        new MetastoreAuditLogListener(hiveConf);
    // Set up the source.
    Table table = new Table();
    table.setDbName("test_db");
    table.setTableName("test_table");
    table.setOwner("root");
    Map<String, String> parameters = new HashMap<>();
    parameters.put("contact", "root@airbnb.com");
    table.setParameters(parameters);
    table.setSd(new StorageDescriptor());
    CreateTableEvent event = new CreateTableEvent(table, true, null);
    metastoreAuditLogListener.onCreateTable(event);
    // Check the query audit log.
    List<String> auditCoreLogColumnsToCheck = Lists.newArrayList(
        "command_type",
        "command",
        "inputs",
        "outputs"
    );
    List<String> auditCoreLogRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        auditCoreLogColumnsToCheck,
        null
    );
    List<String> expectedDbRow = Lists.newArrayList(
        HiveOperation.THRIFT_CREATE_TABLE.name(),
        "THRIFT_API",
        "{}",
        "{\"tables\":[\"test_db.test_table\"]}"
    );
    assertEquals(expectedDbRow, auditCoreLogRow);
    // Check the input objects audit log.
    List<String> inputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> inputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        INPUT_OBJECTS_TABLE_NAME,
        inputObjectsColumnsToCheck,
        "category = 'INPUT'"
    );
    // Table creation has no inputs.
    assertEquals(null, inputObjectsRow);
    // Check the output objects audit log.
    List<String> outputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        "category = 'OUTPUT'"
    );
    expectedDbRow = Lists.newArrayList(
        "test_db.test_table",
        "TABLE",
        "{\"1\":{\"str\":\"test_table\"},\"2\":{\"str\":\"test_db\"},\"3\":{\"s"
            + "tr\":\"root\"},\"4\":{\"i32\":0},\"5\":{\"i32\":0},\"6\":{\"i32\":"
            + "0},\"7\":{\"rec\":{\"5\":{\"tf\":0},\"6\":{\"i32\":0}}},\"9\":{\"m"
            + "ap\":[\"str\",\"str\",1,{\"contact\":\"root@airbnb.com\"}]}}"
    );
    assertEquals(expectedDbRow, outputObjectsRow);
  }
  /**
   * Fires a DropTableEvent at the listener and verifies the mirror of the
   * create case: the dropped table is logged as an input with its serialized
   * form, and no OUTPUT object row is written.
   */
  @Test
  public void testDropTable() throws Exception {
    // Setup the audit log DB.
    resetState();
    final DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    final HiveConf hiveConf = getHiveConfig();
    final MetastoreAuditLogListener metastoreAuditLogListener =
        new MetastoreAuditLogListener(hiveConf);
    // Set up the source.
    Table table = new Table();
    table.setDbName("test_db");
    table.setTableName("test_table");
    table.setOwner("root");
    Map<String, String> parameters = new HashMap<>();
    parameters.put("contact", "root@airbnb.com");
    table.setParameters(parameters);
    table.setSd(new StorageDescriptor());
    DropTableEvent event = new DropTableEvent(table, true, true, null);
    metastoreAuditLogListener.onDropTable(event);
    // Check the query audit log.
    List<String> auditCoreLogColumnsToCheck = Lists.newArrayList(
        "command_type",
        "command",
        "inputs",
        "outputs"
    );
    List<String> auditCoreLogRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        auditCoreLogColumnsToCheck,
        null
    );
    List<String> expectedDbRow = Lists.newArrayList(
        HiveOperation.THRIFT_DROP_TABLE.name(),
        "THRIFT_API",
        "{\"tables\":[\"test_db.test_table\"]}",
        "{}"
    );
    assertEquals(expectedDbRow, auditCoreLogRow);
    // Check the input objects audit log.
    List<String> inputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> inputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        INPUT_OBJECTS_TABLE_NAME,
        inputObjectsColumnsToCheck,
        "category = 'INPUT'"
    );
    expectedDbRow = Lists.newArrayList(
        "test_db.test_table",
        "TABLE",
        "{\"1\":{\"str\":\"test_table\"},\"2\":{\"str\":\"test_db\"},\"3\":{\"s"
            + "tr\":\"root\"},\"4\":{\"i32\":0},\"5\":{\"i32\":0},\"6\":{\"i32\":"
            + "0},\"7\":{\"rec\":{\"5\":{\"tf\":0},\"6\":{\"i32\":0}}},\"9\":{\"m"
            + "ap\":[\"str\",\"str\",1,{\"contact\":\"root@airbnb.com\"}]}}"
    );
    assertEquals(expectedDbRow, inputObjectsRow);
    // Check the output objects audit log.
    List<String> outputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        "category = 'OUTPUT'"
    );
    // Dropping a table produces no outputs.
    assertEquals(null, outputObjectsRow);
  }
  /**
   * Fires an AlterTableEvent at the listener and verifies that the old table
   * is logged as the input and the new table as the output, each with its own
   * exact Thrift-JSON serialization.
   */
  @Test
  public void testAlterTable() throws Exception {
    // Setup the audit log DB.
    resetState();
    final DbConnectionFactory dbConnectionFactory = getDbConnectionFactory();
    final HiveConf hiveConf = getHiveConfig();
    final MetastoreAuditLogListener metastoreAuditLogListener =
        new MetastoreAuditLogListener(hiveConf);
    // Set up the source.
    Table oldTable = new Table();
    oldTable.setDbName("test_db");
    oldTable.setTableName("test_old_table");
    oldTable.setOwner("foo");
    oldTable.setSd(new StorageDescriptor());
    Map<String, String> oldParameters = new HashMap<>();
    oldParameters.put("contact", "foo@airbnb.com");
    oldTable.setParameters(oldParameters);
    Table newTable = new Table();
    newTable.setDbName("test_db");
    newTable.setTableName("test_new_table");
    newTable.setOwner("bar");
    newTable.setSd(new StorageDescriptor());
    Map<String, String> newParameters = new HashMap<>();
    newParameters.put("contact", "bar@airbnb.com");
    newTable.setParameters(newParameters);
    AlterTableEvent event = new AlterTableEvent(
        oldTable,
        newTable,
        true,
        null
    );
    metastoreAuditLogListener.onAlterTable(event);
    // Check the query audit log.
    List<String> auditCoreLogColumnsToCheck = Lists.newArrayList(
        "command_type",
        "command",
        "inputs",
        "outputs"
    );
    List<String> auditCoreLogRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        auditCoreLogColumnsToCheck,
        null
    );
    List<String> expectedDbRow = Lists.newArrayList(
        HiveOperation.THRIFT_ALTER_TABLE.name(),
        "THRIFT_API",
        "{\"tables\":[\"test_db.test_old_table\"]}",
        "{\"tables\":[\"test_db.test_new_table\"]}"
    );
    assertEquals(expectedDbRow, auditCoreLogRow);
    // Check the input objects audit log.
    List<String> inputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> inputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        INPUT_OBJECTS_TABLE_NAME,
        inputObjectsColumnsToCheck,
        "category = 'INPUT'"
    );
    expectedDbRow = Lists.newArrayList(
        "test_db.test_old_table",
        "TABLE",
        "{\"1\":{\"str\":\"test_old_table\"},\"2\":{\"str\":\"test_db\"},\"3\":"
            + "{\"str\":\"foo\"},\"4\":{\"i32\":0},\"5\":{\"i32\":0},\"6\":{\"i32"
            + "\":0},\"7\":{\"rec\":{\"5\":{\"tf\":0},\"6\":{\"i32\":0}}},\"9\":{"
            + "\"map\":[\"str\",\"str\",1,{\"contact\":\"foo@airbnb.com\"}]}}"
    );
    assertEquals(expectedDbRow, inputObjectsRow);
    // Check the output objects audit log.
    List<String> outputObjectsColumnsToCheck = Lists.newArrayList(
        "name",
        "type",
        "serialized_object"
    );
    List<String> outputObjectsRow = ReplicationTestUtils.getRow(
        dbConnectionFactory,
        DB_NAME,
        OUTPUT_OBJECTS_TABLE_NAME,
        outputObjectsColumnsToCheck,
        "category = 'OUTPUT'"
    );
    expectedDbRow = Lists.newArrayList(
        "test_db.test_new_table",
        "TABLE",
        "{\"1\":{\"str\":\"test_new_table\"},\"2\":{\"str\":\"test_db\"},\"3\":"
            + "{\"str\":\"bar\"},\"4\":{\"i32\":0},\"5\":{\"i32\":0},\"6\":{\"i32"
            + "\":0},\"7\":{\"rec\":{\"5\":{\"tf\":0},\"6\":{\"i32\":0}}},\"9\":{"
            + "\"map\":[\"str\",\"str\",1,{\"contact\":\"bar@airbnb.com\"}]}}"
    );
    assertEquals(expectedDbRow, outputObjectsRow);
  }
@Test
public void testAddPartition() throws Exception {
  // Reset the audit log DB to a known-empty state.
  resetState();
  final DbConnectionFactory connectionFactory = getDbConnectionFactory();
  final HiveConf conf = getHiveConfig();
  final MetastoreAuditLogListener listener = new MetastoreAuditLogListener(conf);

  // Build a partitioned external table and a single partition for 2016-01-01.
  StorageDescriptor descriptor = new StorageDescriptor();
  descriptor.setSerdeInfo(new SerDeInfo());
  descriptor.setLocation("hdfs://dummy/");

  Table table = new Table();
  table.setDbName("test_db");
  table.setTableName("test_table");
  table.setTableType("EXTERNAL_TABLE");
  table.setSd(descriptor);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  partitionKeys.add(new FieldSchema("ds", "string", "UTC date"));
  table.setPartitionKeys(partitionKeys);

  Partition partition = new Partition();
  partition.setDbName("test_db");
  partition.setTableName("test_table");
  partition.setSd(new StorageDescriptor());
  List<String> values = new ArrayList<>();
  values.add("2016-01-01");
  partition.setValues(values);

  // Fire the event through the listener under test.
  listener.onAddPartition(new AddPartitionEvent(table, partition, false, null));

  // Verify the core audit log entry: the new partition shows up as an output.
  List<String> coreColumns = Lists.newArrayList(
      "command_type",
      "command",
      "inputs",
      "outputs"
  );
  List<String> coreRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      AUDIT_LOG_TABLE_NAME,
      coreColumns,
      null
  );
  List<String> expectedRow = Lists.newArrayList(
      HiveOperation.THRIFT_ADD_PARTITION.name(),
      "THRIFT_API",
      "{}",
      "{\"partitions\":[\"test_db.test_table/ds=2016-01-01\"]}"
  );
  assertEquals(expectedRow, coreRow);

  // Verify that no input objects were logged.
  List<String> objectColumns = Lists.newArrayList(
      "name",
      "type",
      "serialized_object"
  );
  List<String> inputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      INPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'INPUT'"
  );
  assertEquals(null, inputRow);

  // Verify the reference table entry in the output objects audit log.
  List<String> outputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      OUTPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'REFERENCE_TABLE' AND type = 'TABLE'"
  );
  expectedRow = Lists.newArrayList(
      "test_db.test_table",
      "TABLE",
      "{\"1\":{\"str\":\"test_table\"},\"2\":{\"str\":\"test_db\"},\"4\":{\"i32"
      + "\":0},\"5\":{\"i32\":0},\"6\":{\"i32\":0},\"7\":{\"rec\":{\"2\":{\"s"
      + "tr\":\"hdfs://dummy/\"},\"5\":{\"tf\":0},\"6\":{\"i32\":0},\"7\":{\""
      + "rec\":{}}}},\"8\":{\"lst\":[\"rec\",1,{\"1\":{\"str\":\"ds\"},\"2\":"
      + "{\"str\":\"string\"},\"3\":{\"str\":\"UTC date\"}}]},\"12\":{\"str\""
      + ":\"EXTERNAL_TABLE\"}}"
  );
  assertEquals(expectedRow, outputRow);

  // Verify the partition entry in the output objects audit log.
  outputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      OUTPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'OUTPUT' AND type = 'PARTITION'"
  );
  expectedRow = Lists.newArrayList(
      "test_db.test_table/ds=2016-01-01",
      "PARTITION",
      "{\"1\":{\"lst\":[\"str\",1,\"2016-01-01\"]},\"2\":{\"str\":\"test_db\""
      + "},\"3\":{\"str\":\"test_table\"},\"4\":{\"i32\":0},\"5\":{\"i32\":"
      + "0},\"6\":{\"rec\":{\"2\":{\"str\":\"hdfs://dummy/ds=2016-01-01\"},"
      + "\"5\":{\"tf\":0},\"6\":{\"i32\":0}}}}"
  );
  assertEquals(expectedRow, outputRow);
}
@Test
public void testDropPartition() throws Exception {
  // Reset the audit log DB to a known-empty state.
  resetState();
  final DbConnectionFactory connectionFactory = getDbConnectionFactory();
  final HiveConf conf = getHiveConfig();
  final MetastoreAuditLogListener listener = new MetastoreAuditLogListener(conf);

  // Build a partitioned external table and the partition being dropped.
  StorageDescriptor descriptor = new StorageDescriptor();
  descriptor.setSerdeInfo(new SerDeInfo());
  descriptor.setLocation("hdfs://dummy/");

  Table table = new Table();
  table.setDbName("test_db");
  table.setTableName("test_table");
  table.setTableType("EXTERNAL_TABLE");
  table.setSd(descriptor);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  partitionKeys.add(new FieldSchema("ds", "string", "UTC date"));
  table.setPartitionKeys(partitionKeys);

  Partition partition = new Partition();
  partition.setDbName("test_db");
  partition.setTableName("test_table");
  partition.setSd(new StorageDescriptor());
  List<String> values = new ArrayList<>();
  values.add("2016-01-01");
  partition.setValues(values);

  // Fire the event through the listener under test.
  listener.onDropPartition(new DropPartitionEvent(table, partition, false, true, null));

  // Verify the core audit log entry: the dropped partition is an input only.
  List<String> coreColumns = Lists.newArrayList(
      "command_type",
      "command",
      "inputs",
      "outputs"
  );
  List<String> coreRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      AUDIT_LOG_TABLE_NAME,
      coreColumns,
      null
  );
  List<String> expectedRow = Lists.newArrayList(
      HiveOperation.THRIFT_DROP_PARTITION.name(),
      "THRIFT_API",
      "{\"partitions\":[\"test_db.test_table/ds=2016-01-01\"]}",
      "{}"
  );
  assertEquals(expectedRow, coreRow);

  // Verify the reference table entry in the input objects audit log.
  List<String> objectColumns = Lists.newArrayList(
      "name",
      "type",
      "serialized_object"
  );
  List<String> inputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      INPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'REFERENCE_TABLE' AND type = 'TABLE'"
  );
  expectedRow = Lists.newArrayList(
      "test_db.test_table",
      "TABLE",
      "{\"1\":{\"str\":\"test_table\"},\"2\":{\"str\":\"test_db\"},\"4\":{\"i"
      + "32\":0},\"5\":{\"i32\":0},\"6\":{\"i32\":0},\"7\":{\"rec\":{\"2\":"
      + "{\"str\":\"hdfs://dummy/\"},\"5\":{\"tf\":0},\"6\":{\"i32\":0},\"7"
      + "\":{\"rec\":{}}}},\"8\":{\"lst\":[\"rec\",1,{\"1\":{\"str\":\"ds\""
      + "},\"2\":{\"str\":\"string\"},\"3\":{\"str\":\"UTC date\"}}]},\"12"
      + "\":{\"str\":\"EXTERNAL_TABLE\"}}"
  );
  assertEquals(expectedRow, inputRow);

  // Verify the partition entry in the input objects audit log.
  inputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      INPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'INPUT' AND type = 'PARTITION'"
  );
  expectedRow = Lists.newArrayList(
      "test_db.test_table/ds=2016-01-01",
      "PARTITION",
      "{\"1\":{\"lst\":[\"str\",1,\"2016-01-01\"]},\"2\":{\"str\":\"test_db\""
      + "},\"3\":{\"str\":\"test_table\"},\"4\":{\"i32\":0},\"5\":{\"i32\":"
      + "0},\"6\":{\"rec\":{\"2\":{\"str\":\"hdfs://dummy/ds=2016-01-01\"},"
      + "\"5\":{\"tf\":0},\"6\":{\"i32\":0}}}}"
  );
  assertEquals(expectedRow, inputRow);

  // Verify that no output objects were logged.
  List<String> outputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      OUTPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'OUTPUT'"
  );
  assertEquals(null, outputRow);
}
@Test
public void testAlterPartition() throws Exception {
  // Reset the audit log DB to a known-empty state.
  resetState();
  final DbConnectionFactory connectionFactory = getDbConnectionFactory();
  final HiveConf conf = getHiveConfig();
  final MetastoreAuditLogListener listener = new MetastoreAuditLogListener(conf);

  // Build a partitioned external table plus the before/after partitions.
  StorageDescriptor descriptor = new StorageDescriptor();
  descriptor.setSerdeInfo(new SerDeInfo());
  descriptor.setLocation("hdfs://dummy");

  Table table = new Table();
  table.setDbName("test_db");
  table.setTableName("test_table");
  table.setTableType("EXTERNAL_TABLE");
  table.setSd(descriptor);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  partitionKeys.add(new FieldSchema("ds", "string", "UTC date"));
  table.setPartitionKeys(partitionKeys);

  Partition before = new Partition();
  before.setDbName("test_db");
  before.setTableName("test_table");
  before.setSd(descriptor);
  List<String> beforeValues = new ArrayList<>();
  beforeValues.add("2016-01-01");
  before.setValues(beforeValues);

  Partition after = new Partition();
  after.setDbName("test_db");
  after.setTableName("test_table");
  after.setSd(descriptor);
  List<String> afterValues = new ArrayList<>();
  afterValues.add("2016-01-02");
  after.setValues(afterValues);

  // The listener looks up the partition's table through the metastore handler,
  // so mock the lookup to return our table.
  HMSHandler handler = Mockito.mock(HMSHandler.class);
  Mockito.when(
      handler.get_table(
          "test_db",
          "test_table"
      )
  ).thenReturn(table);

  // Fire the event through the listener under test.
  listener.onAlterPartition(new AlterPartitionEvent(before, after, false, handler));

  // Verify the core audit log entry: old partition in, new partition out.
  List<String> coreColumns = Lists.newArrayList(
      "command_type",
      "command",
      "inputs",
      "outputs"
  );
  List<String> coreRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      AUDIT_LOG_TABLE_NAME,
      coreColumns,
      null
  );
  List<String> expectedRow = Lists.newArrayList(
      HiveOperation.THRIFT_ALTER_PARTITION.name(),
      "THRIFT_API",
      "{\"partitions\":[\"test_db.test_table/ds=2016-01-01\"]}",
      "{\"partitions\":[\"test_db.test_table/ds=2016-01-02\"]}"
  );
  assertEquals(expectedRow, coreRow);

  // Check the input objects audit log. Note there is no easy way to verify
  // the table for an AlterPartitionEvent since we instantiate this within the
  // event listener where various attributes may be dynamic.
  List<String> objectColumns = Lists.newArrayList(
      "name",
      "type",
      "serialized_object"
  );
  List<String> inputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      INPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'INPUT' AND type = 'PARTITION'"
  );
  expectedRow = Lists.newArrayList(
      "test_db.test_table/ds=2016-01-01",
      "PARTITION",
      "{\"1\":{\"lst\":[\"str\",1,\"2016-01-01\"]},\"2\":{\"str\":\"test_db\""
      + "},\"3\":{\"str\":\"test_table\"},\"4\":{\"i32\":0},\"5\":{\"i32\":"
      + "0},\"6\":{\"rec\":{\"2\":{\"str\":\"hdfs://dummy\"},\"5\":{\"tf\":"
      + "0},\"6\":{\"i32\":0},\"7\":{\"rec\":{}}}}}"
  );
  assertEquals(expectedRow, inputRow);

  // Check the output objects audit log. The same caveat as above applies to
  // the reference table entry.
  List<String> outputRow = ReplicationTestUtils.getRow(
      connectionFactory,
      DB_NAME,
      OUTPUT_OBJECTS_TABLE_NAME,
      objectColumns,
      "category = 'OUTPUT' AND type = 'PARTITION'"
  );
  expectedRow = Lists.newArrayList(
      "test_db.test_table/ds=2016-01-02",
      "PARTITION",
      "{\"1\":{\"lst\":[\"str\",1,\"2016-01-02\"]},\"2\":{\"str\":\"test_db\""
      + "},\"3\":{\"str\":\"test_table\"},\"4\":{\"i32\":0},\"5\":{\"i32\":"
      + "0},\"6\":{\"rec\":{\"2\":{\"str\":\"hdfs://dummy\"},\"5\":{\"tf\":"
      + "0},\"6\":{\"i32\":0},\"7\":{\"rec\":{}}}}}"
  );
  assertEquals(expectedRow, outputRow);
}
}
| 9,437 |
0 | Create_ds/reair/hive-hooks/src/test/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/test/java/com/airbnb/reair/hive/hooks/MapRedStatsLogModuleTest.java | package com.airbnb.reair.hive.hooks;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.mapred.Counters;
import org.junit.Test;
/**
 * Tests for serializing Hadoop {@code Counters} to JSON via
 * {@code MapRedStatsLogModule.toJson}.
 */
public class MapRedStatsLogModuleTest {

  /** Serializing an empty Counters object should yield an empty JSON array. */
  @Test
  public void testZeroCounterGroupsToJson() throws SerializationException {
    Counters counters = new Counters();
    String json = MapRedStatsLogModule.toJson(counters);
    // Use (expected, actual) ordering to match JUnit's convention and the
    // other tests in this class.
    assertEquals("[]", json);
  }

  /** A single group with a single counter serializes to one group entry. */
  @Test
  public void testOneGroupOneCounterToJson() throws SerializationException {
    Counters counters = new Counters();
    counters.incrCounter("SomeCounterGroupName", "SomeCounterName", 3);
    String json = MapRedStatsLogModule.toJson(counters);
    assertEquals(
        "[{\"groupName\":\"SomeCounterGroupName\",\"counters\":[{\"counterNa"
        + "me\":\"SomeCounterName\",\"value\":3}]}]",
        json);
  }

  /** Counters within a group are serialized in sorted name order. */
  @Test
  public void testOneGroupManyCountersToJson() throws SerializationException {
    Counters counters = new Counters();
    counters.incrCounter("SomeCounterGroupName", "SomeCounterName", 3);
    counters.incrCounter("SomeCounterGroupName", "AnotherCounterName", 4);
    counters.incrCounter("SomeCounterGroupName", "YetAnotherCounterName", 4);
    String json = MapRedStatsLogModule.toJson(counters);
    assertEquals(
        "[{\"groupName\":\"SomeCounterGroupName\",\"counters\":[{\"counterNam"
        + "e\":\"AnotherCounterName\",\"value\":4},{\"counterName\":\"SomeCount"
        + "erName\",\"value\":3},{\"counterName\":\"YetAnotherCounterName\",\"v"
        + "alue\":4}]}]",
        json);
  }

  /** Multiple groups each containing multiple counters. */
  @Test
  public void testManyGroupsManyCountersToJson()
      throws SerializationException {
    Counters counters = new Counters();
    counters.incrCounter("SomeCounterGroupName1", "SomeCounterName1", 3);
    counters.incrCounter("SomeCounterGroupName1", "SomeCounterName2", 4);
    counters.incrCounter("SomeCounterGroupName1", "SomeCounterName3", 5);
    counters.incrCounter("SomeCounterGroupName2", "SomeCounterName1", 6);
    counters.incrCounter("SomeCounterGroupName2", "SomeCounterName2", 7);
    counters.incrCounter("SomeCounterGroupName2", "SomeCounterName3", 8);
    counters.incrCounter("SomeCounterGroupName3", "SomeCounterName1", 9);
    counters.incrCounter("SomeCounterGroupName3", "SomeCounterName2", 10);
    counters.incrCounter("SomeCounterGroupName3", "SomeCounterName3", 11);
    String json = MapRedStatsLogModule.toJson(counters);
    // NOTE(review): the expected payload lists every counter (values 3-11)
    // under each of the three groups rather than only that group's counters.
    // This presumably pins the current behavior of toJson's group iteration —
    // verify whether that is intended or a serialization bug.
    assertEquals(
        "[{\"groupName\":\"SomeCounterGroupName1\",\"counters\":[{\"counterN"
        + "ame\":\"SomeCounterName1\",\"value\":3},{\"counterName\":\"SomeCount"
        + "erName2\",\"value\":4},{\"counterName\":\"SomeCounterName3\",\"value"
        + "\":5},{\"counterName\":\"SomeCounterName1\",\"value\":6},{\"counterN"
        + "ame\":\"SomeCounterName2\",\"value\":7},{\"counterName\":\"SomeCount"
        + "erName3\",\"value\":8},{\"counterName\":\"SomeCounterName1\",\"value"
        + "\":9},{\"counterName\":\"SomeCounterName2\",\"value\":10},{\"counter"
        + "Name\":\"SomeCounterName3\",\"value\":11}]},{\"groupName\":\"SomeCou"
        + "nterGroupName2\",\"counters\":[{\"counterName\":\"SomeCounterName1\""
        + ",\"value\":3},{\"counterName\":\"SomeCounterName2\",\"value\":4},{\""
        + "counterName\":\"SomeCounterName3\",\"value\":5},{\"counterName\":\"S"
        + "omeCounterName1\",\"value\":6},{\"counterName\":\"SomeCounterName2\""
        + ",\"value\":7},{\"counterName\":\"SomeCounterName3\",\"value\":8},{\""
        + "counterName\":\"SomeCounterName1\",\"value\":9},{\"counterName\":\"S"
        + "omeCounterName2\",\"value\":10},{\"counterName\":\"SomeCounterName3"
        + "\",\"value\":11}]},{\"groupName\":\"SomeCounterGroupName3\",\"counte"
        + "rs\":[{\"counterName\":\"SomeCounterName1\",\"value\":3},{\"counterN"
        + "ame\":\"SomeCounterName2\",\"value\":4},{\"counterName\":\"SomeCount"
        + "erName3\",\"value\":5},{\"counterName\":\"SomeCounterName1\",\"value"
        + "\":6},{\"counterName\":\"SomeCounterName2\",\"value\":7},{\"counterN"
        + "ame\":\"SomeCounterName3\",\"value\":8},{\"counterName\":\"SomeCount"
        + "erName1\",\"value\":9},{\"counterName\":\"SomeCounterName2\",\"value"
        + "\":10},{\"counterName\":\"SomeCounterName3\",\"value\":11}]}]",
        json);
  }
}
| 9,438 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/ConfigurationDbCredentials.java | package com.airbnb.reair.hive.hooks;
import com.airbnb.reair.db.DbCredentials;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
/**
 * DB credentials read directly from a Hadoop {@link Configuration} object.
 */
public class ConfigurationDbCredentials implements DbCredentials {

  // All state is set once at construction, so the fields are final.
  private final Configuration conf;
  private final String usernameKey;
  private final String passwordKey;

  /**
   * Constructor.
   *
   * @param conf configuration object that contains the credentials
   * @param usernameKey the key to use for fetching the username
   * @param passwordKey the key to use for fetching the password
   */
  public ConfigurationDbCredentials(Configuration conf, String usernameKey, String passwordKey) {
    this.conf = conf;
    this.usernameKey = usernameKey;
    this.passwordKey = passwordKey;
  }

  /** Credentials come straight from the configuration, so there is nothing to refresh. */
  @Override
  public void refreshCredsIfNecessary() throws IOException {}

  /**
   * @return the username stored under {@code usernameKey}
   * @throws IOException if the configuration has no value for the key
   */
  @Override
  public String getReadWriteUsername() throws IOException {
    String username = conf.get(usernameKey);
    if (username == null) {
      throw new IOException("Key missing value: " + usernameKey);
    }
    return username;
  }

  /**
   * @return the password stored under {@code passwordKey}
   * @throws IOException if the configuration has no value for the key
   */
  @Override
  public String getReadWritePassword() throws IOException {
    String password = conf.get(passwordKey);
    if (password == null) {
      throw new IOException("Key missing value: " + passwordKey);
    }
    return password;
  }
}
| 9,439 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/AuditCoreLogModule.java | package com.airbnb.reair.hive.hooks;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.hooks.Entity;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Logger;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TJSONProtocol;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Log module for adding core query audit data for a query to the DB.
 */
public class AuditCoreLogModule extends BaseLogModule {

  public static Logger LOG = Logger.getLogger(AuditCoreLogModule.class);

  // Configuration key specifying the DB table that core audit entries go to.
  public static final String TABLE_NAME_KEY =
      "airbnb.reair.audit_log.core.table_name";

  private final Set<ReadEntity> readEntities;
  private final Set<WriteEntity> writeEntities;
  private final UserGroupInformation userGroupInformation;

  /**
   * Constructor.
   *
   * @param connection the connection to use for connecting to the DB
   * @param sessionStateLite session information from Hive for this query
   * @param readEntities the entities that were read by the query
   * @param writeEntities the entities that were written by the query
   * @param userGroupInformation information about the user that ran the query
   *
   * @throws ConfigurationException if there's an error with the configuration for the hook
   */
  public AuditCoreLogModule(final Connection connection,
                            final SessionStateLite sessionStateLite,
                            final Set<ReadEntity> readEntities,
                            final Set<WriteEntity> writeEntities,
                            final UserGroupInformation userGroupInformation)
      throws ConfigurationException {
    super(connection, TABLE_NAME_KEY, sessionStateLite);
    this.readEntities = readEntities;
    this.writeEntities = writeEntities;
    this.userGroupInformation = userGroupInformation;
  }

  /**
   * Inserts the core audit data into the DB.
   *
   * @return the id for the inserted core audit log entry
   *
   * @throws EntityException if there's an error processing the entities associated with this query
   * @throws SerializationException if there's an error serializing the entities to JSON
   * @throws SQLException if there's an error querying the DB
   * @throws UnknownHostException if there's an error getting the IP of this host
   */
  public long run()
      throws EntityException, SerializationException, SQLException, UnknownHostException {
    final String query = String.format("INSERT INTO %s ("
        + "query_id, "
        + "command_type, "
        + "command, "
        + "inputs, "
        + "outputs, "
        + "username, "
        + "ip) "
        + "VALUES (?, ?, ?, ?, ?, ?, ?)",
        tableName);

    // Write the main audit log entry. The statement and result set are
    // closed via try-with-resources so DB handles aren't leaked on failure.
    try (PreparedStatement ps = connection.prepareStatement(query,
        Statement.RETURN_GENERATED_KEYS)) {
      int psIndex = 1;
      ps.setString(psIndex++, sessionStateLite.getQueryId());
      ps.setString(psIndex++, sessionStateLite.getCommandType());
      ps.setString(psIndex++, sessionStateLite.getCmd());
      ps.setString(psIndex++, toJson(readEntities, true));
      ps.setString(psIndex++, toJson(writeEntities, true));
      // The user may be unknown, in which case a NULL username is stored.
      ps.setString(psIndex++, userGroupInformation == null ? null :
          userGroupInformation.getUserName());
      ps.setString(psIndex++, InetAddress.getLocalHost().getHostAddress());
      ps.executeUpdate();

      try (ResultSet rs = ps.getGeneratedKeys()) {
        if (!rs.next()) {
          throw new SQLException(
              "No generated key returned for the audit log entry");
        }
        return rs.getLong(1);
      }
    }
  }

  /**
   * Converts the entities into a JSON object. Resulting object will look
   * like:
   * {
   *   "tables": [t1, t2...],
   *   "partitions": [p1, p2...],
   *   "dummy_partitions": [p1, p2...],
   *   "local_directories": [d1, d2...],
   *   "dfs_directories": [d1, d2...]
   * }
   *
   * <p>Where t1... and p1... objects are JSON objects that represent the thrift
   * metadata object. If identifierOnly is true, then only a short string
   * representation of the object will be used instead. e.g.
   * "default.my_table" or "default.my_partitioned_table/ds=1"
   *
   * @param entities the entities to convert into JSON
   * @param identifierOnly whether to use identifiers instead of the full JSON representation
   * @return a JSON string representing the entities
   * @throws EntityException if there's an error processing an entity
   * @throws SerializationException if there's an error serializing to JSON
   */
  private static String toJson(Collection<? extends Entity> entities,
                               boolean identifierOnly)
      throws EntityException, SerializationException {
    if (entities == null) {
      // No entities: represent as an empty JSON object.
      return new JSONObject().toString();
    }

    // Bucket the entities by type. partitionNames remembers the partition
    // name string (e.g. "ds=1") for each Thrift partition object so the
    // identifier form can be built later.
    List<Database> databases = new ArrayList<>();
    List<Table> tables = new ArrayList<>();
    List<Partition> partitions = new ArrayList<>();
    List<Partition> dummyPartitions = new ArrayList<>();
    List<String> localDirectories = new ArrayList<>();
    List<String> dfsDirectories = new ArrayList<>();
    Map<Partition, String> partitionNames =
        new HashMap<>();

    for (Entity e : entities) {
      switch (e.getType()) {
        case DATABASE:
          databases.add(e.getDatabase());
          break;
        case TABLE:
          tables.add(e.getTable().getTTable());
          break;
        case PARTITION:
          partitions.add(e.getPartition().getTPartition());
          partitionNames.put(e.getPartition().getTPartition(),
              e.getPartition().getName());
          break;
        case DUMMYPARTITION:
          dummyPartitions.add(e.getPartition().getTPartition());
          partitionNames.put(e.getPartition().getTPartition(),
              e.getPartition().getName());
          break;
        case LOCAL_DIR:
          try {
            localDirectories.add(e.getLocation().toString());
          } catch (Exception ex) {
            throw new EntityException(ex);
          }
          break;
        case DFS_DIR:
          try {
            dfsDirectories.add(e.getLocation().toString());
          } catch (Exception ex) {
            throw new EntityException(ex);
          }
          break;
        case UDF:
          // UDFs are deliberately not audited; log and move on.
          LOG.info(
              "Skipping logging of UDF type to audit log - "
              + "displayName: " + e.getUDF().getDisplayName());
          break;
        default:
          throw new EntityException("Unhandled type: "
              + e.getType() + " entity: " + e);
      }
    }

    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());

    JSONArray jsonDatabases = new JSONArray();
    JSONArray jsonTables = new JSONArray();
    JSONArray jsonPartitions = new JSONArray();
    JSONArray jsonDummyPartitions = new JSONArray();
    JSONArray jsonLocalDirs = new JSONArray();
    JSONArray jsonDfsDirs = new JSONArray();
    JSONObject obj = new JSONObject();

    try {
      for (Database db : databases) {
        if (identifierOnly) {
          String jsonDatabase = String.format("%s", db.getName());
          jsonDatabases.put(jsonDatabase);
        } else {
          jsonDatabases.put(new JSONObject(serializer.toString(db)));
        }
      }
      for (Table t : tables) {
        if (identifierOnly) {
          String jsonTable = String.format("%s.%s", t.getDbName(),
              t.getTableName());
          jsonTables.put(jsonTable);
        } else {
          jsonTables.put(new JSONObject(serializer.toString(t)));
        }
      }
      for (Partition p : partitions) {
        if (identifierOnly) {
          String partitionName = String.format("%s.%s/%s", p.getDbName(),
              p.getTableName(),
              partitionNames.get(p));
          jsonPartitions.put(partitionName);
        } else {
          jsonPartitions.put(new JSONObject(serializer.toString(p)));
        }
      }
      for (Partition p : dummyPartitions) {
        if (identifierOnly) {
          String dummyPartitionJson = String.format("%s.%s/%s", p.getDbName(),
              p.getTableName(),
              partitionNames.get(p));
          jsonDummyPartitions.put(dummyPartitionJson);
        } else {
          jsonDummyPartitions.put(new JSONObject(serializer.toString(p)));
        }
      }
      for (String dir : localDirectories) {
        jsonLocalDirs.put(dir);
      }
      for (String dir : dfsDirectories) {
        jsonDfsDirs.put(dir);
      }

      // Only include keys for categories that actually have entries, so the
      // stored JSON stays compact.
      if (jsonDatabases.length() > 0) {
        obj.put("databases", jsonDatabases);
      }
      if (jsonTables.length() > 0) {
        obj.put("tables", jsonTables);
      }
      if (jsonPartitions.length() > 0) {
        obj.put("partitions", jsonPartitions);
      }
      if (jsonDummyPartitions.length() > 0) {
        obj.put("dummy_partitions", jsonDummyPartitions);
      }
      if (jsonLocalDirs.length() > 0) {
        obj.put("local_directories", jsonLocalDirs);
      }
      if (jsonDfsDirs.length() > 0) {
        obj.put("dfs_directories", jsonDfsDirs);
      }
    } catch (TException | JSONException e) {
      throw new SerializationException(e);
    }
    return obj.toString();
  }
}
| 9,440 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/ConfigurationException.java | package com.airbnb.reair.hive.hooks;
/**
 * An exception thrown when there is an error with the configuration.
 */
public class ConfigurationException extends Exception {

  /** Creates an exception with neither a message nor a cause. */
  public ConfigurationException() {
    super();
  }

  /**
   * Creates an exception with a detail message.
   *
   * @param message description of the configuration problem
   */
  public ConfigurationException(String message) {
    super(message);
  }

  /**
   * Creates an exception with a detail message and an underlying cause.
   *
   * @param message description of the configuration problem
   * @param cause the exception that triggered this one
   */
  public ConfigurationException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * Creates an exception wrapping an underlying cause.
   *
   * @param cause the exception that triggered this one
   */
  public ConfigurationException(Throwable cause) {
    super(cause);
  }
}
| 9,441 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/ObjectLogModule.java | package com.airbnb.reair.hive.hooks;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.hooks.Entity;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TJSONProtocol;
import java.net.URI;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
/**
* A module for logging the Thrift metadata objects associated with a query.
*/
public class ObjectLogModule extends BaseLogModule {
public static final String TABLE_NAME_KEY =
"airbnb.reair.audit_log.objects.table_name";
// The objects table stores serialized forms of the relevant Hive objects
// for that query.
//
// The category describes why the object was logged.
// INPUT - the query modified or altered this object
// OUTPUT - the query modified or altered this object
// RENAMED_FROM - the query renamed this object into the OUTPUT object
// REFERENCE_TABLE - when a partition is changed, the table object is
// logged as well for reference
public enum ObjectCategory {INPUT, OUTPUT, RENAME_FROM, REFERENCE_TABLE}
private final Set<ReadEntity> readEntities;
private final Set<WriteEntity> writeEntities;
private final long auditLogId;
/**
 * Creates a module that logs the Thrift metadata objects touched by a query.
 *
 * @param connection the connection to use for connecting to the DB
 * @param sessionStateLite session information from Hive for this query
 * @param readEntities the entities that were read by the query
 * @param writeEntities the entities that were written by the query
 * @param auditLogId the audit log ID from the core log module that's associated with this query
 *
 * @throws ConfigurationException if there's an error with the configuration
 */
public ObjectLogModule(final Connection connection,
    final SessionStateLite sessionStateLite,
    final Set<ReadEntity> readEntities,
    final Set<WriteEntity> writeEntities,
    long auditLogId)
    throws ConfigurationException {
  super(connection, TABLE_NAME_KEY, sessionStateLite);
  this.auditLogId = auditLogId;
  this.readEntities = readEntities;
  this.writeEntities = writeEntities;
}
/**
 * Insert serialized forms of the entities into the DB.
 *
 * <p>Each object is written with its own INSERT statement: writing all objects
 * in a single statement can exceed MySQL packet size limits. Consider a
 * dynamic partition query that generates 10K partitions with Thrift object
 * sizes of 1KB.
 *
 * @throws EntityException if there's an error processing the entity
 * @throws SQLException if there's an error inserting into the DB
 */
public void run() throws SQLException, EntityException {
  final String query = String.format("INSERT INTO %s ("
      + "audit_log_id, "
      + "category, "
      + "type, "
      + "name, "
      + "serialized_object) "
      + "VALUES (?, ?, ?, ?, ?)",
      tableName);

  // Close the prepared statement even when an insert throws so statement
  // handles aren't leaked on the shared connection.
  try (PreparedStatement ps = connection.prepareStatement(query)) {
    String commandType = sessionStateLite.getCommandType();

    String[] thriftCommandTypes = {
        "THRIFT_ADD_PARTITION",
        "THRIFT_ALTER_PARTITION",
        "THRIFT_ALTER_TABLE",
        "THRIFT_CREATE_DATABASE",
        "THRIFT_CREATE_TABLE",
        "THRIFT_DROP_DATABASE",
        "THRIFT_DROP_PARTITION",
        "THRIFT_DROP_TABLE"
    };

    if (Arrays.asList(thriftCommandTypes).contains(commandType)) {
      // Serialize both the inputs and outputs. Additionally for partitions we
      // ensure we also serialize the table entity associated with the
      // partition as a reference.
      for (ReadEntity entity : readEntities) {
        if (entity.getType() == Entity.Type.PARTITION) {
          addToObjectsTable(
              ps,
              auditLogId,
              ObjectCategory.REFERENCE_TABLE,
              new ReadEntity(entity.getT())
          );
        }
        addToObjectsTable(ps, auditLogId, ObjectCategory.INPUT, entity);
      }
      for (WriteEntity entity : writeEntities) {
        if (entity.getType() == Entity.Type.PARTITION) {
          addToObjectsTable(
              ps,
              auditLogId,
              ObjectCategory.REFERENCE_TABLE,
              new WriteEntity(entity.getT(), WriteType.INSERT)
          );
        }
        addToObjectsTable(ps, auditLogId, ObjectCategory.OUTPUT, entity);
      }
    } else {
      // If a partition is added to a table, then the table technically
      // changed as well. Record this in the output objects table as a
      // REFERENCE_TABLE.
      Set<org.apache.hadoop.hive.ql.metadata.Table>
          tableForPartition = new HashSet<>();

      // TODO: ALTERTABLE_EXCHANGEPARTITION is not yet implemented in Hive
      // see https://issues.apache.org/jira/browse/HIVE-11554. Use
      // HiveOperation class once this is in.
      boolean renameTable = "ALTERTABLE_RENAME".equals(commandType);
      boolean renamePartition =
          "ALTERTABLE_RENAMEPART".equals(commandType)
          || "ALTERTABLE_EXCHANGEPARTITION".equals(commandType);
      boolean renameOperation = renameTable || renamePartition;

      // When renaming a table, the read entities contain the source table.
      // When renaming a partition, the read entities contain the renamed
      // partition as well as the partition's table. For the partition case,
      // filter out the table in the read entities.
      String renameFromObject = null;
      if (renameOperation) {
        for (ReadEntity entity : readEntities) {
          if (renamePartition && entity.getType() == Entity.Type.TABLE) {
            continue;
          }
          addToObjectsTable(ps, auditLogId, ObjectCategory.RENAME_FROM, entity);
          renameFromObject = toIdentifierString(entity);
        }
      }

      for (Entity entity : writeEntities) {
        // For rename operations, the source object is also in the write
        // entities. For example a rename of `old_table` -> `new_table` will
        // have `old_table` in read entities, and `old_table` and `new_table`
        // in write entities. Since `old_table` is written to the table as a
        // RENAMED_FROM entry, we don't also need an OUTPUT entry for
        // `old_table`.
        if (renameOperation && toIdentifierString(entity).equals(renameFromObject)) {
          continue;
        }

        // Otherwise add it as an output.
        addToObjectsTable(ps, auditLogId, ObjectCategory.OUTPUT, entity);

        // Save the table for the partitions as reference objects.
        if (entity.getType() == Entity.Type.PARTITION
            || entity.getType() == Entity.Type.DUMMYPARTITION) {
          tableForPartition.add(
              entity.getPartition().getTable());
        }
      }

      for (org.apache.hadoop.hive.ql.metadata.Table t : tableForPartition) {
        // Using DDL_NO_LOCK but the value shouldn't matter.
        WriteEntity entity = new WriteEntity(t,
            WriteEntity.WriteType.DDL_NO_LOCK);
        addToObjectsTable(ps, auditLogId,
            ObjectCategory.REFERENCE_TABLE, entity);
      }
    }
  }
}
/**
* Insert the given entity into the objects table using the given {@code ps}.
*
* @param ps the prepared statemtn to use
* @param auditLogId the audit log ID associated with the Hive query for this audit log entry
* @param category the category of the object
* @param entity the entity associated with this query
*
* @throws EntityException if there's an error processing this entity
* @throws SQLException if there's an error inserting into the DB.
*/
private static void addToObjectsTable(
PreparedStatement ps,
long auditLogId,
ObjectCategory category,
Entity entity) throws SQLException, EntityException {
int psIndex = 1;
ps.setLong(psIndex++, auditLogId);
ps.setString(psIndex++, category.toString());
ps.setString(psIndex++, entity.getType().toString());
ps.setString(psIndex++, toIdentifierString(entity));
ps.setString(psIndex, toJson(entity));
ps.executeUpdate();
}
/**
* Convert the given entity into a string that can be used to identify the
* object in the audit log table.
*
* @param entity the entity to convert
* @return a string representing {@code e}
*
* @throws EntityException if there's an error getting the location for the entity
*/
private static String toIdentifierString(Entity entity) throws EntityException {
switch (entity.getType()) {
case DATABASE:
return entity.getDatabase().getName();
case TABLE:
return String.format("%s.%s",
entity.getTable().getDbName(),
entity.getTable().getTableName());
case PARTITION:
case DUMMYPARTITION:
return String.format("%s.%s/%s",
entity.getPartition().getTPartition().getDbName(),
entity.getPartition().getTPartition().getTableName(),
entity.getPartition().getName());
case LOCAL_DIR:
case DFS_DIR:
try {
return entity.getLocation().toString();
} catch (Exception e) {
throw new EntityException(e);
}
default:
throw new EntityException("Unhandled type: "
+ entity.getType() + " entity: " + entity);
}
}
/**
* Converts the object that the entity represents into a JSON string
* @param entity the entity to convert.
*
* @return a JSON representation of {@code e}
* @throws EntityException if there's an error getting the location for the entity
*/
private static String toJson(Entity entity) throws EntityException {
try {
TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
switch (entity.getType()) {
case DATABASE:
Database db = entity.getDatabase();
return serializer.toString(db);
case TABLE:
Table tableWithLocation = new Table(
entity.getTable().getTTable());
URI dataLocation;
try {
dataLocation = entity.getLocation();
} catch (Exception e) {
throw new EntityException(e);
}
tableWithLocation.getSd().setLocation(
dataLocation == null ? null : dataLocation.toString());
return serializer.toString(entity.getTable().getTTable());
case PARTITION:
case DUMMYPARTITION:
Partition partitionWithLocation = new Partition(
entity.getPartition().getTPartition());
partitionWithLocation.getSd().setLocation(
entity.getPartition().getDataLocation().toString());
return serializer.toString(entity.getPartition().getTPartition());
case LOCAL_DIR:
case DFS_DIR:
try {
return entity.getLocation().toString();
} catch (Exception e) {
throw new EntityException(e);
}
default:
throw new EntityException("Unhandled type: "
+ entity.getType() + " entity: " + entity);
}
} catch (TException e) {
throw new EntityException(e);
}
}
}
| 9,442 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/SessionStateLite.java | package com.airbnb.reair.hive.hooks;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.MapRedStats;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.session.SessionState;
import java.util.HashMap;
import java.util.Map;
/**
* Class to provide a lightweight representation of the Hive session state.
*
* <p>Regrettably the org.apache.hadoop.hive.ql.plan.HiveOperation enum type
* only provides a subset of the operations which are available via Thrift and
* the CLI, hence this class provides a mechanism for including the expanded
* set.</p>
*/
class SessionStateLite {

  private final String cmd;
  private final String commandType;
  private final String queryId;
  private final HiveConf conf;
  private final Map<String, MapRedStats> mapRedStats;

  /**
   * Creates a lightweight representation of the session state for Thrift
   * metastore operations, where no query plan or CLI session exists.
   *
   * @param cmd The Hive command either from Thrift or the CLI
   * @param commandType The Hive command operation
   * @param conf The Hive configuration to snapshot
   */
  public SessionStateLite(
      String cmd,
      HiveOperation commandType,
      HiveConf conf
  ) {
    this.cmd = cmd;
    this.commandType = commandType.name();
    // Copy the conf so later mutations by the caller are not observed here.
    this.conf = new HiveConf(conf);
    // Query ID and per-stage stats only exist for CLI sessions.
    this.queryId = null;
    this.mapRedStats = null;
  }

  /**
   * Creates a lightweight representation of the session state from a query
   * plan, pulling the conf and map-reduce stats from the current session.
   *
   * @param plan The Hive query plan
   */
  public SessionStateLite(QueryPlan plan) {
    SessionState sessionState = SessionState.get();
    this.conf = new HiveConf(sessionState.getConf());
    this.cmd = plan.getQueryStr();
    this.commandType = plan.getOperationName();
    this.queryId = plan.getQueryId();
    // Snapshot the stats map since the session keeps mutating its copy.
    this.mapRedStats = new HashMap<>(sessionState.getMapRedStats());
  }

  /**
   * Gets the Hive command either from Thrift or the CLI.
   *
   * @return The Hive command
   */
  String getCmd() {
    return cmd;
  }

  /**
   * Gets the command type associated with the session state, i.e. the
   * stringified version of the HiveOperation.
   *
   * @return The command type
   */
  String getCommandType() {
    return commandType;
  }

  /**
   * Gets the query ID associated with the session state. Note this is null
   * for Thrift hooks.
   *
   * @return The query ID
   */
  String getQueryId() {
    return queryId;
  }

  /**
   * Gets the Hive config snapshot associated with the session state.
   *
   * @return The Hive config
   */
  HiveConf getConf() {
    return conf;
  }

  /**
   * Gets the per-stage map-reduce stats. Note this is null for Thrift hooks.
   *
   * @return The MapReduce statistics
   */
  Map<String, MapRedStats> getMapRedStats() {
    return mapRedStats;
  }
}
| 9,443 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/BaseLogModule.java | package com.airbnb.reair.hive.hooks;
import org.apache.hadoop.hive.conf.HiveConf;
import java.sql.Connection;
/**
* Base class for audit log modules that write to the database. Audit log modules are run by the
* audit log hook.
*/
public abstract class BaseLogModule {

  // Lightweight session state carrying the command and its configuration
  protected final SessionStateLite sessionStateLite;

  // The database connection that audit information is written through
  protected final Connection connection;

  // The name of the table that this module's audit rows are written to
  protected final String tableName;

  /**
   * Base constructor for log modules.
   *
   * @param connection database connection to write the logs to
   * @param tableNameKey the config key holding the destination table name
   * @param sessionStateLite the session state that contains relevant config
   *
   * @throws ConfigurationException when the table name is not defined in the configuration
   */
  public BaseLogModule(final Connection connection,
                       final String tableNameKey,
                       final SessionStateLite sessionStateLite)
      throws ConfigurationException {
    this.connection = connection;
    this.sessionStateLite = sessionStateLite;

    // The destination table must be configured; fail fast if it is not.
    final String configuredTableName = sessionStateLite.getConf().get(tableNameKey);
    if (configuredTableName == null) {
      throw new ConfigurationException(
          String.format("%s is not defined in the conf!", tableNameKey));
    }
    this.tableName = configuredTableName;
  }
}
| 9,444 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/MetastoreAuditLogListener.java | package com.airbnb.reair.hive.hooks;
import com.airbnb.reair.db.DbCredentials;
import com.airbnb.reair.utils.RetryableTask;
import com.airbnb.reair.utils.RetryingTaskRunner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
import org.apache.hadoop.hive.metastore.events.DropTableEvent;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.log4j.Logger;
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.HashSet;
import java.util.Set;
/**
* Audit logging for the metastore Thrift server. Comprises of a series of
* event listeners adding auditing to the various Thrift events.
*/
public class MetastoreAuditLogListener extends MetaStoreEventListener {
public static Logger LOG = Logger.getLogger(MetastoreAuditLogListener.class);
// Number of attempts to make.
private static final int NUM_ATTEMPTS = 10;
// Will wait BASE_SLEEP * 2 ^ (attempt no.) between attempts.
private static final int BASE_SLEEP = 1;
public static String DB_USERNAME =
"airbnb.reair.metastore.audit_log.db.username";
public static String DB_PASSWORD =
"airbnb.reair.metastore.audit_log.db.password";
// Keys for values in hive-site.xml.
public static String JDBC_URL_KEY =
"airbnb.reair.metastore.audit_log.jdbc_url";
protected DbCredentials dbCredentials;
/**
* Constructor which defines the relevant DB credentials.
*
* @param config The resource configuration
*/
public MetastoreAuditLogListener(Configuration config) {
super(config);
dbCredentials = new ConfigurationDbCredentials(
getConf(),
DB_USERNAME,
DB_PASSWORD
);
}
/**
* Listener which fires when a table is created.
*
* <p>For auditing purposes the read/write differential is the non-existence
* and existence of the created table respectively.</p>
*
* @param event The create table event
*/
@Override
public void onCreateTable(CreateTableEvent event) throws MetaException {
try {
Set<ReadEntity> readEntities = new HashSet<>();
Set<WriteEntity> writeEntities = new HashSet<>();
writeEntities.add(
new WriteEntity(
new Table(event.getTable()),
WriteType.INSERT
)
);
run(readEntities, writeEntities, HiveOperation.THRIFT_CREATE_TABLE);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Listener which fires when a table is dropped.
*
* <p>For auditing purposes the read/write differential is the existence and
* non-existence of the dropped table respectively.</p>
*
* @param event The drop table event
*/
@Override
public void onDropTable(DropTableEvent event) throws MetaException {
try {
Set<ReadEntity> readEntities = new HashSet<>();
readEntities.add(new ReadEntity(new Table(event.getTable())));
Set<WriteEntity> writeEntities = new HashSet<>();
run(readEntities, writeEntities, HiveOperation.THRIFT_DROP_TABLE);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Listener which fires when a table is altered.
*
* <p>For auditing purposes the read/write differential is the old and new
* table respectively.</p>
*
* @param event The add partition event
*/
@Override
public void onAlterTable(AlterTableEvent event) throws MetaException {
try {
Set<ReadEntity> readEntities = new HashSet<>();
readEntities.add(new ReadEntity(new Table(event.getOldTable())));
Set<WriteEntity> writeEntities = new HashSet<>();
writeEntities.add(
new WriteEntity(
new Table(event.getNewTable()),
WriteType.INSERT
)
);
run(readEntities, writeEntities, HiveOperation.THRIFT_ALTER_TABLE);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Listener which fires when a partition is added.
*
* <p>For auditing purposes the read/write differential is the non-existence
* and existence of the added partition respectively.</p>
*
* @param event The add partition event
*/
@Override
public void onAddPartition(AddPartitionEvent event) throws MetaException {
try {
Table table = new Table(event.getTable());
Set<ReadEntity> readEntities = new HashSet<>();
Set<WriteEntity> writeEntities = new HashSet<>();
for (org.apache.hadoop.hive.metastore.api.Partition partition :
event.getPartitions()) {
writeEntities.add(
new WriteEntity(
new Partition(table, partition),
WriteType.INSERT
)
);
}
run(readEntities, writeEntities, HiveOperation.THRIFT_ADD_PARTITION);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Listener which fires when a partition is dropped.
*
* <p>For auditing purposes the read/write differential is the existence and
* non-existence of the dropped partition respectively.</p>
*
* @param event The drop partition event
*/
@Override
public void onDropPartition(DropPartitionEvent event) throws MetaException {
try {
Set<ReadEntity> readEntities = new HashSet<>();
readEntities.add(
new ReadEntity(
new Partition(new Table(event.getTable()), event.getPartition())
)
);
Set<WriteEntity> writeEntities = new HashSet<>();
run(readEntities, writeEntities, HiveOperation.THRIFT_DROP_PARTITION);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Listener which fires when a partition is altered.
*
* <p>For auditing purposes the read/write differential is the old and new
* partition respectively.</p>
*
* <p>Note that a bug in the AlterPartitionEvent which has been resolved in
* a later version does not provide access to the underlying table associated
* with the partitions, hence it is necessary to fetch it from the metastore.
* </p>
*
* @param event The add partition event
*/
@Override
public void onAlterPartition(AlterPartitionEvent event) throws MetaException {
try {
// Table is invariant and thus an arbitrary choice between old and new.
Table table = new Table(
event.getHandler().get_table(
event.getOldPartition().getDbName(),
event.getOldPartition().getTableName()
)
);
Set<ReadEntity> readEntities = new HashSet<>();
readEntities.add(
new ReadEntity(
new Partition(table, event.getOldPartition())
)
);
Set<WriteEntity> writeEntities = new HashSet<>();
writeEntities.add(
new WriteEntity(
new Partition(table, event.getNewPartition()),
WriteType.INSERT
)
);
run(readEntities, writeEntities, HiveOperation.THRIFT_ALTER_PARTITION);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Listener which fires when a database (schema) is created.
*
* <p>For auditing purposes the read/write differential is the non-existence
* and existence of the created database respectively.</p>
*
* @param event The create database event
*/
@Override
public void onCreateDatabase(CreateDatabaseEvent event) throws MetaException {
try {
Set<ReadEntity> readEntities = new HashSet<>();
Set<WriteEntity> writeEntities = new HashSet<>();
writeEntities.add(new WriteEntity(event.getDatabase(), WriteType.INSERT));
run(readEntities, writeEntities, HiveOperation.THRIFT_CREATE_DATABASE);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Listener which fires when a database (schema) is dropped.
*
* <p>For auditing purposes the read/write differential is the existence and
* non-existence of the dropped database respectively.</p>
*
* @param event The drop database event
*/
@Override
public void onDropDatabase(DropDatabaseEvent event) throws MetaException {
try {
Set<ReadEntity> readEntities = new HashSet<>();
readEntities.add(new ReadEntity(event.getDatabase()));
Set<WriteEntity> writeEntities = new HashSet<>();
run(readEntities, writeEntities, HiveOperation.THRIFT_DROP_DATABASE);
} catch (Exception e) {
throw new MetaException(e.getMessage());
}
}
/**
* Runs the individual metastore audit log modules that make up this hook.
*
* @param readEntities The entities that were read by the query
* @param writeEntities The entities that were written by the query
* @param hiveOperation The Hive operation
* @throws Exception If there is an error running any of the logging modules
*/
private void run(
Set<ReadEntity> readEntities,
Set<WriteEntity> writeEntities,
HiveOperation hiveOperation
) throws Exception {
HiveConf conf = (HiveConf) getConf();
SessionStateLite sessionStateLite = new SessionStateLite(
"THRIFT_API",
hiveOperation,
conf
);
final String jdbcUrl = conf.get(JDBC_URL_KEY);
if (jdbcUrl == null) {
throw new ConfigurationException(
JDBC_URL_KEY + " is not defined in the conf!"
);
}
RetryingTaskRunner runner = new RetryingTaskRunner(
NUM_ATTEMPTS,
BASE_SLEEP
);
long startTime = System.currentTimeMillis();
LOG.debug("Starting insert into metastore audit log");
runner.runWithRetries(new RetryableTask() {
@Override
public void run() throws Exception {
Connection connection = DriverManager.getConnection(
jdbcUrl,
dbCredentials.getReadWriteUsername(),
dbCredentials.getReadWritePassword()
);
connection.setTransactionIsolation(
Connection.TRANSACTION_READ_COMMITTED
);
// Turn off auto commit so that we can ensure that both the
// audit log entry and the output rows appear at the same time.
connection.setAutoCommit(false);
long auditLogId = new AuditCoreLogModule(
connection,
sessionStateLite,
readEntities,
writeEntities,
null
).run();
new ObjectLogModule(
connection,
sessionStateLite,
readEntities,
writeEntities,
auditLogId
).run();
connection.commit();
}
});
LOG.debug(
String.format(
"Applying log modules took %d ms",
System.currentTimeMillis() - startTime
)
);
}
}
| 9,445 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/SerializationException.java | package com.airbnb.reair.hive.hooks;
/**
* Exception related to serializing data to insert into a DB.
*/
public class SerializationException extends Exception {
public SerializationException(String message) {
super(message);
}
public SerializationException(String message, Throwable cause) {
super(message, cause);
}
public SerializationException(Throwable cause) {
super(cause);
}
}
| 9,446 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/MapRedStatsLogModule.java | package com.airbnb.reair.hive.hooks;
import com.google.common.annotations.VisibleForTesting;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.hadoop.hive.ql.MapRedStats;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Group;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Map;
/**
* A log module for logging mapreduce stats for each stage of a Hive query.
*/
public class MapRedStatsLogModule extends BaseLogModule {
public static final String TABLE_NAME_KEY =
"airbnb.reair.audit_log.mapred_stats.table_name";
private final long auditLogId;
public MapRedStatsLogModule(final Connection connection,
final SessionStateLite sessionStateLite,
long auditLogId)
throws ConfigurationException {
super(connection, TABLE_NAME_KEY, sessionStateLite);
this.auditLogId = auditLogId;
}
/**
* Runs the log module, writing the relevant audit data to the DB.
*
* @throws SerializationException if there's an error serializing data.
*/
public void run() throws SerializationException, SQLException {
final String query = String.format("INSERT INTO %s ("
+ "audit_log_id, "
+ "stage, "
+ "mappers, "
+ "reducers, "
+ "cpu_time, "
+ "counters) "
+ "VALUES (?, ?, ?, ?, ?, ?)",
tableName);
// Insert a DB row for each Hive stage
Map<String, MapRedStats> statsPerStage = sessionStateLite.getMapRedStats();
for (String stage: statsPerStage.keySet()) {
MapRedStats stats = statsPerStage.get(stage);
PreparedStatement ps = connection.prepareStatement(query);
int psIndex = 1;
ps.setLong(psIndex++, auditLogId);
ps.setString(psIndex++, stage);
ps.setLong(psIndex++, stats.getNumMap());
ps.setLong(psIndex++, stats.getNumReduce());
ps.setLong(psIndex++, stats.getCpuMSec());
ps.setString(psIndex, toJson(stats.getCounters()));
ps.executeUpdate();
}
}
/**
* Converts Hadoop counters to a JSON representation.
*
* @param counters the Hadoop counters to convert
* @return the JSON representation of the given counters
*
* @throws SerializationException if mapping the counters to JSON fails
*/
@VisibleForTesting
static String toJson(Counters counters) throws SerializationException {
ArrayNode countersJsonNode = JsonNodeFactory.instance.arrayNode();
ArrayNode groupsJsonNode = JsonNodeFactory.instance.arrayNode();
for (Group group: counters) {
for (Counters.Counter counter: group) {
ObjectNode counterJsonNode = JsonNodeFactory.instance.objectNode();
counterJsonNode.put("counterName", counter.getName())
.put("value", counter.getValue());
countersJsonNode.add(counterJsonNode);
}
ObjectNode groupJsonNode = JsonNodeFactory.instance.objectNode();
groupJsonNode.put("groupName", group.getDisplayName())
.put("counters", countersJsonNode);
groupsJsonNode.add(groupJsonNode);
}
ObjectMapper mapper = new ObjectMapper();
try {
return mapper.writeValueAsString(groupsJsonNode);
} catch (JsonProcessingException e) {
throw new SerializationException(e);
}
}
}
| 9,447 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/AuditLogHookUtils.java | package com.airbnb.reair.hive.hooks;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.EmbeddedMySqlDb;
import com.airbnb.reair.db.TestDbCredentials;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
import org.apache.hadoop.hive.ql.MapRedStats;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
import org.apache.hadoop.hive.ql.session.SessionState;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class AuditLogHookUtils {
private static final Log LOG = LogFactory.getLog(AuditLogHookUtils.class);
/**
* In the MySQL DB, setup the DB and the tables for the audit log to work
* properly.
* @param connectionFactory a factory for creating connections to the DB that should contain the
* tables
* @param dbName the name of the MySQL DB
* @param auditCoreLogTableName the name of the table containing core audit log data (e.g. query
* string)
* @param objectsTableName the name of the table containing the serialized Thrift objects
* associated with the query.
* @param mapRedStatsTableName the name of the table containing the stats about the map-reduce
* jobs associated with the query
*
* @throws SQLException if there's an error creating the tables on the DB
*/
public static void setupAuditLogTables(
DbConnectionFactory connectionFactory,
String dbName,
String auditCoreLogTableName,
String objectsTableName,
String mapRedStatsTableName) throws SQLException {
// Define the SQL that will do the creation
String createDbSql = String.format("CREATE DATABASE %s", dbName);
String createAuditLogTableSql = String.format(
"CREATE TABLE `%s` ("
+ "`id` bigint(20) NOT NULL AUTO_INCREMENT, "
+ "`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, "
+ "`query_id` varchar(256) DEFAULT NULL,"
+ "`command_type` varchar(64) DEFAULT NULL,"
+ "`command` mediumtext,"
+ "`inputs` mediumtext,"
+ "`outputs` mediumtext,"
+ "`username` varchar(64) DEFAULT NULL,"
+ "`ip` varchar(64) DEFAULT NULL,"
+ "`extras` mediumtext,"
+ "PRIMARY KEY (`id`),"
+ "KEY `create_time_index` (`create_time`)"
+ ") ENGINE=InnoDB", auditCoreLogTableName);
String createObjectsTableSql = String.format(
"CREATE TABLE `%s` ("
+ "`id` bigint(20) NOT NULL AUTO_INCREMENT, "
+ "`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, "
+ "`audit_log_id` bigint(20) NOT NULL, "
+ "`category` varchar(64) DEFAULT NULL, "
+ "`type` varchar(64) DEFAULT NULL, "
+ "`name` varchar(4000) DEFAULT NULL, "
+ "`serialized_object` mediumtext, "
+ "PRIMARY KEY (`id`), "
+ "KEY `create_time_index` (`create_time`) "
+ ") ENGINE=InnoDB", objectsTableName);
String createMapRedStatsTableSql = String.format(
"CREATE TABLE `%s` ("
+ "`id` bigint(20) NOT NULL AUTO_INCREMENT, "
+ "`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, "
+ "`audit_log_id` bigint(20) NOT NULL, "
+ "`stage` varchar(256) NOT NULL, "
+ "`mappers` bigint(20) NOT NULL, "
+ "`reducers` bigint(20) NOT NULL, "
+ "`cpu_time` bigint(20) NOT NULL, "
+ "`counters` text(240000) DEFAULT NULL, "
+ "PRIMARY KEY (`id`), "
+ "KEY `create_time_index` (`create_time`) ,"
+ "KEY `audit_log_id_index` (`audit_log_id`) "
+ ") ENGINE=InnoDB", mapRedStatsTableName);
// Create the database
Connection connection = connectionFactory.getConnection();
Statement statement = connection.createStatement();
// Create the tables
try {
statement.execute(createDbSql);
connection.setCatalog(dbName);
statement = connection.createStatement();
statement.execute(createAuditLogTableSql);
statement.execute(createObjectsTableSql);
statement.execute(createMapRedStatsTableSql);
} finally {
statement.close();
connection.close();
}
}
/**
* Insert an audit log entry that represent a query with the supplied values.
*
* @param cliAuditLogHook the CLI audit log hook to use
* @param operation the type of Hive operation (e.g. ALTER TABLE, QUERY, etc)
* @param command the command / query string that was run
* @param inputTables the tables that were read by the query
* @param inputPartitions the partitions that were read by the query
* @param outputTables the tables that were modified by the query
* @param outputPartitions the partitions that were modified by the query
* @param mapRedStatsPerStage map between the name of the stage and map-reduce job statistics
* @param hiveConf Hive configuration
*
* @throws Exception if there's an error inserting into the audit log
*/
public static void insertAuditLogEntry(
CliAuditLogHook cliAuditLogHook,
HiveOperation operation,
String command,
List<Table> inputTables,
List<org.apache.hadoop.hive.ql.metadata.Partition> inputPartitions,
List<Table> outputTables,
List<org.apache.hadoop.hive.ql.metadata.Partition> outputPartitions,
Map<String, MapRedStats> mapRedStatsPerStage,
HiveConf hiveConf) throws Exception {
Set<ReadEntity> readEntities = new HashSet<>();
Set<WriteEntity> writeEntities = new HashSet<>();
for (Table t : inputTables) {
readEntities.add(new ReadEntity(t));
}
for (org.apache.hadoop.hive.ql.metadata.Partition p : inputPartitions) {
readEntities.add(new ReadEntity(p));
}
for (Table t : outputTables) {
writeEntities.add(new WriteEntity(t, WriteEntity.WriteType.DDL_NO_LOCK));
}
for (org.apache.hadoop.hive.ql.metadata.Partition p : outputPartitions) {
writeEntities.add(new WriteEntity(p, WriteEntity.WriteType.DDL_NO_LOCK));
}
SessionState sessionState = new SessionState(hiveConf);
// Map the HiveOperation to the ...ql.plan.HiveOperation when possible.
// ALTERTABLE_EXCHANGEPARTITION may be the only one that can't be mapped.
org.apache.hadoop.hive.ql.plan.HiveOperation commandType = null;
if (operation != null) {
for (org.apache.hadoop.hive.ql.plan.HiveOperation op :
org.apache.hadoop.hive.ql.plan.HiveOperation.values()) {
if (op.toString().equals(operation.toString())) {
commandType = op;
}
}
if (commandType == null) {
LOG.warn(String.format("Could not find corresponding enum for %s in %s",
operation.toString(),
org.apache.hadoop.hive.ql.plan.HiveOperation.class.getName()));
}
}
sessionState.setMapRedStats(mapRedStatsPerStage);
SessionState.setCurrentSessionState(sessionState);
// Run the hook
SemanticAnalyzer semanticAnalyzer = new SemanticAnalyzer(hiveConf);
QueryPlan queryPlan = new QueryPlan(
command,
semanticAnalyzer,
null,
commandType != null ? commandType.getOperationName() : null
);
HookContext hookContext = new HookContext(queryPlan, null);
hookContext.setInputs(readEntities);
hookContext.setOutputs(writeEntities);
hookContext.setConf(hiveConf);
cliAuditLogHook.run(hookContext);
}
/**
* Insert a thrift audit log entry that represents renaming a table.
*
* @param oldTable the source table
* @param newTable the table renamed to
* @param hiveConf Hive configuration
* @throws Exception if there's an error inserting into the audit log
*/
public static void insertThriftRenameTableLogEntry(
org.apache.hadoop.hive.metastore.api.Table oldTable,
org.apache.hadoop.hive.metastore.api.Table newTable,
HiveConf hiveConf) throws Exception {
final MetastoreAuditLogListener metastoreAuditLogListener =
new MetastoreAuditLogListener(hiveConf);
AlterTableEvent event = new AlterTableEvent(
oldTable,
newTable,
true,
null
);
metastoreAuditLogListener.onAlterTable(event);
}
/**
* Insert a thrift audit log entry that represents renaming a partition.
*
* @param hmsHandler the HMSHandler for the event
* @param oldPartition the old partition name
* @param newPartition the new partition name
* @param hiveConf Hive configuration
* @throws Exception if there's an error inserting into the audit log
*/
public static void insertThriftRenamePartitionLogEntry(
HiveMetaStore.HMSHandler hmsHandler,
Partition oldPartition,
Partition newPartition,
HiveConf hiveConf) throws Exception {
final MetastoreAuditLogListener metastoreAuditLogListener =
new MetastoreAuditLogListener(hiveConf);
AlterPartitionEvent event = new AlterPartitionEvent(
oldPartition,
newPartition,
true,
hmsHandler
);
metastoreAuditLogListener.onAlterPartition(event);
}
/**
* Get a hive conf filled with config values.
*
* @param mySqlDb the database hive should use to write the audit log to
* @param dbName the name of the database to be used
* @param auditCoreLogTableName the table name for the core audit log
* @param outputObjectsTableName the table name for the output objects
* @param mapRedStatsTableName the table name for the map-reduce stats
* @return the hive configuration with the config values set
*/
public static HiveConf getHiveConf(
EmbeddedMySqlDb mySqlDb,
String dbName,
String auditCoreLogTableName,
String outputObjectsTableName,
String mapRedStatsTableName) {
HiveConf hiveConf = new HiveConf();
hiveConf.set(CliAuditLogHook.JDBC_URL_KEY,
ReplicationTestUtils.getJdbcUrl(mySqlDb, dbName));
hiveConf.set(AuditCoreLogModule.TABLE_NAME_KEY, auditCoreLogTableName);
hiveConf.set(ObjectLogModule.TABLE_NAME_KEY, outputObjectsTableName);
hiveConf.set(MapRedStatsLogModule.TABLE_NAME_KEY, mapRedStatsTableName);
return hiveConf;
}
/**
* Get a hive conf for the metastore.
*
* @param mySqlDb the database hive should use to write the audit log to
* @param dbName the name of the database to be used
* @param auditCoreLogTableName the table name for the core audit log
* @param outputObjectsTableName the table name for the output objects
* @return the hive configuration with the config values set
*/
public static HiveConf getMetastoreHiveConf(
EmbeddedMySqlDb mySqlDb,
String dbName,
String auditCoreLogTableName,
String outputObjectsTableName) {
final TestDbCredentials testDbCredentials = new TestDbCredentials();
HiveConf hiveConf = new HiveConf();
hiveConf.set(
MetastoreAuditLogListener.JDBC_URL_KEY,
ReplicationTestUtils.getJdbcUrl(mySqlDb, dbName)
);
hiveConf.set(AuditCoreLogModule.TABLE_NAME_KEY, auditCoreLogTableName);
hiveConf.set(ObjectLogModule.TABLE_NAME_KEY, outputObjectsTableName);
hiveConf.set(
MetastoreAuditLogListener.DB_USERNAME,
testDbCredentials.getReadWriteUsername()
);
hiveConf.set(
MetastoreAuditLogListener.DB_PASSWORD,
testDbCredentials.getReadWritePassword()
);
return hiveConf;
}
}
| 9,448 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/EntityException.java | package com.airbnb.reair.hive.hooks;
/**
 * Exception raised when an error occurs while processing an Entity.
 */
public class EntityException extends Exception {
  /**
   * @param message description of the processing failure
   */
  public EntityException(String message) {
    super(message);
  }

  /**
   * @param message description of the processing failure
   * @param cause the underlying error
   */
  public EntityException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause the underlying error
   */
  public EntityException(Throwable cause) {
    super(cause);
  }
}
| 9,449 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/CliAuditLogHook.java | package com.airbnb.reair.hive.hooks;
import com.airbnb.reair.db.DbCredentials;
import com.airbnb.reair.utils.RetryableTask;
import com.airbnb.reair.utils.RetryingTaskRunner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Logger;
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Set;
/**
 * A post-execute hook that writes information about successfully executed
 * queries into a MySQL DB. Every successful query generates an entry in an
 * audit log table. In addition, every output object for a successful query
 * generates entries in the output objects table, and the map reduce stats
 * tables.
 */
public class CliAuditLogHook implements ExecuteWithHookContext {

  public static Logger LOG = Logger.getLogger(CliAuditLogHook.class);

  // Number of attempts to make
  private static final int NUM_ATTEMPTS = 10;
  // Will wait BASE_SLEEP * 2 ^ (attempt no.) between attempts
  private static final int BASE_SLEEP = 1;

  // Keys in hive-site.xml for the credentials used to write the audit log
  public static String DB_USERNAME =
      "airbnb.reair.audit_log.db.username";
  public static String DB_PASSWORD =
      "airbnb.reair.audit_log.db.password";

  // Key in hive-site.xml for the JDBC URL of the audit log DB
  public static String JDBC_URL_KEY = "airbnb.reair.audit_log.jdbc_url";

  protected DbCredentials dbCreds;

  public CliAuditLogHook() {
  }

  // Constructor used for testing
  public CliAuditLogHook(DbCredentials dbCreds) {
    this.dbCreds = dbCreds;
  }

  /**
   * Returns the credentials for the audit log DB, lazily reading them from
   * the configuration unless they were injected via the testing constructor.
   */
  protected DbCredentials getDbCreds(Configuration conf) {
    if (dbCreds == null) {
      dbCreds = new ConfigurationDbCredentials(conf, DB_USERNAME, DB_PASSWORD);
    }
    return dbCreds;
  }

  @Override
  public void run(HookContext hookContext) throws Exception {
    Set<ReadEntity> inputs = hookContext.getInputs();
    Set<WriteEntity> outputs = hookContext.getOutputs();
    UserGroupInformation ugi = hookContext.getUgi();
    run(hookContext, inputs, outputs, ugi);
  }

  /**
   * Writes the audit log entries for the given query, retrying with
   * exponential backoff on transient DB failures.
   *
   * @param hookContext the hook context passed to each hook
   * @param readEntities the entities that were read by the query
   * @param writeEntities the entities that were written by the query
   * @param userGroupInformation information about the user that ran the query
   * @throws Exception if there's an error inserting into the audit log
   */
  public void run(final HookContext hookContext,
                  final Set<ReadEntity> readEntities,
                  final Set<WriteEntity> writeEntities,
                  final UserGroupInformation userGroupInformation)
      throws Exception {
    HiveConf conf = hookContext.getConf();
    final SessionStateLite sessionStateLite =
        new SessionStateLite(hookContext.getQueryPlan());

    final DbCredentials dbCreds = getDbCreds(conf);

    final String jdbcUrl = conf.get(JDBC_URL_KEY);
    if (jdbcUrl == null) {
      throw new ConfigurationException(JDBC_URL_KEY
          + " is not defined in the conf!");
    }

    RetryingTaskRunner runner = new RetryingTaskRunner(NUM_ATTEMPTS,
        BASE_SLEEP);

    long startTime = System.currentTimeMillis();
    LOG.debug("Starting insert into audit log");
    runner.runWithRetries(new RetryableTask() {
      @Override
      public void run() throws Exception {
        // try-with-resources guarantees the connection is closed even when a
        // log module throws; previously every (failed) attempt leaked a
        // connection.
        try (Connection connection = DriverManager.getConnection(jdbcUrl,
            dbCreds.getReadWriteUsername(),
            dbCreds.getReadWritePassword())) {
          connection.setTransactionIsolation(
              Connection.TRANSACTION_READ_COMMITTED);
          // Turn off auto commit so that we can ensure that both the
          // audit log entry and the output rows appear at the same time.
          connection.setAutoCommit(false);
          try {
            runLogModules(
                connection,
                sessionStateLite,
                readEntities,
                writeEntities,
                userGroupInformation);
            connection.commit();
          } catch (Exception e) {
            // Roll back partial writes explicitly instead of relying on
            // driver-specific behavior when closing with an open transaction.
            connection.rollback();
            throw e;
          }
        }
      }
    });
    LOG.debug(String.format("Applying log modules took %d ms",
        System.currentTimeMillis() - startTime));
  }

  /**
   * Runs the individual audit log modules that make up this hook.
   *
   * @param connection connection to the DB for inserting data
   * @param sessionStateLite the session state that contains relevant config
   * @param readEntities the entities that were read by the query
   * @param writeEntities the entities that were written by the query
   * @param userGroupInformation information about the user that ran the query
   * @return the id column in sql for the core audit log entry for the query
   *
   * @throws Exception if there's an error running the modules
   */
  protected long runLogModules(final Connection connection,
                               final SessionStateLite sessionStateLite,
                               final Set<ReadEntity> readEntities,
                               final Set<WriteEntity> writeEntities,
                               final UserGroupInformation userGroupInformation)
      throws Exception {
    long auditLogId = new AuditCoreLogModule(
        connection,
        sessionStateLite,
        readEntities,
        writeEntities,
        userGroupInformation).run();
    new ObjectLogModule(
        connection,
        sessionStateLite,
        readEntities,
        writeEntities,
        auditLogId).run();
    new MapRedStatsLogModule(
        connection,
        sessionStateLite,
        auditLogId).run();
    return auditLogId;
  }
}
| 9,450 |
0 | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive | Create_ds/reair/hive-hooks/src/main/java/com/airbnb/reair/hive/hooks/HiveOperation.java | package com.airbnb.reair.hive.hooks;
/**
 * TODO: Migrate to HiveOperation from hive-exec once exchange partition types are added.
 */
public enum HiveOperation {
  // NOTE: constant order is preserved; ordinal() values must not change.
  ALTERDATABASE,
  ALTERINDEX_PROPS,
  ALTERINDEX_REBUILD,
  ALTERPARTITION_BUCKETNUM,
  ALTERPARTITION_FILEFORMAT,
  ALTERPARTITION_LOCATION,
  ALTERPARTITION_MERGEFILES,
  ALTERPARTITION_PROTECTMODE,
  ALTERPARTITION_SERDEPROPERTIES,
  ALTERPARTITION_SERIALIZER,
  ALTERTABLE_ADDCOLS,
  ALTERTABLE_ADDPARTS,
  ALTERTABLE_ARCHIVE,
  ALTERTABLE_BUCKETNUM,
  ALTERTABLE_CLUSTER_SORT,
  ALTERTABLE_DROPPARTS,
  ALTERTABLE_EXCHANGEPARTITION,
  ALTERTABLE_FILEFORMAT,
  ALTERTABLE_LOCATION,
  ALTERTABLE_MERGEFILES,
  ALTERTABLE_PROPERTIES,
  ALTERTABLE_PROTECTMODE,
  ALTERTABLE_RENAME,
  ALTERTABLE_RENAMECOL,
  ALTERTABLE_RENAMEPART,
  ALTERTABLE_REPLACECOLS,
  ALTERTABLE_SERDEPROPERTIES,
  ALTERTABLE_SERIALIZER,
  ALTERTABLE_SKEWED,
  ALTERTABLE_TOUCH,
  ALTERTABLE_UNARCHIVE,
  ALTERTBLPART_SKEWED_LOCATION,
  ALTERVIEW_PROPERTIES,
  ALTERVIEW_RENAME,
  ANALYZE_TABLE,
  CREATEDATABASE,
  CREATEFUNCTION,
  CREATEINDEX,
  CREATEMACRO,
  CREATEROLE,
  CREATETABLE,
  CREATETABLE_AS_SELECT,
  CREATEVIEW,
  DESCDATABASE,
  DESCFUNCTION,
  DESCTABLE,
  DROPDATABASE,
  DROPFUNCTION,
  DROPINDEX,
  DROPMACRO,
  DROPROLE,
  DROPTABLE,
  DROPVIEW,
  DROPVIEW_PROPERTIES,
  EXPLAIN,
  EXPORT,
  GRANT_PRIVILEGE,
  GRANT_ROLE,
  IMPORT,
  LOAD,
  LOCKTABLE,
  MSCK,
  QUERY,
  REVOKE_PRIVILEGE,
  REVOKE_ROLE,
  SHOW_CREATETABLE,
  SHOW_TABLESTATUS,
  SHOW_TBLPROPERTIES,
  SHOW_GRANT,
  SHOW_ROLES,
  SHOW_ROLE_GRANT,
  SHOWDATABASES,
  SHOWCOLUMNS,
  SHOWFUNCTIONS,
  SHOWINDEXES,
  SHOWLOCKS,
  SHOWPARTITIONS,
  SHOWTABLES,
  SWITCHDATABASE,
  THRIFT_ADD_PARTITION,
  THRIFT_ALTER_PARTITION,
  THRIFT_ALTER_TABLE,
  THRIFT_CREATE_DATABASE,
  THRIFT_CREATE_TABLE,
  THRIFT_DROP_DATABASE,
  THRIFT_DROP_PARTITION,
  THRIFT_DROP_TABLE,
  TRUNCATETABLE,
  UNLOCKTABLE;

  /**
   * Checks whether the given operation originated from the Hive metastore
   * Thrift server (one of the THRIFT_* operations) rather than the CLI.
   *
   * @param operation the type of operation; may be null
   * @return true if the given operation is from the Thrift server
   */
  public static boolean isThriftOperation(HiveOperation operation) {
    // A null operation compares unequal to every constant, so no explicit
    // null guard is needed.
    return operation == THRIFT_ADD_PARTITION
        || operation == THRIFT_ALTER_PARTITION
        || operation == THRIFT_ALTER_TABLE
        || operation == THRIFT_CREATE_DATABASE
        || operation == THRIFT_CREATE_TABLE
        || operation == THRIFT_DROP_DATABASE
        || operation == THRIFT_DROP_PARTITION
        || operation == THRIFT_DROP_TABLE;
  }
}
| 9,451 |
0 | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication/thrift/TReplicationOperation.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.airbnb.di.hive.replication.thrift;
import org.apache.thrift.TEnum;
public enum TReplicationOperation implements org.apache.thrift.TEnum {
  // NOTE(review): these integer values mirror the Thrift IDL and are part of
  // the wire format -- do not renumber by hand; regenerate from the IDL.
  COPY_UNPARTITIONED_TABLE(0),
  COPY_PARTITIONED_TABLE(1),
  COPY_PARTITION(2),
  COPY_PARTITIONS(3),
  DROP_TABLE(4),
  DROP_PARTITION(5),
  RENAME_TABLE(6),
  RENAME_PARTITION(7);
  private final int value;
  private TReplicationOperation(int value) {
    this.value = value;
  }
  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }
  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   * @return null if the value is not found.
   */
  public static TReplicationOperation findByValue(int value) {
    switch (value) {
      case 0:
        return COPY_UNPARTITIONED_TABLE;
      case 1:
        return COPY_PARTITIONED_TABLE;
      case 2:
        return COPY_PARTITION;
      case 3:
        return COPY_PARTITIONS;
      case 4:
        return DROP_TABLE;
      case 5:
        return DROP_PARTITION;
      case 6:
        return RENAME_TABLE;
      case 7:
        return RENAME_PARTITION;
      default:
        return null;
    }
  }
}
| 9,452 |
0 | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication/thrift/TReplicationStatus.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.airbnb.di.hive.replication.thrift;
import org.apache.thrift.TEnum;
public enum TReplicationStatus implements org.apache.thrift.TEnum {
  // NOTE(review): these integer values mirror the Thrift IDL and are part of
  // the wire format -- do not renumber by hand; regenerate from the IDL.
  PENDING(0),
  RUNNING(1),
  SUCCESSFUL(2),
  FAILED(3),
  NOT_COMPLETABLE(4);
  private final int value;
  private TReplicationStatus(int value) {
    this.value = value;
  }
  /**
   * Get the integer value of this enum value, as defined in the Thrift IDL.
   */
  public int getValue() {
    return value;
  }
  /**
   * Find a the enum type by its integer value, as defined in the Thrift IDL.
   * @return null if the value is not found.
   */
  public static TReplicationStatus findByValue(int value) {
    switch (value) {
      case 0:
        return PENDING;
      case 1:
        return RUNNING;
      case 2:
        return SUCCESSFUL;
      case 3:
        return FAILED;
      case 4:
        return NOT_COMPLETABLE;
      default:
        return null;
    }
  }
}
| 9,453 |
0 | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication/thrift/TReplicationService.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.airbnb.di.hive.replication.thrift;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class TReplicationService {
  /**
   * Synchronous contract for the replication service: job listing, job
   * lookup by id, pause/resume control, and a long-valued lag metric.
   * NOTE(review): paging semantics of {@code afterId}/{@code maxJobs} and the
   * units of {@code getLag} are inferred from names -- confirm against the
   * server implementation. Thrift-generated code; regenerate from the IDL
   * rather than hand-editing.
   */
  public interface Iface {
    public List<TReplicationJob> getActiveJobs(long afterId, int maxJobs) throws org.apache.thrift.TException;
    public List<TReplicationJob> getRetiredJobs(long afterId, int maxJobs) throws org.apache.thrift.TException;
    public Map<Long,TReplicationJob> getJobs(List<Long> ids) throws org.apache.thrift.TException;
    public void pause() throws org.apache.thrift.TException;
    public void resume() throws org.apache.thrift.TException;
    public long getLag() throws org.apache.thrift.TException;
  }
  /**
   * Callback-based variant of {@code Iface}: each method returns immediately
   * and delivers its result (or error) to the supplied
   * {@code AsyncMethodCallback}. Thrift-generated code; regenerate from the
   * IDL rather than hand-editing.
   */
  public interface AsyncIface {
    public void getActiveJobs(long afterId, int maxJobs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    public void getRetiredJobs(long afterId, int maxJobs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    public void getJobs(List<Long> ids, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    public void pause(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    public void resume(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
    public void getLag(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  }
  /**
   * Blocking Thrift client for the replication service. Each method
   * serializes its {@code *_args} struct via {@code send_*} and decodes the
   * matching {@code *_result} struct via {@code recv_*}; void methods still
   * call {@code recv_*} so server-side exceptions surface to the caller.
   * Thrift-generated code; regenerate from the IDL rather than hand-editing.
   */
  public static class Client extends org.apache.thrift.TServiceClient implements Iface {
    public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
      public Factory() {}
      public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
        return new Client(prot);
      }
      public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
        return new Client(iprot, oprot);
      }
    }
    public Client(org.apache.thrift.protocol.TProtocol prot)
    {
      super(prot, prot);
    }
    public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
      super(iprot, oprot);
    }
    public List<TReplicationJob> getActiveJobs(long afterId, int maxJobs) throws org.apache.thrift.TException
    {
      send_getActiveJobs(afterId, maxJobs);
      return recv_getActiveJobs();
    }
    public void send_getActiveJobs(long afterId, int maxJobs) throws org.apache.thrift.TException
    {
      getActiveJobs_args args = new getActiveJobs_args();
      args.setAfterId(afterId);
      args.setMaxJobs(maxJobs);
      sendBase("getActiveJobs", args);
    }
    public List<TReplicationJob> recv_getActiveJobs() throws org.apache.thrift.TException
    {
      getActiveJobs_result result = new getActiveJobs_result();
      receiveBase(result, "getActiveJobs");
      if (result.isSetSuccess()) {
        return result.success;
      }
      // A value-returning call whose result field is unset means the reply
      // frame was malformed or incomplete.
      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getActiveJobs failed: unknown result");
    }
    public List<TReplicationJob> getRetiredJobs(long afterId, int maxJobs) throws org.apache.thrift.TException
    {
      send_getRetiredJobs(afterId, maxJobs);
      return recv_getRetiredJobs();
    }
    public void send_getRetiredJobs(long afterId, int maxJobs) throws org.apache.thrift.TException
    {
      getRetiredJobs_args args = new getRetiredJobs_args();
      args.setAfterId(afterId);
      args.setMaxJobs(maxJobs);
      sendBase("getRetiredJobs", args);
    }
    public List<TReplicationJob> recv_getRetiredJobs() throws org.apache.thrift.TException
    {
      getRetiredJobs_result result = new getRetiredJobs_result();
      receiveBase(result, "getRetiredJobs");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRetiredJobs failed: unknown result");
    }
    public Map<Long,TReplicationJob> getJobs(List<Long> ids) throws org.apache.thrift.TException
    {
      send_getJobs(ids);
      return recv_getJobs();
    }
    public void send_getJobs(List<Long> ids) throws org.apache.thrift.TException
    {
      getJobs_args args = new getJobs_args();
      args.setIds(ids);
      sendBase("getJobs", args);
    }
    public Map<Long,TReplicationJob> recv_getJobs() throws org.apache.thrift.TException
    {
      getJobs_result result = new getJobs_result();
      receiveBase(result, "getJobs");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getJobs failed: unknown result");
    }
    public void pause() throws org.apache.thrift.TException
    {
      send_pause();
      recv_pause();
    }
    public void send_pause() throws org.apache.thrift.TException
    {
      pause_args args = new pause_args();
      sendBase("pause", args);
    }
    public void recv_pause() throws org.apache.thrift.TException
    {
      pause_result result = new pause_result();
      receiveBase(result, "pause");
      return;
    }
    public void resume() throws org.apache.thrift.TException
    {
      send_resume();
      recv_resume();
    }
    public void send_resume() throws org.apache.thrift.TException
    {
      resume_args args = new resume_args();
      sendBase("resume", args);
    }
    public void recv_resume() throws org.apache.thrift.TException
    {
      resume_result result = new resume_result();
      receiveBase(result, "resume");
      return;
    }
    public long getLag() throws org.apache.thrift.TException
    {
      send_getLag();
      return recv_getLag();
    }
    public void send_getLag() throws org.apache.thrift.TException
    {
      getLag_args args = new getLag_args();
      sendBase("getLag", args);
    }
    public long recv_getLag() throws org.apache.thrift.TException
    {
      getLag_result result = new getLag_result();
      receiveBase(result, "getLag");
      if (result.isSetSuccess()) {
        return result.success;
      }
      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getLag failed: unknown result");
    }
  }
  /**
   * Non-blocking Thrift client. Each public method registers a {@code *_call}
   * with the {@code TAsyncClientManager}; {@code write_args} serializes the
   * request, and {@code getResult()} re-parses the captured response frame
   * through the blocking {@code Client}'s {@code recv_*} once the call has
   * reached RESPONSE_READ. Thrift-generated code; regenerate from the IDL
   * rather than hand-editing.
   */
  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
    public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
      private org.apache.thrift.async.TAsyncClientManager clientManager;
      private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
        this.clientManager = clientManager;
        this.protocolFactory = protocolFactory;
      }
      public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
        return new AsyncClient(protocolFactory, clientManager, transport);
      }
    }
    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
      super(protocolFactory, clientManager, transport);
    }
    public void getActiveJobs(long afterId, int maxJobs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
      checkReady();
      getActiveJobs_call method_call = new getActiveJobs_call(afterId, maxJobs, resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getActiveJobs_call extends org.apache.thrift.async.TAsyncMethodCall {
      private long afterId;
      private int maxJobs;
      public getActiveJobs_call(long afterId, int maxJobs, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
        this.afterId = afterId;
        this.maxJobs = maxJobs;
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getActiveJobs", org.apache.thrift.protocol.TMessageType.CALL, 0));
        getActiveJobs_args args = new getActiveJobs_args();
        args.setAfterId(afterId);
        args.setMaxJobs(maxJobs);
        args.write(prot);
        prot.writeMessageEnd();
      }
      public List<TReplicationJob> getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getActiveJobs();
      }
    }
    public void getRetiredJobs(long afterId, int maxJobs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
      checkReady();
      getRetiredJobs_call method_call = new getRetiredJobs_call(afterId, maxJobs, resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getRetiredJobs_call extends org.apache.thrift.async.TAsyncMethodCall {
      private long afterId;
      private int maxJobs;
      public getRetiredJobs_call(long afterId, int maxJobs, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
        this.afterId = afterId;
        this.maxJobs = maxJobs;
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRetiredJobs", org.apache.thrift.protocol.TMessageType.CALL, 0));
        getRetiredJobs_args args = new getRetiredJobs_args();
        args.setAfterId(afterId);
        args.setMaxJobs(maxJobs);
        args.write(prot);
        prot.writeMessageEnd();
      }
      public List<TReplicationJob> getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getRetiredJobs();
      }
    }
    public void getJobs(List<Long> ids, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
      checkReady();
      getJobs_call method_call = new getJobs_call(ids, resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getJobs_call extends org.apache.thrift.async.TAsyncMethodCall {
      private List<Long> ids;
      public getJobs_call(List<Long> ids, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
        this.ids = ids;
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getJobs", org.apache.thrift.protocol.TMessageType.CALL, 0));
        getJobs_args args = new getJobs_args();
        args.setIds(ids);
        args.write(prot);
        prot.writeMessageEnd();
      }
      public Map<Long,TReplicationJob> getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getJobs();
      }
    }
    public void pause(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
      checkReady();
      pause_call method_call = new pause_call(resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class pause_call extends org.apache.thrift.async.TAsyncMethodCall {
      public pause_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("pause", org.apache.thrift.protocol.TMessageType.CALL, 0));
        pause_args args = new pause_args();
        args.write(prot);
        prot.writeMessageEnd();
      }
      public void getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        (new Client(prot)).recv_pause();
      }
    }
    public void resume(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
      checkReady();
      resume_call method_call = new resume_call(resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class resume_call extends org.apache.thrift.async.TAsyncMethodCall {
      public resume_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("resume", org.apache.thrift.protocol.TMessageType.CALL, 0));
        resume_args args = new resume_args();
        args.write(prot);
        prot.writeMessageEnd();
      }
      public void getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        (new Client(prot)).recv_resume();
      }
    }
    public void getLag(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
      checkReady();
      getLag_call method_call = new getLag_call(resultHandler, this, ___protocolFactory, ___transport);
      this.___currentMethod = method_call;
      ___manager.call(method_call);
    }
    public static class getLag_call extends org.apache.thrift.async.TAsyncMethodCall {
      public getLag_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
        super(client, protocolFactory, transport, resultHandler, false);
      }
      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getLag", org.apache.thrift.protocol.TMessageType.CALL, 0));
        getLag_args args = new getLag_args();
        args.write(prot);
        prot.writeMessageEnd();
      }
      public long getResult() throws org.apache.thrift.TException {
        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
          throw new IllegalStateException("Method call not finished!");
        }
        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
        return (new Client(prot)).recv_getLag();
      }
    }
  }
public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
public Processor(I iface) {
super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
}
protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
super(iface, getProcessMap(processMap));
}
private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
processMap.put("getActiveJobs", new getActiveJobs());
processMap.put("getRetiredJobs", new getRetiredJobs());
processMap.put("getJobs", new getJobs());
processMap.put("pause", new pause());
processMap.put("resume", new resume());
processMap.put("getLag", new getLag());
return processMap;
}
public static class getActiveJobs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getActiveJobs_args> {
public getActiveJobs() {
super("getActiveJobs");
}
public getActiveJobs_args getEmptyArgsInstance() {
return new getActiveJobs_args();
}
protected boolean isOneway() {
return false;
}
public getActiveJobs_result getResult(I iface, getActiveJobs_args args) throws org.apache.thrift.TException {
getActiveJobs_result result = new getActiveJobs_result();
result.success = iface.getActiveJobs(args.afterId, args.maxJobs);
return result;
}
}
public static class getRetiredJobs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRetiredJobs_args> {
public getRetiredJobs() {
super("getRetiredJobs");
}
public getRetiredJobs_args getEmptyArgsInstance() {
return new getRetiredJobs_args();
}
protected boolean isOneway() {
return false;
}
public getRetiredJobs_result getResult(I iface, getRetiredJobs_args args) throws org.apache.thrift.TException {
getRetiredJobs_result result = new getRetiredJobs_result();
result.success = iface.getRetiredJobs(args.afterId, args.maxJobs);
return result;
}
}
public static class getJobs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getJobs_args> {
public getJobs() {
super("getJobs");
}
public getJobs_args getEmptyArgsInstance() {
return new getJobs_args();
}
protected boolean isOneway() {
return false;
}
public getJobs_result getResult(I iface, getJobs_args args) throws org.apache.thrift.TException {
getJobs_result result = new getJobs_result();
result.success = iface.getJobs(args.ids);
return result;
}
}
public static class pause<I extends Iface> extends org.apache.thrift.ProcessFunction<I, pause_args> {
public pause() {
super("pause");
}
public pause_args getEmptyArgsInstance() {
return new pause_args();
}
protected boolean isOneway() {
return false;
}
public pause_result getResult(I iface, pause_args args) throws org.apache.thrift.TException {
pause_result result = new pause_result();
iface.pause();
return result;
}
}
    /**
     * Server-side binding for the {@code resume} RPC. Mirror image of
     * {@code pause}: void call, empty result struct returned as the ack.
     */
    public static class resume<I extends Iface> extends org.apache.thrift.ProcessFunction<I, resume_args> {
      public resume() {
        super("resume");
      }
      public resume_args getEmptyArgsInstance() {
        return new resume_args();
      }
      // Not a oneway call: the empty result is serialized back as an ack.
      protected boolean isOneway() {
        return false;
      }
      public resume_result getResult(I iface, resume_args args) throws org.apache.thrift.TException {
        resume_result result = new resume_result();
        iface.resume();
        return result;
      }
    }
    /**
     * Server-side binding for the {@code getLag} RPC. Returns a primitive
     * i64, so the generated result struct needs its isSet bit flipped
     * explicitly before serialization.
     */
    public static class getLag<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getLag_args> {
      public getLag() {
        super("getLag");
      }
      public getLag_args getEmptyArgsInstance() {
        return new getLag_args();
      }
      // Not a oneway call: a response frame is always written back.
      protected boolean isOneway() {
        return false;
      }
      public getLag_result getResult(I iface, getLag_args args) throws org.apache.thrift.TException {
        getLag_result result = new getLag_result();
        result.success = iface.getLag();
        // Primitive field: mark as set so the scheme serializes it.
        result.setSuccessIsSet(true);
        return result;
      }
    }
}
public static class AsyncProcessor<I extends AsyncIface> extends org.apache.thrift.TBaseAsyncProcessor<I> {
private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName());
    // Wires an AsyncIface handler into the Thrift async server runtime.
    public AsyncProcessor(I iface) {
      super(iface, getProcessMap(new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
    }
    // Subclass hook: lets derived processors pre-populate / extend the map.
    protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
      super(iface, getProcessMap(processMap));
    }
    // Registers one AsyncProcessFunction per RPC, keyed by wire method name.
    private static <I extends AsyncIface> Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase,?>> getProcessMap(Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
      processMap.put("getActiveJobs", new getActiveJobs());
      processMap.put("getRetiredJobs", new getRetiredJobs());
      processMap.put("getJobs", new getJobs());
      processMap.put("pause", new pause());
      processMap.put("resume", new resume());
      processMap.put("getLag", new getLag());
      return processMap;
    }
    /**
     * Async server-side binding for the {@code getActiveJobs} RPC. Instead of
     * returning a result synchronously, {@link #start} hands the call to the
     * async handler together with a callback that serializes the outcome
     * (or a TApplicationException on failure) back onto the originating
     * frame buffer.
     */
    public static class getActiveJobs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getActiveJobs_args, List<TReplicationJob>> {
      public getActiveJobs() {
        super("getActiveJobs");
      }
      public getActiveJobs_args getEmptyArgsInstance() {
        return new getActiveJobs_args();
      }
      // Builds the callback that writes the handler's outcome to this call's frame.
      public AsyncMethodCallback<List<TReplicationJob>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<List<TReplicationJob>>() {
          public void onComplete(List<TReplicationJob> o) {
            getActiveJobs_result result = new getActiveJobs_result();
            result.success = o;
            try {
              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            // Only reached when the response could not be written: drop the frame.
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getActiveJobs_result result = new getActiveJobs_result();
            // All handler failures are reported as an INTERNAL_ERROR application exception.
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb,msg,msgType,seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      // Kicks off the async handler; the callback above completes the call.
      public void start(I iface, getActiveJobs_args args, org.apache.thrift.async.AsyncMethodCallback<List<TReplicationJob>> resultHandler) throws TException {
        iface.getActiveJobs(args.afterId, args.maxJobs,resultHandler);
      }
    }
    /**
     * Async server-side binding for the {@code getRetiredJobs} RPC;
     * structurally identical to the other async bindings: delegate to the
     * handler, serialize the result or an INTERNAL_ERROR via the callback.
     */
    public static class getRetiredJobs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRetiredJobs_args, List<TReplicationJob>> {
      public getRetiredJobs() {
        super("getRetiredJobs");
      }
      public getRetiredJobs_args getEmptyArgsInstance() {
        return new getRetiredJobs_args();
      }
      public AsyncMethodCallback<List<TReplicationJob>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<List<TReplicationJob>>() {
          public void onComplete(List<TReplicationJob> o) {
            getRetiredJobs_result result = new getRetiredJobs_result();
            result.success = o;
            try {
              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            // Only reached when the response could not be written: drop the frame.
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getRetiredJobs_result result = new getRetiredJobs_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb,msg,msgType,seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, getRetiredJobs_args args, org.apache.thrift.async.AsyncMethodCallback<List<TReplicationJob>> resultHandler) throws TException {
        iface.getRetiredJobs(args.afterId, args.maxJobs,resultHandler);
      }
    }
    /**
     * Async server-side binding for the {@code getJobs} RPC; returns a
     * {@code Map<Long, TReplicationJob>} via the completion callback.
     */
    public static class getJobs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getJobs_args, Map<Long,TReplicationJob>> {
      public getJobs() {
        super("getJobs");
      }
      public getJobs_args getEmptyArgsInstance() {
        return new getJobs_args();
      }
      public AsyncMethodCallback<Map<Long,TReplicationJob>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Map<Long,TReplicationJob>>() {
          public void onComplete(Map<Long,TReplicationJob> o) {
            getJobs_result result = new getJobs_result();
            result.success = o;
            try {
              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            // Only reached when the response could not be written: drop the frame.
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getJobs_result result = new getJobs_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb,msg,msgType,seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, getJobs_args args, org.apache.thrift.async.AsyncMethodCallback<Map<Long,TReplicationJob>> resultHandler) throws TException {
        iface.getJobs(args.ids,resultHandler);
      }
    }
    /**
     * Async server-side binding for the void {@code pause} RPC; the empty
     * result struct is still serialized back so the client gets an ack.
     */
    public static class pause<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, pause_args, Void> {
      public pause() {
        super("pause");
      }
      public pause_args getEmptyArgsInstance() {
        return new pause_args();
      }
      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Void>() {
          public void onComplete(Void o) {
            pause_result result = new pause_result();
            try {
              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            // Only reached when the response could not be written: drop the frame.
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            pause_result result = new pause_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb,msg,msgType,seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, pause_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
        iface.pause(resultHandler);
      }
    }
    /**
     * Async server-side binding for the void {@code resume} RPC; mirror
     * image of the async {@code pause} binding.
     */
    public static class resume<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, resume_args, Void> {
      public resume() {
        super("resume");
      }
      public resume_args getEmptyArgsInstance() {
        return new resume_args();
      }
      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Void>() {
          public void onComplete(Void o) {
            resume_result result = new resume_result();
            try {
              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            // Only reached when the response could not be written: drop the frame.
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            resume_result result = new resume_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb,msg,msgType,seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, resume_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
        iface.resume(resultHandler);
      }
    }
    /**
     * Async server-side binding for the {@code getLag} RPC. Returns a
     * primitive i64, so the completion callback must flip the result's
     * isSet bit before serializing.
     */
    public static class getLag<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getLag_args, Long> {
      public getLag() {
        super("getLag");
      }
      public getLag_args getEmptyArgsInstance() {
        return new getLag_args();
      }
      public AsyncMethodCallback<Long> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
        final org.apache.thrift.AsyncProcessFunction fcall = this;
        return new AsyncMethodCallback<Long>() {
          public void onComplete(Long o) {
            getLag_result result = new getLag_result();
            result.success = o;
            // Primitive field: mark as set so the scheme serializes it.
            result.setSuccessIsSet(true);
            try {
              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
              return;
            } catch (Exception e) {
              LOGGER.error("Exception writing to internal frame buffer", e);
            }
            // Only reached when the response could not be written: drop the frame.
            fb.close();
          }
          public void onError(Exception e) {
            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
            org.apache.thrift.TBase msg;
            getLag_result result = new getLag_result();
            {
              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
            }
            try {
              fcall.sendResponse(fb,msg,msgType,seqid);
              return;
            } catch (Exception ex) {
              LOGGER.error("Exception writing to internal frame buffer", ex);
            }
            fb.close();
          }
        };
      }
      protected boolean isOneway() {
        return false;
      }
      public void start(I iface, getLag_args args, org.apache.thrift.async.AsyncMethodCallback<Long> resultHandler) throws TException {
        iface.getLag(resultHandler);
      }
    }
}
public static class getActiveJobs_args implements org.apache.thrift.TBase<getActiveJobs_args, getActiveJobs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getActiveJobs_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getActiveJobs_args");
private static final org.apache.thrift.protocol.TField AFTER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("afterId", org.apache.thrift.protocol.TType.I64, (short)1);
private static final org.apache.thrift.protocol.TField MAX_JOBS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxJobs", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getActiveJobs_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getActiveJobs_argsTupleSchemeFactory());
}
public long afterId; // required
public int maxJobs; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
AFTER_ID((short)1, "afterId"),
MAX_JOBS((short)2, "maxJobs");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // AFTER_ID
return AFTER_ID;
case 2: // MAX_JOBS
return MAX_JOBS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
private static final int __AFTERID_ISSET_ID = 0;
private static final int __MAXJOBS_ISSET_ID = 1;
private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.AFTER_ID, new org.apache.thrift.meta_data.FieldMetaData("afterId", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.MAX_JOBS, new org.apache.thrift.meta_data.FieldMetaData("maxJobs", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getActiveJobs_args.class, metaDataMap);
}
public getActiveJobs_args() {
}
public getActiveJobs_args(
long afterId,
int maxJobs)
{
this();
this.afterId = afterId;
setAfterIdIsSet(true);
this.maxJobs = maxJobs;
setMaxJobsIsSet(true);
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getActiveJobs_args(getActiveJobs_args other) {
__isset_bitfield = other.__isset_bitfield;
this.afterId = other.afterId;
this.maxJobs = other.maxJobs;
}
public getActiveJobs_args deepCopy() {
return new getActiveJobs_args(this);
}
@Override
public void clear() {
setAfterIdIsSet(false);
this.afterId = 0;
setMaxJobsIsSet(false);
this.maxJobs = 0;
}
public long getAfterId() {
return this.afterId;
}
public getActiveJobs_args setAfterId(long afterId) {
this.afterId = afterId;
setAfterIdIsSet(true);
return this;
}
public void unsetAfterId() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AFTERID_ISSET_ID);
}
/** Returns true if field afterId is set (has been assigned a value) and false otherwise */
public boolean isSetAfterId() {
return EncodingUtils.testBit(__isset_bitfield, __AFTERID_ISSET_ID);
}
public void setAfterIdIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AFTERID_ISSET_ID, value);
}
public int getMaxJobs() {
return this.maxJobs;
}
public getActiveJobs_args setMaxJobs(int maxJobs) {
this.maxJobs = maxJobs;
setMaxJobsIsSet(true);
return this;
}
public void unsetMaxJobs() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
}
/** Returns true if field maxJobs is set (has been assigned a value) and false otherwise */
public boolean isSetMaxJobs() {
return EncodingUtils.testBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
}
public void setMaxJobsIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXJOBS_ISSET_ID, value);
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case AFTER_ID:
if (value == null) {
unsetAfterId();
} else {
setAfterId((Long)value);
}
break;
case MAX_JOBS:
if (value == null) {
unsetMaxJobs();
} else {
setMaxJobs((Integer)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case AFTER_ID:
return Long.valueOf(getAfterId());
case MAX_JOBS:
return Integer.valueOf(getMaxJobs());
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case AFTER_ID:
return isSetAfterId();
case MAX_JOBS:
return isSetMaxJobs();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getActiveJobs_args)
return this.equals((getActiveJobs_args)that);
return false;
}
    /**
     * Field-by-field equality. Both fields are primitives and therefore
     * always "present": the generated presence flags below are constant-true,
     * so the whole method reduces to plain value comparison of afterId and
     * maxJobs.
     */
    public boolean equals(getActiveJobs_args that) {
      if (that == null)
        return false;
      boolean this_present_afterId = true;
      boolean that_present_afterId = true;
      if (this_present_afterId || that_present_afterId) {
        if (!(this_present_afterId && that_present_afterId))
          return false;
        if (this.afterId != that.afterId)
          return false;
      }
      boolean this_present_maxJobs = true;
      boolean that_present_maxJobs = true;
      if (this_present_maxJobs || that_present_maxJobs) {
        if (!(this_present_maxJobs && that_present_maxJobs))
          return false;
        if (this.maxJobs != that.maxJobs)
          return false;
      }
      return true;
    }
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getActiveJobs_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetAfterId()).compareTo(other.isSetAfterId());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetAfterId()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.afterId, other.afterId);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetMaxJobs()).compareTo(other.isSetMaxJobs());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetMaxJobs()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxJobs, other.maxJobs);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getActiveJobs_args(");
boolean first = true;
sb.append("afterId:");
sb.append(this.afterId);
first = false;
if (!first) sb.append(", ");
sb.append("maxJobs:");
sb.append(this.maxJobs);
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getActiveJobs_argsStandardSchemeFactory implements SchemeFactory {
public getActiveJobs_argsStandardScheme getScheme() {
return new getActiveJobs_argsStandardScheme();
}
}
    /**
     * Field-id-tagged (standard binary/compact) serialization for
     * getActiveJobs_args. Read loops over tagged fields until STOP and
     * tolerates unknown or wrongly-typed fields by skipping them; write
     * emits the fields in declaration order. Wire-format sensitive — the
     * read/write call sequence must not be reordered.
     */
    private static class getActiveJobs_argsStandardScheme extends StandardScheme<getActiveJobs_args> {
      public void read(org.apache.thrift.protocol.TProtocol iprot, getActiveJobs_args struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
        {
          schemeField = iprot.readFieldBegin();
          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
          }
          switch (schemeField.id) {
            case 1: // AFTER_ID
              if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                struct.afterId = iprot.readI64();
                struct.setAfterIdIsSet(true);
              } else {
                // Type mismatch: skip the value rather than fail the whole struct.
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            case 2: // MAX_JOBS
              if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                struct.maxJobs = iprot.readI32();
                struct.setMaxJobsIsSet(true);
              } else {
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
            default:
              // Unknown field id (e.g. newer IDL revision): skip for forward compatibility.
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
        // check for required fields of primitive type, which can't be checked in the validate method
        struct.validate();
      }
      public void write(org.apache.thrift.protocol.TProtocol oprot, getActiveJobs_args struct) throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
        oprot.writeFieldBegin(AFTER_ID_FIELD_DESC);
        oprot.writeI64(struct.afterId);
        oprot.writeFieldEnd();
        oprot.writeFieldBegin(MAX_JOBS_FIELD_DESC);
        oprot.writeI32(struct.maxJobs);
        oprot.writeFieldEnd();
        oprot.writeFieldStop();
        oprot.writeStructEnd();
      }
    }
private static class getActiveJobs_argsTupleSchemeFactory implements SchemeFactory {
public getActiveJobs_argsTupleScheme getScheme() {
return new getActiveJobs_argsTupleScheme();
}
}
    /**
     * Compact tuple serialization for getActiveJobs_args: a leading bitset
     * records which fields are set, then set fields follow in declaration
     * order with no per-field tags. Reader and writer must agree on the
     * bit positions (0 = afterId, 1 = maxJobs) — do not reorder.
     */
    private static class getActiveJobs_argsTupleScheme extends TupleScheme<getActiveJobs_args> {
      @Override
      public void write(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_args struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetAfterId()) {
          optionals.set(0);
        }
        if (struct.isSetMaxJobs()) {
          optionals.set(1);
        }
        oprot.writeBitSet(optionals, 2);
        if (struct.isSetAfterId()) {
          oprot.writeI64(struct.afterId);
        }
        if (struct.isSetMaxJobs()) {
          oprot.writeI32(stru.maxJobs);
        }
      }
      @Override
      public void read(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_args struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(2);
        if (incoming.get(0)) {
          struct.afterId = iprot.readI64();
          struct.setAfterIdIsSet(true);
        }
        if (incoming.get(1)) {
          struct.maxJobs = iprot.readI32();
          struct.setMaxJobsIsSet(true);
        }
      }
    }
}
public static class getActiveJobs_result implements org.apache.thrift.TBase<getActiveJobs_result, getActiveJobs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getActiveJobs_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getActiveJobs_result");
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getActiveJobs_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getActiveJobs_resultTupleSchemeFactory());
}
public List<TReplicationJob> success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TReplicationJob.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getActiveJobs_result.class, metaDataMap);
}
public getActiveJobs_result() {
}
public getActiveJobs_result(
List<TReplicationJob> success)
{
this();
this.success = success;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getActiveJobs_result(getActiveJobs_result other) {
if (other.isSetSuccess()) {
List<TReplicationJob> __this__success = new ArrayList<TReplicationJob>(other.success.size());
for (TReplicationJob other_element : other.success) {
__this__success.add(new TReplicationJob(other_element));
}
this.success = __this__success;
}
}
public getActiveJobs_result deepCopy() {
return new getActiveJobs_result(this);
}
@Override
public void clear() {
this.success = null;
}
public int getSuccessSize() {
return (this.success == null) ? 0 : this.success.size();
}
public java.util.Iterator<TReplicationJob> getSuccessIterator() {
return (this.success == null) ? null : this.success.iterator();
}
public void addToSuccess(TReplicationJob elem) {
if (this.success == null) {
this.success = new ArrayList<TReplicationJob>();
}
this.success.add(elem);
}
public List<TReplicationJob> getSuccess() {
return this.success;
}
public getActiveJobs_result setSuccess(List<TReplicationJob> success) {
this.success = success;
return this;
}
public void unsetSuccess() {
this.success = null;
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return this.success != null;
}
public void setSuccessIsSet(boolean value) {
if (!value) {
this.success = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((List<TReplicationJob>)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return getSuccess();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getActiveJobs_result)
return this.equals((getActiveJobs_result)that);
return false;
}
public boolean equals(getActiveJobs_result that) {
if (that == null)
return false;
boolean this_present_success = true && this.isSetSuccess();
boolean that_present_success = true && that.isSetSuccess();
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (!this.success.equals(that.success))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getActiveJobs_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getActiveJobs_result(");
boolean first = true;
sb.append("success:");
if (this.success == null) {
sb.append("null");
} else {
sb.append(this.success);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getActiveJobs_resultStandardSchemeFactory implements SchemeFactory {
public getActiveJobs_resultStandardScheme getScheme() {
return new getActiveJobs_resultStandardScheme();
}
}
private static class getActiveJobs_resultStandardScheme extends StandardScheme<getActiveJobs_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getActiveJobs_result struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list26 = iprot.readListBegin();
struct.success = new ArrayList<TReplicationJob>(_list26.size);
for (int _i27 = 0; _i27 < _list26.size; ++_i27)
{
TReplicationJob _elem28;
_elem28 = new TReplicationJob();
_elem28.read(iprot);
struct.success.add(_elem28);
}
iprot.readListEnd();
}
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getActiveJobs_result struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.success != null) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
for (TReplicationJob _iter29 : struct.success)
{
_iter29.write(oprot);
}
oprot.writeListEnd();
}
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getActiveJobs_resultTupleSchemeFactory implements SchemeFactory {
public getActiveJobs_resultTupleScheme getScheme() {
return new getActiveJobs_resultTupleScheme();
}
}
private static class getActiveJobs_resultTupleScheme extends TupleScheme<getActiveJobs_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_result struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
for (TReplicationJob _iter30 : struct.success)
{
_iter30.write(oprot);
}
}
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getActiveJobs_result struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list31 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
struct.success = new ArrayList<TReplicationJob>(_list31.size);
for (int _i32 = 0; _i32 < _list31.size; ++_i32)
{
TReplicationJob _elem33;
_elem33 = new TReplicationJob();
_elem33.read(iprot);
struct.success.add(_elem33);
}
}
struct.setSuccessIsSet(true);
}
}
}
}
/**
 * Thrift-generated argument struct for the {@code getRetiredJobs(afterId, maxJobs)} RPC:
 * {@code afterId} is the job id to page from and {@code maxJobs} caps the result size.
 *
 * <p>Hand-edited relative to the raw generator output in exactly one place:
 * {@link #hashCode()} previously returned a constant {@code 0}, which makes every
 * instance collide when used as a key in hash-based collections. It now derives the
 * hash from both fields, consistent with {@link #equals(Object)}. If this file is
 * regenerated, that fix must be re-applied.
 */
public static class getRetiredJobs_args implements org.apache.thrift.TBase<getRetiredJobs_args, getRetiredJobs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRetiredJobs_args> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRetiredJobs_args");
  private static final org.apache.thrift.protocol.TField AFTER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("afterId", org.apache.thrift.protocol.TType.I64, (short)1);
  private static final org.apache.thrift.protocol.TField MAX_JOBS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxJobs", org.apache.thrift.protocol.TType.I32, (short)2);
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new getRetiredJobs_argsStandardSchemeFactory());
    schemes.put(TupleScheme.class, new getRetiredJobs_argsTupleSchemeFactory());
  }
  public long afterId; // required
  public int maxJobs; // required
  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    AFTER_ID((short)1, "afterId"),
    MAX_JOBS((short)2, "maxJobs");
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 1: // AFTER_ID
          return AFTER_ID;
        case 2: // MAX_JOBS
          return MAX_JOBS;
        default:
          return null;
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }
    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }
    private final short _thriftId;
    private final String _fieldName;
    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }
    public short getThriftFieldId() {
      return _thriftId;
    }
    public String getFieldName() {
      return _fieldName;
    }
  }
  // isset id assignments
  private static final int __AFTERID_ISSET_ID = 0;
  private static final int __MAXJOBS_ISSET_ID = 1;
  private byte __isset_bitfield = 0;
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.AFTER_ID, new org.apache.thrift.meta_data.FieldMetaData("afterId", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
    tmpMap.put(_Fields.MAX_JOBS, new org.apache.thrift.meta_data.FieldMetaData("maxJobs", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRetiredJobs_args.class, metaDataMap);
  }
  public getRetiredJobs_args() {
  }
  public getRetiredJobs_args(
    long afterId,
    int maxJobs)
  {
    this();
    this.afterId = afterId;
    setAfterIdIsSet(true);
    this.maxJobs = maxJobs;
    setMaxJobsIsSet(true);
  }
  /**
   * Performs a deep copy on <i>other</i>.
   */
  public getRetiredJobs_args(getRetiredJobs_args other) {
    __isset_bitfield = other.__isset_bitfield;
    this.afterId = other.afterId;
    this.maxJobs = other.maxJobs;
  }
  public getRetiredJobs_args deepCopy() {
    return new getRetiredJobs_args(this);
  }
  @Override
  public void clear() {
    setAfterIdIsSet(false);
    this.afterId = 0;
    setMaxJobsIsSet(false);
    this.maxJobs = 0;
  }
  public long getAfterId() {
    return this.afterId;
  }
  public getRetiredJobs_args setAfterId(long afterId) {
    this.afterId = afterId;
    setAfterIdIsSet(true);
    return this;
  }
  public void unsetAfterId() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AFTERID_ISSET_ID);
  }
  /** Returns true if field afterId is set (has been assigned a value) and false otherwise */
  public boolean isSetAfterId() {
    return EncodingUtils.testBit(__isset_bitfield, __AFTERID_ISSET_ID);
  }
  public void setAfterIdIsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AFTERID_ISSET_ID, value);
  }
  public int getMaxJobs() {
    return this.maxJobs;
  }
  public getRetiredJobs_args setMaxJobs(int maxJobs) {
    this.maxJobs = maxJobs;
    setMaxJobsIsSet(true);
    return this;
  }
  public void unsetMaxJobs() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
  }
  /** Returns true if field maxJobs is set (has been assigned a value) and false otherwise */
  public boolean isSetMaxJobs() {
    return EncodingUtils.testBit(__isset_bitfield, __MAXJOBS_ISSET_ID);
  }
  public void setMaxJobsIsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXJOBS_ISSET_ID, value);
  }
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case AFTER_ID:
      if (value == null) {
        unsetAfterId();
      } else {
        setAfterId((Long)value);
      }
      break;
    case MAX_JOBS:
      if (value == null) {
        unsetMaxJobs();
      } else {
        setMaxJobs((Integer)value);
      }
      break;
    }
  }
  public Object getFieldValue(_Fields field) {
    switch (field) {
    case AFTER_ID:
      return Long.valueOf(getAfterId());
    case MAX_JOBS:
      return Integer.valueOf(getMaxJobs());
    }
    throw new IllegalStateException();
  }
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
    case AFTER_ID:
      return isSetAfterId();
    case MAX_JOBS:
      return isSetMaxJobs();
    }
    throw new IllegalStateException();
  }
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof getRetiredJobs_args)
      return this.equals((getRetiredJobs_args)that);
    return false;
  }
  public boolean equals(getRetiredJobs_args that) {
    if (that == null)
      return false;
    boolean this_present_afterId = true;
    boolean that_present_afterId = true;
    if (this_present_afterId || that_present_afterId) {
      if (!(this_present_afterId && that_present_afterId))
        return false;
      if (this.afterId != that.afterId)
        return false;
    }
    boolean this_present_maxJobs = true;
    boolean that_present_maxJobs = true;
    if (this_present_maxJobs || that_present_maxJobs) {
      if (!(this_present_maxJobs && that_present_maxJobs))
        return false;
      if (this.maxJobs != that.maxJobs)
        return false;
    }
    return true;
  }
  @Override
  public int hashCode() {
    // FIX: the generator stub returned a constant 0, degrading every hash-based
    // collection holding these structs to a single bucket. Combine both fields
    // (the same ones equals() compares) so equal objects still hash equally.
    int result = 17;
    result = 31 * result + (int) (afterId ^ (afterId >>> 32));
    result = 31 * result + maxJobs;
    return result;
  }
  @Override
  public int compareTo(getRetiredJobs_args other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    lastComparison = Boolean.valueOf(isSetAfterId()).compareTo(other.isSetAfterId());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetAfterId()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.afterId, other.afterId);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetMaxJobs()).compareTo(other.isSetMaxJobs());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetMaxJobs()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxJobs, other.maxJobs);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("getRetiredJobs_args(");
    boolean first = true;
    sb.append("afterId:");
    sb.append(this.afterId);
    first = false;
    if (!first) sb.append(", ");
    sb.append("maxJobs:");
    sb.append(this.maxJobs);
    first = false;
    sb.append(")");
    return sb.toString();
  }
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private static class getRetiredJobs_argsStandardSchemeFactory implements SchemeFactory {
    public getRetiredJobs_argsStandardScheme getScheme() {
      return new getRetiredJobs_argsStandardScheme();
    }
  }
  private static class getRetiredJobs_argsStandardScheme extends StandardScheme<getRetiredJobs_args> {
    public void read(org.apache.thrift.protocol.TProtocol iprot, getRetiredJobs_args struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 1: // AFTER_ID
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.afterId = iprot.readI64();
              struct.setAfterIdIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // MAX_JOBS
            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
              struct.maxJobs = iprot.readI32();
              struct.setMaxJobsIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot, getRetiredJobs_args struct) throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      oprot.writeFieldBegin(AFTER_ID_FIELD_DESC);
      oprot.writeI64(struct.afterId);
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(MAX_JOBS_FIELD_DESC);
      oprot.writeI32(struct.maxJobs);
      oprot.writeFieldEnd();
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
  }
  private static class getRetiredJobs_argsTupleSchemeFactory implements SchemeFactory {
    public getRetiredJobs_argsTupleScheme getScheme() {
      return new getRetiredJobs_argsTupleScheme();
    }
  }
  private static class getRetiredJobs_argsTupleScheme extends TupleScheme<getRetiredJobs_args> {
    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_args struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      BitSet optionals = new BitSet();
      if (struct.isSetAfterId()) {
        optionals.set(0);
      }
      if (struct.isSetMaxJobs()) {
        optionals.set(1);
      }
      oprot.writeBitSet(optionals, 2);
      if (struct.isSetAfterId()) {
        oprot.writeI64(struct.afterId);
      }
      if (struct.isSetMaxJobs()) {
        oprot.writeI32(struct.maxJobs);
      }
    }
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_args struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        struct.afterId = iprot.readI64();
        struct.setAfterIdIsSet(true);
      }
      if (incoming.get(1)) {
        struct.maxJobs = iprot.readI32();
        struct.setMaxJobsIsSet(true);
      }
    }
  }
}
/**
 * Thrift-generated result struct for the {@code getRetiredJobs} RPC, carrying the
 * returned list of {@code TReplicationJob}s in the conventional {@code success} slot.
 *
 * <p>Hand-edited relative to the raw generator output in exactly one place:
 * {@link #hashCode()} previously returned a constant {@code 0}, which makes every
 * instance collide when used as a key in hash-based collections. It now derives the
 * hash from the {@code success} list, consistent with {@link #equals(Object)}. If
 * this file is regenerated, that fix must be re-applied.
 */
public static class getRetiredJobs_result implements org.apache.thrift.TBase<getRetiredJobs_result, getRetiredJobs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRetiredJobs_result> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRetiredJobs_result");
  private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new getRetiredJobs_resultStandardSchemeFactory());
    schemes.put(TupleScheme.class, new getRetiredJobs_resultTupleSchemeFactory());
  }
  public List<TReplicationJob> success; // required
  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    SUCCESS((short)0, "success");
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 0: // SUCCESS
          return SUCCESS;
        default:
          return null;
      }
    }
    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }
    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }
    private final short _thriftId;
    private final String _fieldName;
    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }
    public short getThriftFieldId() {
      return _thriftId;
    }
    public String getFieldName() {
      return _fieldName;
    }
  }
  // isset id assignments
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TReplicationJob.class))));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRetiredJobs_result.class, metaDataMap);
  }
  public getRetiredJobs_result() {
  }
  public getRetiredJobs_result(
    List<TReplicationJob> success)
  {
    this();
    this.success = success;
  }
  /**
   * Performs a deep copy on <i>other</i>.
   */
  public getRetiredJobs_result(getRetiredJobs_result other) {
    if (other.isSetSuccess()) {
      List<TReplicationJob> __this__success = new ArrayList<TReplicationJob>(other.success.size());
      for (TReplicationJob other_element : other.success) {
        __this__success.add(new TReplicationJob(other_element));
      }
      this.success = __this__success;
    }
  }
  public getRetiredJobs_result deepCopy() {
    return new getRetiredJobs_result(this);
  }
  @Override
  public void clear() {
    this.success = null;
  }
  public int getSuccessSize() {
    return (this.success == null) ? 0 : this.success.size();
  }
  public java.util.Iterator<TReplicationJob> getSuccessIterator() {
    return (this.success == null) ? null : this.success.iterator();
  }
  public void addToSuccess(TReplicationJob elem) {
    if (this.success == null) {
      this.success = new ArrayList<TReplicationJob>();
    }
    this.success.add(elem);
  }
  public List<TReplicationJob> getSuccess() {
    return this.success;
  }
  public getRetiredJobs_result setSuccess(List<TReplicationJob> success) {
    this.success = success;
    return this;
  }
  public void unsetSuccess() {
    this.success = null;
  }
  /** Returns true if field success is set (has been assigned a value) and false otherwise */
  public boolean isSetSuccess() {
    return this.success != null;
  }
  public void setSuccessIsSet(boolean value) {
    if (!value) {
      this.success = null;
    }
  }
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case SUCCESS:
      if (value == null) {
        unsetSuccess();
      } else {
        setSuccess((List<TReplicationJob>)value);
      }
      break;
    }
  }
  public Object getFieldValue(_Fields field) {
    switch (field) {
    case SUCCESS:
      return getSuccess();
    }
    throw new IllegalStateException();
  }
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
    case SUCCESS:
      return isSetSuccess();
    }
    throw new IllegalStateException();
  }
  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof getRetiredJobs_result)
      return this.equals((getRetiredJobs_result)that);
    return false;
  }
  public boolean equals(getRetiredJobs_result that) {
    if (that == null)
      return false;
    boolean this_present_success = true && this.isSetSuccess();
    boolean that_present_success = true && that.isSetSuccess();
    if (this_present_success || that_present_success) {
      if (!(this_present_success && that_present_success))
        return false;
      if (!this.success.equals(that.success))
        return false;
    }
    return true;
  }
  @Override
  public int hashCode() {
    // FIX: the generator stub returned a constant 0, degrading every hash-based
    // collection holding these structs to a single bucket. Hash the success list
    // (the only field equals() compares); unset lists hash to 0, matching the
    // equals() contract.
    return isSetSuccess() ? this.success.hashCode() : 0;
  }
  @Override
  public int compareTo(getRetiredJobs_result other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSuccess()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("getRetiredJobs_result(");
    boolean first = true;
    sb.append("success:");
    if (this.success == null) {
      sb.append("null");
    } else {
      sb.append(this.success);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  private static class getRetiredJobs_resultStandardSchemeFactory implements SchemeFactory {
    public getRetiredJobs_resultStandardScheme getScheme() {
      return new getRetiredJobs_resultStandardScheme();
    }
  }
  private static class getRetiredJobs_resultStandardScheme extends StandardScheme<getRetiredJobs_result> {
    public void read(org.apache.thrift.protocol.TProtocol iprot, getRetiredJobs_result struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 0: // SUCCESS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list34 = iprot.readListBegin();
                struct.success = new ArrayList<TReplicationJob>(_list34.size);
                for (int _i35 = 0; _i35 < _list34.size; ++_i35)
                {
                  TReplicationJob _elem36;
                  _elem36 = new TReplicationJob();
                  _elem36.read(iprot);
                  struct.success.add(_elem36);
                }
                iprot.readListEnd();
              }
              struct.setSuccessIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();
      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }
    public void write(org.apache.thrift.protocol.TProtocol oprot, getRetiredJobs_result struct) throws org.apache.thrift.TException {
      struct.validate();
      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.success != null) {
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          for (TReplicationJob _iter37 : struct.success)
          {
            _iter37.write(oprot);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
  }
  private static class getRetiredJobs_resultTupleSchemeFactory implements SchemeFactory {
    public getRetiredJobs_resultTupleScheme getScheme() {
      return new getRetiredJobs_resultTupleScheme();
    }
  }
  private static class getRetiredJobs_resultTupleScheme extends TupleScheme<getRetiredJobs_result> {
    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_result struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      BitSet optionals = new BitSet();
      if (struct.isSetSuccess()) {
        optionals.set(0);
      }
      oprot.writeBitSet(optionals, 1);
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
          for (TReplicationJob _iter38 : struct.success)
          {
            _iter38.write(oprot);
          }
        }
      }
    }
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, getRetiredJobs_result struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        {
          org.apache.thrift.protocol.TList _list39 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
          struct.success = new ArrayList<TReplicationJob>(_list39.size);
          for (int _i40 = 0; _i40 < _list39.size; ++_i40)
          {
            TReplicationJob _elem41;
            _elem41 = new TReplicationJob();
            _elem41.read(iprot);
            struct.success.add(_elem41);
          }
        }
        struct.setSuccessIsSet(true);
      }
    }
  }
}
public static class getJobs_args implements org.apache.thrift.TBase<getJobs_args, getJobs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getJobs_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getJobs_args");
private static final org.apache.thrift.protocol.TField IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("ids", org.apache.thrift.protocol.TType.LIST, (short)1);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getJobs_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getJobs_argsTupleSchemeFactory());
}
public List<Long> ids; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
IDS((short)1, "ids");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // IDS
return IDS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.IDS, new org.apache.thrift.meta_data.FieldMetaData("ids", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getJobs_args.class, metaDataMap);
}
public getJobs_args() {
}
public getJobs_args(
List<Long> ids)
{
this();
this.ids = ids;
}
/**
* Performs a deep copy on <i>other</i>.
*/
public getJobs_args(getJobs_args other) {
if (other.isSetIds()) {
List<Long> __this__ids = new ArrayList<Long>(other.ids);
this.ids = __this__ids;
}
}
public getJobs_args deepCopy() {
return new getJobs_args(this);
}
@Override
public void clear() {
this.ids = null;
}
public int getIdsSize() {
return (this.ids == null) ? 0 : this.ids.size();
}
public java.util.Iterator<Long> getIdsIterator() {
return (this.ids == null) ? null : this.ids.iterator();
}
public void addToIds(long elem) {
if (this.ids == null) {
this.ids = new ArrayList<Long>();
}
this.ids.add(elem);
}
public List<Long> getIds() {
return this.ids;
}
public getJobs_args setIds(List<Long> ids) {
this.ids = ids;
return this;
}
public void unsetIds() {
this.ids = null;
}
/** Returns true if field ids is set (has been assigned a value) and false otherwise */
public boolean isSetIds() {
return this.ids != null;
}
public void setIdsIsSet(boolean value) {
if (!value) {
this.ids = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case IDS:
if (value == null) {
unsetIds();
} else {
setIds((List<Long>)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case IDS:
return getIds();
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case IDS:
return isSetIds();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getJobs_args)
return this.equals((getJobs_args)that);
return false;
}
public boolean equals(getJobs_args that) {
if (that == null)
return false;
boolean this_present_ids = true && this.isSetIds();
boolean that_present_ids = true && that.isSetIds();
if (this_present_ids || that_present_ids) {
if (!(this_present_ids && that_present_ids))
return false;
if (!this.ids.equals(that.ids))
return false;
}
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getJobs_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetIds()).compareTo(other.isSetIds());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetIds()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ids, other.ids);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getJobs_args(");
boolean first = true;
sb.append("ids:");
if (this.ids == null) {
sb.append("null");
} else {
sb.append(this.ids);
}
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
// Java deserialization hook: decodes the struct via Thrift's compact protocol,
// mirroring writeObject; TException is rewrapped as IOException.
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
  try {
    read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
  } catch (org.apache.thrift.TException te) {
    throw new java.io.IOException(te);
  }
}
/** Factory producing the standard (field-tagged) serialization scheme for getJobs_args. */
private static class getJobs_argsStandardSchemeFactory implements SchemeFactory {
  public getJobs_argsStandardScheme getScheme() {
    return new getJobs_argsStandardScheme();
  }
}
/** Standard-protocol (field-tagged) reader/writer for getJobs_args. */
private static class getJobs_argsStandardScheme extends StandardScheme<getJobs_args> {

  /**
   * Reads a getJobs_args from the wire. Unknown field ids and fields whose
   * wire type does not match the schema are skipped for forward compatibility.
   */
  public void read(org.apache.thrift.protocol.TProtocol iprot, getJobs_args struct) throws org.apache.thrift.TException {
    org.apache.thrift.protocol.TField schemeField;
    iprot.readStructBegin();
    while (true)
    {
      schemeField = iprot.readFieldBegin();
      if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
        break;
      }
      switch (schemeField.id) {
        case 1: // IDS
          if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
            {
              org.apache.thrift.protocol.TList _list42 = iprot.readListBegin();
              struct.ids = new ArrayList<Long>(_list42.size);
              for (int _i43 = 0; _i43 < _list42.size; ++_i43)
              {
                long _elem44;
                _elem44 = iprot.readI64();
                struct.ids.add(_elem44);
              }
              iprot.readListEnd();
            }
            struct.setIdsIsSet(true);
          } else {
            // Wire type mismatch: skip the payload rather than fail.
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
          }
          break;
        default:
          org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();

    // check for required fields of primitive type, which can't be checked in the validate method
    struct.validate();
  }

  /** Writes a getJobs_args to the wire; a null {@code ids} field is simply omitted. */
  public void write(org.apache.thrift.protocol.TProtocol oprot, getJobs_args struct) throws org.apache.thrift.TException {
    struct.validate();

    oprot.writeStructBegin(STRUCT_DESC);
    if (struct.ids != null) {
      oprot.writeFieldBegin(IDS_FIELD_DESC);
      {
        oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.ids.size()));
        for (long _iter45 : struct.ids)
        {
          oprot.writeI64(_iter45);
        }
        oprot.writeListEnd();
      }
      oprot.writeFieldEnd();
    }
    oprot.writeFieldStop();
    oprot.writeStructEnd();
  }

}
/** Factory producing the compact tuple serialization scheme for getJobs_args. */
private static class getJobs_argsTupleSchemeFactory implements SchemeFactory {
  public getJobs_argsTupleScheme getScheme() {
    return new getJobs_argsTupleScheme();
  }
}
/**
 * Tuple-protocol reader/writer for getJobs_args. Field presence is encoded in
 * a leading bitset instead of per-field tags, so reader and writer must agree
 * on field order.
 */
private static class getJobs_argsTupleScheme extends TupleScheme<getJobs_args> {

  @Override
  public void write(org.apache.thrift.protocol.TProtocol prot, getJobs_args struct) throws org.apache.thrift.TException {
    TTupleProtocol oprot = (TTupleProtocol) prot;
    // Bit 0 signals whether ids follows.
    BitSet optionals = new BitSet();
    if (struct.isSetIds()) {
      optionals.set(0);
    }
    oprot.writeBitSet(optionals, 1);
    if (struct.isSetIds()) {
      {
        // List header is just the element count; element type is implied.
        oprot.writeI32(struct.ids.size());
        for (long _iter46 : struct.ids)
        {
          oprot.writeI64(_iter46);
        }
      }
    }
  }

  @Override
  public void read(org.apache.thrift.protocol.TProtocol prot, getJobs_args struct) throws org.apache.thrift.TException {
    TTupleProtocol iprot = (TTupleProtocol) prot;
    BitSet incoming = iprot.readBitSet(1);
    if (incoming.get(0)) {
      {
        org.apache.thrift.protocol.TList _list47 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
        struct.ids = new ArrayList<Long>(_list47.size);
        for (int _i48 = 0; _i48 < _list47.size; ++_i48)
        {
          long _elem49;
          _elem49 = iprot.readI64();
          struct.ids.add(_elem49);
        }
      }
      struct.setIdsIsSet(true);
    }
  }

}
}
/**
 * Generated Thrift result wrapper for the {@code getJobs} service call. Holds
 * the call's return value: a map from job id to {@link TReplicationJob}.
 *
 * <p>Only change from the generated original: {@link #hashCode()} previously
 * returned a constant 0; it now derives the hash from the {@code success}
 * field (the same field {@link #equals(getJobs_result)} compares), preserving
 * the equals/hashCode contract while restoring O(1) hash-collection behavior.
 */
public static class getJobs_result implements org.apache.thrift.TBase<getJobs_result, getJobs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getJobs_result> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getJobs_result");

  private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0);

  // Serialization strategies: standard (field-tagged) and tuple (compact) schemes.
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new getJobs_resultStandardSchemeFactory());
    schemes.put(TupleScheme.class, new getJobs_resultTupleSchemeFactory());
  }

  public Map<Long,TReplicationJob> success; // required

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    SUCCESS((short)0, "success");

    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        case 0: // SUCCESS
          return SUCCESS;
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    public short getThriftFieldId() {
      return _thriftId;
    }

    public String getFieldName() {
      return _fieldName;
    }
  }

  // isset id assignments
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64),
            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TReplicationJob.class))));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getJobs_result.class, metaDataMap);
  }

  public getJobs_result() {
  }

  public getJobs_result(
    Map<Long,TReplicationJob> success)
  {
    this();
    this.success = success;
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public getJobs_result(getJobs_result other) {
    if (other.isSetSuccess()) {
      Map<Long,TReplicationJob> __this__success = new HashMap<Long,TReplicationJob>(other.success.size());
      for (Map.Entry<Long, TReplicationJob> other_element : other.success.entrySet()) {

        Long other_element_key = other_element.getKey();
        TReplicationJob other_element_value = other_element.getValue();

        Long __this__success_copy_key = other_element_key;

        // Values are structs, so each one is deep-copied.
        TReplicationJob __this__success_copy_value = new TReplicationJob(other_element_value);

        __this__success.put(__this__success_copy_key, __this__success_copy_value);
      }
      this.success = __this__success;
    }
  }

  public getJobs_result deepCopy() {
    return new getJobs_result(this);
  }

  @Override
  public void clear() {
    this.success = null;
  }

  public int getSuccessSize() {
    return (this.success == null) ? 0 : this.success.size();
  }

  /** Adds one entry to the success map, lazily creating the map if unset. */
  public void putToSuccess(long key, TReplicationJob val) {
    if (this.success == null) {
      this.success = new HashMap<Long,TReplicationJob>();
    }
    this.success.put(key, val);
  }

  public Map<Long,TReplicationJob> getSuccess() {
    return this.success;
  }

  public getJobs_result setSuccess(Map<Long,TReplicationJob> success) {
    this.success = success;
    return this;
  }

  public void unsetSuccess() {
    this.success = null;
  }

  /** Returns true if field success is set (has been assigned a value) and false otherwise */
  public boolean isSetSuccess() {
    return this.success != null;
  }

  public void setSuccessIsSet(boolean value) {
    if (!value) {
      this.success = null;
    }
  }

  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case SUCCESS:
      if (value == null) {
        unsetSuccess();
      } else {
        setSuccess((Map<Long,TReplicationJob>)value);
      }
      break;

    }
  }

  public Object getFieldValue(_Fields field) {
    switch (field) {
    case SUCCESS:
      return getSuccess();

    }
    throw new IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }

    switch (field) {
    case SUCCESS:
      return isSetSuccess();
    }
    throw new IllegalStateException();
  }

  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof getJobs_result)
      return this.equals((getJobs_result)that);
    return false;
  }

  /** Field-wise equality: success must be both unset, or both set and equal. */
  public boolean equals(getJobs_result that) {
    if (that == null)
      return false;

    boolean this_present_success = true && this.isSetSuccess();
    boolean that_present_success = true && that.isSetSuccess();
    if (this_present_success || that_present_success) {
      if (!(this_present_success && that_present_success))
        return false;
      if (!this.success.equals(that.success))
        return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    // Was a constant 0 (legal but degenerate). Hash the same field equals()
    // compares so equal objects keep equal hashes.
    List<Object> list = new ArrayList<Object>();

    boolean present_success = true && (isSetSuccess());
    list.add(present_success);
    if (present_success)
      list.add(success);

    return list.hashCode();
  }

  /** Orders by class name, then isSetSuccess (unset first), then the map contents. */
  @Override
  public int compareTo(getJobs_result other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSuccess()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }

  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }

  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("getJobs_result(");
    boolean first = true;

    sb.append("success:");
    if (this.success == null) {
      sb.append("null");
    } else {
      sb.append(this.success);
    }
    first = false;
    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  // Java serialization bridges: encode/decode via Thrift's compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class getJobs_resultStandardSchemeFactory implements SchemeFactory {
    public getJobs_resultStandardScheme getScheme() {
      return new getJobs_resultStandardScheme();
    }
  }

  /** Standard-protocol (field-tagged) reader/writer; unknown fields are skipped. */
  private static class getJobs_resultStandardScheme extends StandardScheme<getJobs_result> {

    public void read(org.apache.thrift.protocol.TProtocol iprot, getJobs_result struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          case 0: // SUCCESS
            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
                org.apache.thrift.protocol.TMap _map50 = iprot.readMapBegin();
                struct.success = new HashMap<Long,TReplicationJob>(2*_map50.size);
                for (int _i51 = 0; _i51 < _map50.size; ++_i51)
                {
                  long _key52;
                  TReplicationJob _val53;
                  _key52 = iprot.readI64();
                  _val53 = new TReplicationJob();
                  _val53.read(iprot);
                  struct.success.put(_key52, _val53);
                }
                iprot.readMapEnd();
              }
              struct.setSuccessIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    public void write(org.apache.thrift.protocol.TProtocol oprot, getJobs_result struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      if (struct.success != null) {
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          for (Map.Entry<Long, TReplicationJob> _iter54 : struct.success.entrySet())
          {
            oprot.writeI64(_iter54.getKey());
            _iter54.getValue().write(oprot);
          }
          oprot.writeMapEnd();
        }
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class getJobs_resultTupleSchemeFactory implements SchemeFactory {
    public getJobs_resultTupleScheme getScheme() {
      return new getJobs_resultTupleScheme();
    }
  }

  /** Tuple-protocol reader/writer: field presence is carried in a leading bitset. */
  private static class getJobs_resultTupleScheme extends TupleScheme<getJobs_result> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, getJobs_result struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      BitSet optionals = new BitSet();
      if (struct.isSetSuccess()) {
        optionals.set(0);
      }
      oprot.writeBitSet(optionals, 1);
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
          for (Map.Entry<Long, TReplicationJob> _iter55 : struct.success.entrySet())
          {
            oprot.writeI64(_iter55.getKey());
            _iter55.getValue().write(oprot);
          }
        }
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, getJobs_result struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        {
          org.apache.thrift.protocol.TMap _map56 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
          struct.success = new HashMap<Long,TReplicationJob>(2*_map56.size);
          for (int _i57 = 0; _i57 < _map56.size; ++_i57)
          {
            long _key58;
            TReplicationJob _val59;
            _key58 = iprot.readI64();
            _val59 = new TReplicationJob();
            _val59.read(iprot);
            struct.success.put(_key58, _val59);
          }
        }
        struct.setSuccessIsSet(true);
      }
    }

  }

}
/**
 * Generated Thrift argument wrapper for the {@code pause} service call. The
 * call takes no arguments, so this struct has no fields; it exists only to
 * carry the (empty) argument list over the wire. All instances are equal.
 */
public static class pause_args implements org.apache.thrift.TBase<pause_args, pause_args._Fields>, java.io.Serializable, Cloneable, Comparable<pause_args> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("pause_args");

  // Serialization strategies: standard (field-tagged) and tuple (compact) schemes.
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new pause_argsStandardSchemeFactory());
    schemes.put(TupleScheme.class, new pause_argsTupleSchemeFactory());
  }

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    // No fields: empty enum body.
    ;

    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    public short getThriftFieldId() {
      return _thriftId;
    }

    public String getFieldName() {
      return _fieldName;
    }
  }

  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(pause_args.class, metaDataMap);
  }

  public pause_args() {
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public pause_args(pause_args other) {
  }

  public pause_args deepCopy() {
    return new pause_args(this);
  }

  @Override
  public void clear() {
  }

  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    }
  }

  public Object getFieldValue(_Fields field) {
    switch (field) {
    }
    throw new IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }

    switch (field) {
    }
    throw new IllegalStateException();
  }

  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof pause_args)
      return this.equals((pause_args)that);
    return false;
  }

  // With no fields, any two non-null pause_args are equal.
  public boolean equals(pause_args that) {
    if (that == null)
      return false;

    return true;
  }

  @Override
  public int hashCode() {
    // Constant is correct here: all instances are equal, so all share a hash.
    return 0;
  }

  @Override
  public int compareTo(pause_args other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    return 0;
  }

  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }

  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("pause_args(");
    boolean first = true;

    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  // Java serialization bridges: encode/decode via Thrift's compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class pause_argsStandardSchemeFactory implements SchemeFactory {
    public pause_argsStandardScheme getScheme() {
      return new pause_argsStandardScheme();
    }
  }

  /** Standard-protocol reader/writer; every incoming field is unknown and skipped. */
  private static class pause_argsStandardScheme extends StandardScheme<pause_args> {

    public void read(org.apache.thrift.protocol.TProtocol iprot, pause_args struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    public void write(org.apache.thrift.protocol.TProtocol oprot, pause_args struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class pause_argsTupleSchemeFactory implements SchemeFactory {
    public pause_argsTupleScheme getScheme() {
      return new pause_argsTupleScheme();
    }
  }

  /** Tuple-protocol reader/writer; nothing to encode for a field-less struct. */
  private static class pause_argsTupleScheme extends TupleScheme<pause_args> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, pause_args struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, pause_args struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
    }
  }

}
/**
 * Generated Thrift result wrapper for the {@code pause} service call. The
 * call returns void and declares no exceptions, so this struct has no fields;
 * it exists only to acknowledge call completion. All instances are equal.
 */
public static class pause_result implements org.apache.thrift.TBase<pause_result, pause_result._Fields>, java.io.Serializable, Cloneable, Comparable<pause_result> {
  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("pause_result");

  // Serialization strategies: standard (field-tagged) and tuple (compact) schemes.
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
  static {
    schemes.put(StandardScheme.class, new pause_resultStandardSchemeFactory());
    schemes.put(TupleScheme.class, new pause_resultTupleSchemeFactory());
  }

  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    // No fields: empty enum body.
    ;

    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

    static {
      for (_Fields field : EnumSet.allOf(_Fields.class)) {
        byName.put(field.getFieldName(), field);
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
      switch(fieldId) {
        default:
          return null;
      }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
      _Fields fields = findByThriftId(fieldId);
      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
      return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
      return byName.get(name);
    }

    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
      _thriftId = thriftId;
      _fieldName = fieldName;
    }

    public short getThriftFieldId() {
      return _thriftId;
    }

    public String getFieldName() {
      return _fieldName;
    }
  }

  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(pause_result.class, metaDataMap);
  }

  public pause_result() {
  }

  /**
   * Performs a deep copy on <i>other</i>.
   */
  public pause_result(pause_result other) {
  }

  public pause_result deepCopy() {
    return new pause_result(this);
  }

  @Override
  public void clear() {
  }

  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    }
  }

  public Object getFieldValue(_Fields field) {
    switch (field) {
    }
    throw new IllegalStateException();
  }

  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }

    switch (field) {
    }
    throw new IllegalStateException();
  }

  @Override
  public boolean equals(Object that) {
    if (that == null)
      return false;
    if (that instanceof pause_result)
      return this.equals((pause_result)that);
    return false;
  }

  // With no fields, any two non-null pause_result instances are equal.
  public boolean equals(pause_result that) {
    if (that == null)
      return false;

    return true;
  }

  @Override
  public int hashCode() {
    // Constant is correct here: all instances are equal, so all share a hash.
    return 0;
  }

  @Override
  public int compareTo(pause_result other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    int lastComparison = 0;

    return 0;
  }

  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }

  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }

  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("pause_result(");
    boolean first = true;

    sb.append(")");
    return sb.toString();
  }

  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }

  // Java serialization bridges: encode/decode via Thrift's compact protocol.
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }

  private static class pause_resultStandardSchemeFactory implements SchemeFactory {
    public pause_resultStandardScheme getScheme() {
      return new pause_resultStandardScheme();
    }
  }

  /** Standard-protocol reader/writer; every incoming field is unknown and skipped. */
  private static class pause_resultStandardScheme extends StandardScheme<pause_result> {

    public void read(org.apache.thrift.protocol.TProtocol iprot, pause_result struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        switch (schemeField.id) {
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    public void write(org.apache.thrift.protocol.TProtocol oprot, pause_result struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }

  private static class pause_resultTupleSchemeFactory implements SchemeFactory {
    public pause_resultTupleScheme getScheme() {
      return new pause_resultTupleScheme();
    }
  }

  /** Tuple-protocol reader/writer; nothing to encode for a field-less struct. */
  private static class pause_resultTupleScheme extends TupleScheme<pause_result> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, pause_result struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, pause_result struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
    }
  }

}
public static class resume_args implements org.apache.thrift.TBase<resume_args, resume_args._Fields>, java.io.Serializable, Cloneable, Comparable<resume_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("resume_args");
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new resume_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new resume_argsTupleSchemeFactory());
}
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(resume_args.class, metaDataMap);
}
public resume_args() {
}
/**
* Performs a deep copy on <i>other</i>.
*/
public resume_args(resume_args other) {
}
public resume_args deepCopy() {
return new resume_args(this);
}
@Override
public void clear() {
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof resume_args)
return this.equals((resume_args)that);
return false;
}
public boolean equals(resume_args that) {
if (that == null)
return false;
return true;
}
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(resume_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("resume_args(");
boolean first = true;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
// Java-deserialization hook: decodes the struct with Thrift's compact protocol,
// wrapping any TException as IOException per the ObjectInputStream contract.
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
// Factory registered in the schemes map for standard (field-tagged) protocols.
private static class resume_argsStandardSchemeFactory implements SchemeFactory {
public resume_argsStandardScheme getScheme() {
return new resume_argsStandardScheme();
}
}
// Standard-protocol codec: field-tagged wire format used by e.g. TBinaryProtocol.
private static class resume_argsStandardScheme extends StandardScheme<resume_args> {
// Reads fields until a STOP marker; unknown fields (all of them, since this
// struct declares none) are skipped for forward compatibility.
public void read(org.apache.thrift.protocol.TProtocol iprot, resume_args struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
// Writes an empty struct: begin marker, field-stop, end marker.
public void write(org.apache.thrift.protocol.TProtocol oprot, resume_args struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
// Factory registered in the schemes map for the compact tuple protocol.
private static class resume_argsTupleSchemeFactory implements SchemeFactory {
public resume_argsTupleScheme getScheme() {
return new resume_argsTupleScheme();
}
}
// Tuple-protocol codec: positional wire format. With no fields, both
// directions are no-ops beyond the protocol cast.
private static class resume_argsTupleScheme extends TupleScheme<resume_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, resume_args struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, resume_args struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
}
}
}
/**
 * Thrift-generated result struct for the void `resume` service call.
 * It declares no fields (a void call with no declared exceptions), so all
 * field-dispatch switches below are empty and (de)serialization carries no
 * payload. Generated code — do not hand-edit logic.
 */
public static class resume_result implements org.apache.thrift.TBase<resume_result, resume_result._Fields>, java.io.Serializable, Cloneable, Comparable<resume_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("resume_result");
// Registry of wire codecs, keyed by the protocol's scheme class.
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new resume_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new resume_resultTupleSchemeFactory());
}
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
// Empty enum: the struct has no fields, so only the lookup helpers remain.
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
 * Find the _Fields constant that matches fieldId, or null if its not found.
 */
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
default:
return null;
}
}
/**
 * Find the _Fields constant that matches fieldId, throwing an exception
 * if it is not found.
 */
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
 * Find the _Fields constant that matches name, or null if its not found.
 */
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// Field metadata registry (empty) exposed for Thrift introspection.
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(resume_result.class, metaDataMap);
}
public resume_result() {
}
/**
 * Performs a deep copy on <i>other</i>.
 */
public resume_result(resume_result other) {
}
public resume_result deepCopy() {
return new resume_result(this);
}
// No fields to reset.
@Override
public void clear() {
}
// Generic setter: silent no-op (empty switch, no fields).
public void setFieldValue(_Fields field, Object value) {
switch (field) {
}
}
// Generic getter: always throws, since no field constant exists.
public Object getFieldValue(_Fields field) {
switch (field) {
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof resume_result)
return this.equals((resume_result)that);
return false;
}
// All non-null instances are equal: no fields to differ on.
public boolean equals(resume_result that) {
if (that == null)
return false;
return true;
}
// Constant hash is legal here because all instances are equal.
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(resume_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
// Delegate (de)serialization to the scheme registered for this protocol type.
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("resume_result(");
boolean first = true;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
// Java-serialization bridge: encode/decode via Thrift compact protocol.
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class resume_resultStandardSchemeFactory implements SchemeFactory {
public resume_resultStandardScheme getScheme() {
return new resume_resultStandardScheme();
}
}
// Standard (field-tagged) codec: read skips everything until STOP; write
// emits an empty struct.
private static class resume_resultStandardScheme extends StandardScheme<resume_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, resume_result struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, resume_result struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class resume_resultTupleSchemeFactory implements SchemeFactory {
public resume_resultTupleScheme getScheme() {
return new resume_resultTupleScheme();
}
}
// Tuple codec: positional format; nothing to transfer for a field-less struct.
private static class resume_resultTupleScheme extends TupleScheme<resume_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, resume_result struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, resume_result struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
}
}
}
/**
 * Thrift-generated argument struct for the parameterless `getLag` service
 * call. It declares no fields, so the field-dispatch switches below are empty
 * and (de)serialization carries no payload. Generated code — do not hand-edit
 * logic.
 */
public static class getLag_args implements org.apache.thrift.TBase<getLag_args, getLag_args._Fields>, java.io.Serializable, Cloneable, Comparable<getLag_args> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getLag_args");
// Registry of wire codecs, keyed by the protocol's scheme class.
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getLag_argsStandardSchemeFactory());
schemes.put(TupleScheme.class, new getLag_argsTupleSchemeFactory());
}
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
// Empty enum: the struct has no fields, so only the lookup helpers remain.
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
;
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
 * Find the _Fields constant that matches fieldId, or null if its not found.
 */
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
default:
return null;
}
}
/**
 * Find the _Fields constant that matches fieldId, throwing an exception
 * if it is not found.
 */
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
 * Find the _Fields constant that matches name, or null if its not found.
 */
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// Field metadata registry (empty) exposed for Thrift introspection.
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getLag_args.class, metaDataMap);
}
public getLag_args() {
}
/**
 * Performs a deep copy on <i>other</i>.
 */
public getLag_args(getLag_args other) {
}
public getLag_args deepCopy() {
return new getLag_args(this);
}
// No fields to reset.
@Override
public void clear() {
}
// Generic setter: silent no-op (empty switch, no fields).
public void setFieldValue(_Fields field, Object value) {
switch (field) {
}
}
// Generic getter: always throws, since no field constant exists.
public Object getFieldValue(_Fields field) {
switch (field) {
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getLag_args)
return this.equals((getLag_args)that);
return false;
}
// All non-null instances are equal: no fields to differ on.
public boolean equals(getLag_args that) {
if (that == null)
return false;
return true;
}
// Constant hash is legal here because all instances are equal.
@Override
public int hashCode() {
return 0;
}
@Override
public int compareTo(getLag_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
// Delegate (de)serialization to the scheme registered for this protocol type.
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getLag_args(");
boolean first = true;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
// Java-serialization bridge: encode/decode via Thrift compact protocol.
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getLag_argsStandardSchemeFactory implements SchemeFactory {
public getLag_argsStandardScheme getScheme() {
return new getLag_argsStandardScheme();
}
}
// Standard (field-tagged) codec: read skips everything until STOP; write
// emits an empty struct.
private static class getLag_argsStandardScheme extends StandardScheme<getLag_args> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getLag_args struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getLag_args struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getLag_argsTupleSchemeFactory implements SchemeFactory {
public getLag_argsTupleScheme getScheme() {
return new getLag_argsTupleScheme();
}
}
// Tuple codec: positional format; nothing to transfer for a field-less struct.
private static class getLag_argsTupleScheme extends TupleScheme<getLag_args> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getLag_args struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getLag_args struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
}
}
}
/**
 * Thrift-generated result struct for the `getLag` service call. Carries a
 * single i64 `success` field (field id 0) holding the call's return value,
 * with an isset bitfield tracking assignment since the primitive cannot be
 * null. Generated code — do not hand-edit logic.
 */
public static class getLag_result implements org.apache.thrift.TBase<getLag_result, getLag_result._Fields>, java.io.Serializable, Cloneable, Comparable<getLag_result> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getLag_result");
// Wire descriptor for the `success` return field (Thrift field id 0).
private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64, (short)0);
// Registry of wire codecs, keyed by the protocol's scheme class.
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new getLag_resultStandardSchemeFactory());
schemes.put(TupleScheme.class, new getLag_resultTupleSchemeFactory());
}
public long success; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
SUCCESS((short)0, "success");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
 * Find the _Fields constant that matches fieldId, or null if its not found.
 */
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
default:
return null;
}
}
/**
 * Find the _Fields constant that matches fieldId, throwing an exception
 * if it is not found.
 */
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
 * Find the _Fields constant that matches name, or null if its not found.
 */
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
// isset id assignments
// Bit 0 of __isset_bitfield records whether `success` has been assigned,
// since a primitive long has no null sentinel.
private static final int __SUCCESS_ISSET_ID = 0;
private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getLag_result.class, metaDataMap);
}
public getLag_result() {
}
// Convenience constructor: assigns `success` and marks it set.
public getLag_result(
long success)
{
this();
this.success = success;
setSuccessIsSet(true);
}
/**
 * Performs a deep copy on <i>other</i>.
 */
public getLag_result(getLag_result other) {
__isset_bitfield = other.__isset_bitfield;
this.success = other.success;
}
public getLag_result deepCopy() {
return new getLag_result(this);
}
// Resets `success` to its default (0) and clears its isset bit.
@Override
public void clear() {
setSuccessIsSet(false);
this.success = 0;
}
public long getSuccess() {
return this.success;
}
// Fluent setter: assigns the value, marks it set, returns this for chaining.
public getLag_result setSuccess(long success) {
this.success = success;
setSuccessIsSet(true);
return this;
}
public void unsetSuccess() {
__isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
}
/** Returns true if field success is set (has been assigned a value) and false otherwise */
public boolean isSetSuccess() {
return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
}
public void setSuccessIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
}
// Generic setter: null unsets the field, otherwise the boxed Long is unboxed.
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
setSuccess((Long)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case SUCCESS:
return Long.valueOf(getSuccess());
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case SUCCESS:
return isSetSuccess();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof getLag_result)
return this.equals((getLag_result)that);
return false;
}
// Compares the primitive `success` values; the two always-true `present`
// flags are a generator artifact of its optional-field template.
public boolean equals(getLag_result that) {
if (that == null)
return false;
boolean this_present_success = true;
boolean that_present_success = true;
if (this_present_success || that_present_success) {
if (!(this_present_success && that_present_success))
return false;
if (this.success != that.success)
return false;
}
return true;
}
// Constant hash: satisfies the equals/hashCode contract (equal objects share
// it) but degrades hash-based collections to linear lookups.
@Override
public int hashCode() {
return 0;
}
// Orders by class name across types, then by isset flag, then by `success`.
@Override
public int compareTo(getLag_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetSuccess()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
// Delegate (de)serialization to the scheme registered for this protocol type.
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
}
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("getLag_result(");
boolean first = true;
sb.append("success:");
sb.append(this.success);
first = false;
sb.append(")");
return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
// check for sub-struct validity
}
// Java-serialization bridge: encode/decode via Thrift compact protocol.
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
__isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private static class getLag_resultStandardSchemeFactory implements SchemeFactory {
public getLag_resultStandardScheme getScheme() {
return new getLag_resultStandardScheme();
}
}
// Standard (field-tagged) codec: reads fields until STOP, decoding `success`
// when the tag/type match and skipping anything unknown for forward
// compatibility; write emits `success` only if it has been set.
private static class getLag_resultStandardScheme extends StandardScheme<getLag_result> {
public void read(org.apache.thrift.protocol.TProtocol iprot, getLag_result struct) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true)
{
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
struct.success = iprot.readI64();
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, getLag_result struct) throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.isSetSuccess()) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
oprot.writeI64(struct.success);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static class getLag_resultTupleSchemeFactory implements SchemeFactory {
public getLag_resultTupleScheme getScheme() {
return new getLag_resultTupleScheme();
}
}
// Tuple codec: a 1-entry presence bitset precedes the optional `success`
// value; read mirrors write by consulting the same bitset.
private static class getLag_resultTupleScheme extends TupleScheme<getLag_result> {
@Override
public void write(org.apache.thrift.protocol.TProtocol prot, getLag_result struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
BitSet optionals = new BitSet();
if (struct.isSetSuccess()) {
optionals.set(0);
}
oprot.writeBitSet(optionals, 1);
if (struct.isSetSuccess()) {
oprot.writeI64(struct.success);
}
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, getLag_result struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
struct.success = iprot.readI64();
struct.setSuccessIsSet(true);
}
}
}
}
}
| 9,454 |
0 | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication | Create_ds/reair/main/thrift/gen-java/com/airbnb/di/hive/replication/thrift/TReplicationJob.java | /**
* Autogenerated by Thrift Compiler (0.9.1)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
package com.airbnb.di.hive.replication.thrift;
import org.apache.thrift.EncodingUtils;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.scheme.IScheme;
import org.apache.thrift.scheme.SchemeFactory;
import org.apache.thrift.scheme.StandardScheme;
import org.apache.thrift.scheme.TupleScheme;
import org.apache.thrift.server.AbstractNonblockingServer.*;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class TReplicationJob implements org.apache.thrift.TBase<TReplicationJob, TReplicationJob._Fields>, java.io.Serializable, Cloneable, Comparable<TReplicationJob> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TReplicationJob");
private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)1);
private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I64, (short)2);
private static final org.apache.thrift.protocol.TField UPDATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("updateTime", org.apache.thrift.protocol.TType.I64, (short)3);
private static final org.apache.thrift.protocol.TField OPERATION_FIELD_DESC = new org.apache.thrift.protocol.TField("operation", org.apache.thrift.protocol.TType.I32, (short)4);
private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.I32, (short)5);
private static final org.apache.thrift.protocol.TField SRC_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("srcPath", org.apache.thrift.protocol.TType.STRING, (short)6);
private static final org.apache.thrift.protocol.TField SRC_CLUSTER_FIELD_DESC = new org.apache.thrift.protocol.TField("srcCluster", org.apache.thrift.protocol.TType.STRING, (short)7);
private static final org.apache.thrift.protocol.TField SRC_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("srcDb", org.apache.thrift.protocol.TType.STRING, (short)8);
private static final org.apache.thrift.protocol.TField SRC_TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("srcTable", org.apache.thrift.protocol.TType.STRING, (short)9);
private static final org.apache.thrift.protocol.TField SRC_PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("srcPartitions", org.apache.thrift.protocol.TType.LIST, (short)10);
private static final org.apache.thrift.protocol.TField SRC_MODIFIED_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("srcModifiedTime", org.apache.thrift.protocol.TType.STRING, (short)11);
private static final org.apache.thrift.protocol.TField RENAME_TO_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("renameToDb", org.apache.thrift.protocol.TType.STRING, (short)12);
private static final org.apache.thrift.protocol.TField RENAME_TO_TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("renameToTable", org.apache.thrift.protocol.TType.STRING, (short)13);
private static final org.apache.thrift.protocol.TField RENAME_TO_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("renameToPath", org.apache.thrift.protocol.TType.STRING, (short)14);
private static final org.apache.thrift.protocol.TField EXTRAS_FIELD_DESC = new org.apache.thrift.protocol.TField("extras", org.apache.thrift.protocol.TType.MAP, (short)15);
private static final org.apache.thrift.protocol.TField WAITING_ON_JOBS_FIELD_DESC = new org.apache.thrift.protocol.TField("waitingOnJobs", org.apache.thrift.protocol.TType.LIST, (short)16);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
schemes.put(StandardScheme.class, new TReplicationJobStandardSchemeFactory());
schemes.put(TupleScheme.class, new TReplicationJobTupleSchemeFactory());
}
public long id; // required
public long createTime; // required
public long updateTime; // required
/**
*
* @see TReplicationOperation
*/
public TReplicationOperation operation; // required
/**
*
* @see TReplicationStatus
*/
public TReplicationStatus status; // required
public String srcPath; // required
public String srcCluster; // required
public String srcDb; // required
public String srcTable; // required
public List<String> srcPartitions; // required
public String srcModifiedTime; // required
public String renameToDb; // required
public String renameToTable; // required
public String renameToPath; // required
public Map<String,String> extras; // required
public List<Long> waitingOnJobs; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
ID((short)1, "id"),
CREATE_TIME((short)2, "createTime"),
UPDATE_TIME((short)3, "updateTime"),
/**
*
* @see TReplicationOperation
*/
OPERATION((short)4, "operation"),
/**
*
* @see TReplicationStatus
*/
STATUS((short)5, "status"),
SRC_PATH((short)6, "srcPath"),
SRC_CLUSTER((short)7, "srcCluster"),
SRC_DB((short)8, "srcDb"),
SRC_TABLE((short)9, "srcTable"),
SRC_PARTITIONS((short)10, "srcPartitions"),
SRC_MODIFIED_TIME((short)11, "srcModifiedTime"),
RENAME_TO_DB((short)12, "renameToDb"),
RENAME_TO_TABLE((short)13, "renameToTable"),
RENAME_TO_PATH((short)14, "renameToPath"),
EXTRAS((short)15, "extras"),
WAITING_ON_JOBS((short)16, "waitingOnJobs");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
byName.put(field.getFieldName(), field);
}
}
/**
* Find the _Fields constant that matches fieldId, or null if its not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // ID
return ID;
case 2: // CREATE_TIME
return CREATE_TIME;
case 3: // UPDATE_TIME
return UPDATE_TIME;
case 4: // OPERATION
return OPERATION;
case 5: // STATUS
return STATUS;
case 6: // SRC_PATH
return SRC_PATH;
case 7: // SRC_CLUSTER
return SRC_CLUSTER;
case 8: // SRC_DB
return SRC_DB;
case 9: // SRC_TABLE
return SRC_TABLE;
case 10: // SRC_PARTITIONS
return SRC_PARTITIONS;
case 11: // SRC_MODIFIED_TIME
return SRC_MODIFIED_TIME;
case 12: // RENAME_TO_DB
return RENAME_TO_DB;
case 13: // RENAME_TO_TABLE
return RENAME_TO_TABLE;
case 14: // RENAME_TO_PATH
return RENAME_TO_PATH;
case 15: // EXTRAS
return EXTRAS;
case 16: // WAITING_ON_JOBS
return WAITING_ON_JOBS;
default:
return null;
}
}
/**
* Find the _Fields constant that matches fieldId, throwing an exception
* if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
return fields;
}
/**
* Find the _Fields constant that matches name, or null if its not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
private final short _thriftId;
private final String _fieldName;
_Fields(short thriftId, String fieldName) {
_thriftId = thriftId;
_fieldName = fieldName;
}
public short getThriftFieldId() {
return _thriftId;
}
public String getFieldName() {
return _fieldName;
}
}
  // isset id assignments: bit positions in __isset_bitfield tracking whether the
  // primitive (long) fields have been explicitly assigned. Object fields use null
  // checks instead, so they need no bit.
  private static final int __ID_ISSET_ID = 0;
  private static final int __CREATETIME_ISSET_ID = 1;
  private static final int __UPDATETIME_ISSET_ID = 2;
  private byte __isset_bitfield = 0;
  // Thrift metadata describing every field of this struct; registered with the
  // global struct metadata map so reflective serializers can introspect it.
  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
  static {
    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
    tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
    tmpMap.put(_Fields.UPDATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("updateTime", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
    tmpMap.put(_Fields.OPERATION, new org.apache.thrift.meta_data.FieldMetaData("operation", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TReplicationOperation.class)));
    tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TReplicationStatus.class)));
    tmpMap.put(_Fields.SRC_PATH, new org.apache.thrift.meta_data.FieldMetaData("srcPath", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.SRC_CLUSTER, new org.apache.thrift.meta_data.FieldMetaData("srcCluster", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.SRC_DB, new org.apache.thrift.meta_data.FieldMetaData("srcDb", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.SRC_TABLE, new org.apache.thrift.meta_data.FieldMetaData("srcTable", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.SRC_PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("srcPartitions", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
    tmpMap.put(_Fields.SRC_MODIFIED_TIME, new org.apache.thrift.meta_data.FieldMetaData("srcModifiedTime", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.RENAME_TO_DB, new org.apache.thrift.meta_data.FieldMetaData("renameToDb", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.RENAME_TO_TABLE, new org.apache.thrift.meta_data.FieldMetaData("renameToTable", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.RENAME_TO_PATH, new org.apache.thrift.meta_data.FieldMetaData("renameToPath", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.EXTRAS, new org.apache.thrift.meta_data.FieldMetaData("extras", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
    tmpMap.put(_Fields.WAITING_ON_JOBS, new org.apache.thrift.meta_data.FieldMetaData("waitingOnJobs", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TReplicationJob.class, metaDataMap);
  }
  // No-arg constructor; all fields start unset (primitives 0, objects null).
  public TReplicationJob() {
  }
  // All-fields constructor; marks the three primitive fields as set.
  public TReplicationJob(
    long id,
    long createTime,
    long updateTime,
    TReplicationOperation operation,
    TReplicationStatus status,
    String srcPath,
    String srcCluster,
    String srcDb,
    String srcTable,
    List<String> srcPartitions,
    String srcModifiedTime,
    String renameToDb,
    String renameToTable,
    String renameToPath,
    Map<String,String> extras,
    List<Long> waitingOnJobs)
  {
    this();
    this.id = id;
    setIdIsSet(true);
    this.createTime = createTime;
    setCreateTimeIsSet(true);
    this.updateTime = updateTime;
    setUpdateTimeIsSet(true);
    this.operation = operation;
    this.status = status;
    this.srcPath = srcPath;
    this.srcCluster = srcCluster;
    this.srcDb = srcDb;
    this.srcTable = srcTable;
    this.srcPartitions = srcPartitions;
    this.srcModifiedTime = srcModifiedTime;
    this.renameToDb = renameToDb;
    this.renameToTable = renameToTable;
    this.renameToPath = renameToPath;
    this.extras = extras;
    this.waitingOnJobs = waitingOnJobs;
  }
  /**
   * Performs a deep copy on <i>other</i>. Collections are copied into new
   * containers; element/field values (Strings, enums, boxed Longs) are
   * immutable and therefore shared.
   */
  public TReplicationJob(TReplicationJob other) {
    __isset_bitfield = other.__isset_bitfield;
    this.id = other.id;
    this.createTime = other.createTime;
    this.updateTime = other.updateTime;
    if (other.isSetOperation()) {
      this.operation = other.operation;
    }
    if (other.isSetStatus()) {
      this.status = other.status;
    }
    if (other.isSetSrcPath()) {
      this.srcPath = other.srcPath;
    }
    if (other.isSetSrcCluster()) {
      this.srcCluster = other.srcCluster;
    }
    if (other.isSetSrcDb()) {
      this.srcDb = other.srcDb;
    }
    if (other.isSetSrcTable()) {
      this.srcTable = other.srcTable;
    }
    if (other.isSetSrcPartitions()) {
      List<String> __this__srcPartitions = new ArrayList<String>(other.srcPartitions);
      this.srcPartitions = __this__srcPartitions;
    }
    if (other.isSetSrcModifiedTime()) {
      this.srcModifiedTime = other.srcModifiedTime;
    }
    if (other.isSetRenameToDb()) {
      this.renameToDb = other.renameToDb;
    }
    if (other.isSetRenameToTable()) {
      this.renameToTable = other.renameToTable;
    }
    if (other.isSetRenameToPath()) {
      this.renameToPath = other.renameToPath;
    }
    if (other.isSetExtras()) {
      Map<String,String> __this__extras = new HashMap<String,String>(other.extras);
      this.extras = __this__extras;
    }
    if (other.isSetWaitingOnJobs()) {
      List<Long> __this__waitingOnJobs = new ArrayList<Long>(other.waitingOnJobs);
      this.waitingOnJobs = __this__waitingOnJobs;
    }
  }
  public TReplicationJob deepCopy() {
    return new TReplicationJob(this);
  }
  // Resets every field to its unset state (primitives 0, objects null).
  @Override
  public void clear() {
    setIdIsSet(false);
    this.id = 0;
    setCreateTimeIsSet(false);
    this.createTime = 0;
    setUpdateTimeIsSet(false);
    this.updateTime = 0;
    this.operation = null;
    this.status = null;
    this.srcPath = null;
    this.srcCluster = null;
    this.srcDb = null;
    this.srcTable = null;
    this.srcPartitions = null;
    this.srcModifiedTime = null;
    this.renameToDb = null;
    this.renameToTable = null;
    this.renameToPath = null;
    this.extras = null;
    this.waitingOnJobs = null;
  }
  // --- Per-field accessors. Each field follows the same generated pattern:
  // getX / setX (fluent, returns this) / unsetX / isSetX / setXIsSet.
  // Primitive fields track "set" state via __isset_bitfield; object fields use
  // null. Collection fields additionally get size/iterator/add (or put) helpers
  // that lazily create the backing container.
  public long getId() {
    return this.id;
  }
  public TReplicationJob setId(long id) {
    this.id = id;
    setIdIsSet(true);
    return this;
  }
  public void unsetId() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID_ISSET_ID);
  }
  /** Returns true if field id is set (has been assigned a value) and false otherwise */
  public boolean isSetId() {
    return EncodingUtils.testBit(__isset_bitfield, __ID_ISSET_ID);
  }
  public void setIdIsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
  }
  public long getCreateTime() {
    return this.createTime;
  }
  public TReplicationJob setCreateTime(long createTime) {
    this.createTime = createTime;
    setCreateTimeIsSet(true);
    return this;
  }
  public void unsetCreateTime() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
  }
  /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
  public boolean isSetCreateTime() {
    return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
  }
  public void setCreateTimeIsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
  }
  public long getUpdateTime() {
    return this.updateTime;
  }
  public TReplicationJob setUpdateTime(long updateTime) {
    this.updateTime = updateTime;
    setUpdateTimeIsSet(true);
    return this;
  }
  public void unsetUpdateTime() {
    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPDATETIME_ISSET_ID);
  }
  /** Returns true if field updateTime is set (has been assigned a value) and false otherwise */
  public boolean isSetUpdateTime() {
    return EncodingUtils.testBit(__isset_bitfield, __UPDATETIME_ISSET_ID);
  }
  public void setUpdateTimeIsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPDATETIME_ISSET_ID, value);
  }
  /**
   * 
   * @see TReplicationOperation
   */
  public TReplicationOperation getOperation() {
    return this.operation;
  }
  /**
   * 
   * @see TReplicationOperation
   */
  public TReplicationJob setOperation(TReplicationOperation operation) {
    this.operation = operation;
    return this;
  }
  public void unsetOperation() {
    this.operation = null;
  }
  /** Returns true if field operation is set (has been assigned a value) and false otherwise */
  public boolean isSetOperation() {
    return this.operation != null;
  }
  public void setOperationIsSet(boolean value) {
    if (!value) {
      this.operation = null;
    }
  }
  /**
   * 
   * @see TReplicationStatus
   */
  public TReplicationStatus getStatus() {
    return this.status;
  }
  /**
   * 
   * @see TReplicationStatus
   */
  public TReplicationJob setStatus(TReplicationStatus status) {
    this.status = status;
    return this;
  }
  public void unsetStatus() {
    this.status = null;
  }
  /** Returns true if field status is set (has been assigned a value) and false otherwise */
  public boolean isSetStatus() {
    return this.status != null;
  }
  public void setStatusIsSet(boolean value) {
    if (!value) {
      this.status = null;
    }
  }
  public String getSrcPath() {
    return this.srcPath;
  }
  public TReplicationJob setSrcPath(String srcPath) {
    this.srcPath = srcPath;
    return this;
  }
  public void unsetSrcPath() {
    this.srcPath = null;
  }
  /** Returns true if field srcPath is set (has been assigned a value) and false otherwise */
  public boolean isSetSrcPath() {
    return this.srcPath != null;
  }
  public void setSrcPathIsSet(boolean value) {
    if (!value) {
      this.srcPath = null;
    }
  }
  public String getSrcCluster() {
    return this.srcCluster;
  }
  public TReplicationJob setSrcCluster(String srcCluster) {
    this.srcCluster = srcCluster;
    return this;
  }
  public void unsetSrcCluster() {
    this.srcCluster = null;
  }
  /** Returns true if field srcCluster is set (has been assigned a value) and false otherwise */
  public boolean isSetSrcCluster() {
    return this.srcCluster != null;
  }
  public void setSrcClusterIsSet(boolean value) {
    if (!value) {
      this.srcCluster = null;
    }
  }
  public String getSrcDb() {
    return this.srcDb;
  }
  public TReplicationJob setSrcDb(String srcDb) {
    this.srcDb = srcDb;
    return this;
  }
  public void unsetSrcDb() {
    this.srcDb = null;
  }
  /** Returns true if field srcDb is set (has been assigned a value) and false otherwise */
  public boolean isSetSrcDb() {
    return this.srcDb != null;
  }
  public void setSrcDbIsSet(boolean value) {
    if (!value) {
      this.srcDb = null;
    }
  }
  public String getSrcTable() {
    return this.srcTable;
  }
  public TReplicationJob setSrcTable(String srcTable) {
    this.srcTable = srcTable;
    return this;
  }
  public void unsetSrcTable() {
    this.srcTable = null;
  }
  /** Returns true if field srcTable is set (has been assigned a value) and false otherwise */
  public boolean isSetSrcTable() {
    return this.srcTable != null;
  }
  public void setSrcTableIsSet(boolean value) {
    if (!value) {
      this.srcTable = null;
    }
  }
  public int getSrcPartitionsSize() {
    return (this.srcPartitions == null) ? 0 : this.srcPartitions.size();
  }
  public java.util.Iterator<String> getSrcPartitionsIterator() {
    return (this.srcPartitions == null) ? null : this.srcPartitions.iterator();
  }
  public void addToSrcPartitions(String elem) {
    if (this.srcPartitions == null) {
      this.srcPartitions = new ArrayList<String>();
    }
    this.srcPartitions.add(elem);
  }
  public List<String> getSrcPartitions() {
    return this.srcPartitions;
  }
  public TReplicationJob setSrcPartitions(List<String> srcPartitions) {
    this.srcPartitions = srcPartitions;
    return this;
  }
  public void unsetSrcPartitions() {
    this.srcPartitions = null;
  }
  /** Returns true if field srcPartitions is set (has been assigned a value) and false otherwise */
  public boolean isSetSrcPartitions() {
    return this.srcPartitions != null;
  }
  public void setSrcPartitionsIsSet(boolean value) {
    if (!value) {
      this.srcPartitions = null;
    }
  }
  public String getSrcModifiedTime() {
    return this.srcModifiedTime;
  }
  public TReplicationJob setSrcModifiedTime(String srcModifiedTime) {
    this.srcModifiedTime = srcModifiedTime;
    return this;
  }
  public void unsetSrcModifiedTime() {
    this.srcModifiedTime = null;
  }
  /** Returns true if field srcModifiedTime is set (has been assigned a value) and false otherwise */
  public boolean isSetSrcModifiedTime() {
    return this.srcModifiedTime != null;
  }
  public void setSrcModifiedTimeIsSet(boolean value) {
    if (!value) {
      this.srcModifiedTime = null;
    }
  }
  public String getRenameToDb() {
    return this.renameToDb;
  }
  public TReplicationJob setRenameToDb(String renameToDb) {
    this.renameToDb = renameToDb;
    return this;
  }
  public void unsetRenameToDb() {
    this.renameToDb = null;
  }
  /** Returns true if field renameToDb is set (has been assigned a value) and false otherwise */
  public boolean isSetRenameToDb() {
    return this.renameToDb != null;
  }
  public void setRenameToDbIsSet(boolean value) {
    if (!value) {
      this.renameToDb = null;
    }
  }
  public String getRenameToTable() {
    return this.renameToTable;
  }
  public TReplicationJob setRenameToTable(String renameToTable) {
    this.renameToTable = renameToTable;
    return this;
  }
  public void unsetRenameToTable() {
    this.renameToTable = null;
  }
  /** Returns true if field renameToTable is set (has been assigned a value) and false otherwise */
  public boolean isSetRenameToTable() {
    return this.renameToTable != null;
  }
  public void setRenameToTableIsSet(boolean value) {
    if (!value) {
      this.renameToTable = null;
    }
  }
  public String getRenameToPath() {
    return this.renameToPath;
  }
  public TReplicationJob setRenameToPath(String renameToPath) {
    this.renameToPath = renameToPath;
    return this;
  }
  public void unsetRenameToPath() {
    this.renameToPath = null;
  }
  /** Returns true if field renameToPath is set (has been assigned a value) and false otherwise */
  public boolean isSetRenameToPath() {
    return this.renameToPath != null;
  }
  public void setRenameToPathIsSet(boolean value) {
    if (!value) {
      this.renameToPath = null;
    }
  }
  public int getExtrasSize() {
    return (this.extras == null) ? 0 : this.extras.size();
  }
  public void putToExtras(String key, String val) {
    if (this.extras == null) {
      this.extras = new HashMap<String,String>();
    }
    this.extras.put(key, val);
  }
  public Map<String,String> getExtras() {
    return this.extras;
  }
  public TReplicationJob setExtras(Map<String,String> extras) {
    this.extras = extras;
    return this;
  }
  public void unsetExtras() {
    this.extras = null;
  }
  /** Returns true if field extras is set (has been assigned a value) and false otherwise */
  public boolean isSetExtras() {
    return this.extras != null;
  }
  public void setExtrasIsSet(boolean value) {
    if (!value) {
      this.extras = null;
    }
  }
  public int getWaitingOnJobsSize() {
    return (this.waitingOnJobs == null) ? 0 : this.waitingOnJobs.size();
  }
  public java.util.Iterator<Long> getWaitingOnJobsIterator() {
    return (this.waitingOnJobs == null) ? null : this.waitingOnJobs.iterator();
  }
  public void addToWaitingOnJobs(long elem) {
    if (this.waitingOnJobs == null) {
      this.waitingOnJobs = new ArrayList<Long>();
    }
    this.waitingOnJobs.add(elem);
  }
  public List<Long> getWaitingOnJobs() {
    return this.waitingOnJobs;
  }
  public TReplicationJob setWaitingOnJobs(List<Long> waitingOnJobs) {
    this.waitingOnJobs = waitingOnJobs;
    return this;
  }
  public void unsetWaitingOnJobs() {
    this.waitingOnJobs = null;
  }
  /** Returns true if field waitingOnJobs is set (has been assigned a value) and false otherwise */
  public boolean isSetWaitingOnJobs() {
    return this.waitingOnJobs != null;
  }
  public void setWaitingOnJobsIsSet(boolean value) {
    if (!value) {
      this.waitingOnJobs = null;
    }
  }
  // Generic field mutator used by reflective serializers: null unsets the field,
  // any other value is cast to the field's declared type and assigned.
  public void setFieldValue(_Fields field, Object value) {
    switch (field) {
    case ID:
      if (value == null) {
        unsetId();
      } else {
        setId((Long)value);
      }
      break;
    case CREATE_TIME:
      if (value == null) {
        unsetCreateTime();
      } else {
        setCreateTime((Long)value);
      }
      break;
    case UPDATE_TIME:
      if (value == null) {
        unsetUpdateTime();
      } else {
        setUpdateTime((Long)value);
      }
      break;
    case OPERATION:
      if (value == null) {
        unsetOperation();
      } else {
        setOperation((TReplicationOperation)value);
      }
      break;
    case STATUS:
      if (value == null) {
        unsetStatus();
      } else {
        setStatus((TReplicationStatus)value);
      }
      break;
    case SRC_PATH:
      if (value == null) {
        unsetSrcPath();
      } else {
        setSrcPath((String)value);
      }
      break;
    case SRC_CLUSTER:
      if (value == null) {
        unsetSrcCluster();
      } else {
        setSrcCluster((String)value);
      }
      break;
    case SRC_DB:
      if (value == null) {
        unsetSrcDb();
      } else {
        setSrcDb((String)value);
      }
      break;
    case SRC_TABLE:
      if (value == null) {
        unsetSrcTable();
      } else {
        setSrcTable((String)value);
      }
      break;
    case SRC_PARTITIONS:
      if (value == null) {
        unsetSrcPartitions();
      } else {
        setSrcPartitions((List<String>)value);
      }
      break;
    case SRC_MODIFIED_TIME:
      if (value == null) {
        unsetSrcModifiedTime();
      } else {
        setSrcModifiedTime((String)value);
      }
      break;
    case RENAME_TO_DB:
      if (value == null) {
        unsetRenameToDb();
      } else {
        setRenameToDb((String)value);
      }
      break;
    case RENAME_TO_TABLE:
      if (value == null) {
        unsetRenameToTable();
      } else {
        setRenameToTable((String)value);
      }
      break;
    case RENAME_TO_PATH:
      if (value == null) {
        unsetRenameToPath();
      } else {
        setRenameToPath((String)value);
      }
      break;
    case EXTRAS:
      if (value == null) {
        unsetExtras();
      } else {
        setExtras((Map<String,String>)value);
      }
      break;
    case WAITING_ON_JOBS:
      if (value == null) {
        unsetWaitingOnJobs();
      } else {
        setWaitingOnJobs((List<Long>)value);
      }
      break;
    }
  }
  // Generic field reader; primitives are boxed. Throws IllegalStateException for
  // an unrecognized field (unreachable for a non-null _Fields value).
  public Object getFieldValue(_Fields field) {
    switch (field) {
    case ID:
      return Long.valueOf(getId());
    case CREATE_TIME:
      return Long.valueOf(getCreateTime());
    case UPDATE_TIME:
      return Long.valueOf(getUpdateTime());
    case OPERATION:
      return getOperation();
    case STATUS:
      return getStatus();
    case SRC_PATH:
      return getSrcPath();
    case SRC_CLUSTER:
      return getSrcCluster();
    case SRC_DB:
      return getSrcDb();
    case SRC_TABLE:
      return getSrcTable();
    case SRC_PARTITIONS:
      return getSrcPartitions();
    case SRC_MODIFIED_TIME:
      return getSrcModifiedTime();
    case RENAME_TO_DB:
      return getRenameToDb();
    case RENAME_TO_TABLE:
      return getRenameToTable();
    case RENAME_TO_PATH:
      return getRenameToPath();
    case EXTRAS:
      return getExtras();
    case WAITING_ON_JOBS:
      return getWaitingOnJobs();
    }
    throw new IllegalStateException();
  }
  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
  public boolean isSet(_Fields field) {
    if (field == null) {
      throw new IllegalArgumentException();
    }
    switch (field) {
    case ID:
      return isSetId();
    case CREATE_TIME:
      return isSetCreateTime();
    case UPDATE_TIME:
      return isSetUpdateTime();
    case OPERATION:
      return isSetOperation();
    case STATUS:
      return isSetStatus();
    case SRC_PATH:
      return isSetSrcPath();
    case SRC_CLUSTER:
      return isSetSrcCluster();
    case SRC_DB:
      return isSetSrcDb();
    case SRC_TABLE:
      return isSetSrcTable();
    case SRC_PARTITIONS:
      return isSetSrcPartitions();
    case SRC_MODIFIED_TIME:
      return isSetSrcModifiedTime();
    case RENAME_TO_DB:
      return isSetRenameToDb();
    case RENAME_TO_TABLE:
      return isSetRenameToTable();
    case RENAME_TO_PATH:
      return isSetRenameToPath();
    case EXTRAS:
      return isSetExtras();
    case WAITING_ON_JOBS:
      return isSetWaitingOnJobs();
    }
    throw new IllegalStateException();
  }
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof TReplicationJob)
return this.equals((TReplicationJob)that);
return false;
}
  // Field-by-field equality. Primitive fields (id, createTime, updateTime) are
  // always compared; each nullable field is equal only when both sides agree on
  // set-ness and, if set, on value. hashCode() must stay consistent with this.
  public boolean equals(TReplicationJob that) {
    if (that == null)
      return false;
    boolean this_present_id = true;
    boolean that_present_id = true;
    if (this_present_id || that_present_id) {
      if (!(this_present_id && that_present_id))
        return false;
      if (this.id != that.id)
        return false;
    }
    boolean this_present_createTime = true;
    boolean that_present_createTime = true;
    if (this_present_createTime || that_present_createTime) {
      if (!(this_present_createTime && that_present_createTime))
        return false;
      if (this.createTime != that.createTime)
        return false;
    }
    boolean this_present_updateTime = true;
    boolean that_present_updateTime = true;
    if (this_present_updateTime || that_present_updateTime) {
      if (!(this_present_updateTime && that_present_updateTime))
        return false;
      if (this.updateTime != that.updateTime)
        return false;
    }
    boolean this_present_operation = true && this.isSetOperation();
    boolean that_present_operation = true && that.isSetOperation();
    if (this_present_operation || that_present_operation) {
      if (!(this_present_operation && that_present_operation))
        return false;
      if (!this.operation.equals(that.operation))
        return false;
    }
    boolean this_present_status = true && this.isSetStatus();
    boolean that_present_status = true && that.isSetStatus();
    if (this_present_status || that_present_status) {
      if (!(this_present_status && that_present_status))
        return false;
      if (!this.status.equals(that.status))
        return false;
    }
    boolean this_present_srcPath = true && this.isSetSrcPath();
    boolean that_present_srcPath = true && that.isSetSrcPath();
    if (this_present_srcPath || that_present_srcPath) {
      if (!(this_present_srcPath && that_present_srcPath))
        return false;
      if (!this.srcPath.equals(that.srcPath))
        return false;
    }
    boolean this_present_srcCluster = true && this.isSetSrcCluster();
    boolean that_present_srcCluster = true && that.isSetSrcCluster();
    if (this_present_srcCluster || that_present_srcCluster) {
      if (!(this_present_srcCluster && that_present_srcCluster))
        return false;
      if (!this.srcCluster.equals(that.srcCluster))
        return false;
    }
    boolean this_present_srcDb = true && this.isSetSrcDb();
    boolean that_present_srcDb = true && that.isSetSrcDb();
    if (this_present_srcDb || that_present_srcDb) {
      if (!(this_present_srcDb && that_present_srcDb))
        return false;
      if (!this.srcDb.equals(that.srcDb))
        return false;
    }
    boolean this_present_srcTable = true && this.isSetSrcTable();
    boolean that_present_srcTable = true && that.isSetSrcTable();
    if (this_present_srcTable || that_present_srcTable) {
      if (!(this_present_srcTable && that_present_srcTable))
        return false;
      if (!this.srcTable.equals(that.srcTable))
        return false;
    }
    boolean this_present_srcPartitions = true && this.isSetSrcPartitions();
    boolean that_present_srcPartitions = true && that.isSetSrcPartitions();
    if (this_present_srcPartitions || that_present_srcPartitions) {
      if (!(this_present_srcPartitions && that_present_srcPartitions))
        return false;
      if (!this.srcPartitions.equals(that.srcPartitions))
        return false;
    }
    boolean this_present_srcModifiedTime = true && this.isSetSrcModifiedTime();
    boolean that_present_srcModifiedTime = true && that.isSetSrcModifiedTime();
    if (this_present_srcModifiedTime || that_present_srcModifiedTime) {
      if (!(this_present_srcModifiedTime && that_present_srcModifiedTime))
        return false;
      if (!this.srcModifiedTime.equals(that.srcModifiedTime))
        return false;
    }
    boolean this_present_renameToDb = true && this.isSetRenameToDb();
    boolean that_present_renameToDb = true && that.isSetRenameToDb();
    if (this_present_renameToDb || that_present_renameToDb) {
      if (!(this_present_renameToDb && that_present_renameToDb))
        return false;
      if (!this.renameToDb.equals(that.renameToDb))
        return false;
    }
    boolean this_present_renameToTable = true && this.isSetRenameToTable();
    boolean that_present_renameToTable = true && that.isSetRenameToTable();
    if (this_present_renameToTable || that_present_renameToTable) {
      if (!(this_present_renameToTable && that_present_renameToTable))
        return false;
      if (!this.renameToTable.equals(that.renameToTable))
        return false;
    }
    boolean this_present_renameToPath = true && this.isSetRenameToPath();
    boolean that_present_renameToPath = true && that.isSetRenameToPath();
    if (this_present_renameToPath || that_present_renameToPath) {
      if (!(this_present_renameToPath && that_present_renameToPath))
        return false;
      if (!this.renameToPath.equals(that.renameToPath))
        return false;
    }
    boolean this_present_extras = true && this.isSetExtras();
    boolean that_present_extras = true && that.isSetExtras();
    if (this_present_extras || that_present_extras) {
      if (!(this_present_extras && that_present_extras))
        return false;
      if (!this.extras.equals(that.extras))
        return false;
    }
    boolean this_present_waitingOnJobs = true && this.isSetWaitingOnJobs();
    boolean that_present_waitingOnJobs = true && that.isSetWaitingOnJobs();
    if (this_present_waitingOnJobs || that_present_waitingOnJobs) {
      if (!(this_present_waitingOnJobs && that_present_waitingOnJobs))
        return false;
      if (!this.waitingOnJobs.equals(that.waitingOnJobs))
        return false;
    }
    return true;
  }
@Override
public int hashCode() {
return 0;
}
  // Total ordering over all fields in declaration order; for each field an unset
  // value sorts before a set one, then set values compare via TBaseHelper.
  @Override
  public int compareTo(TReplicationJob other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }
    int lastComparison = 0;
    lastComparison = Boolean.valueOf(isSetId()).compareTo(other.isSetId());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetId()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetCreateTime()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetUpdateTime()).compareTo(other.isSetUpdateTime());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetUpdateTime()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.updateTime, other.updateTime);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetOperation()).compareTo(other.isSetOperation());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetOperation()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operation, other.operation);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetStatus()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcPath()).compareTo(other.isSetSrcPath());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcPath()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcPath, other.srcPath);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcCluster()).compareTo(other.isSetSrcCluster());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcCluster()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcCluster, other.srcCluster);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcDb()).compareTo(other.isSetSrcDb());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcDb()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcDb, other.srcDb);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcTable()).compareTo(other.isSetSrcTable());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcTable()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcTable, other.srcTable);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcPartitions()).compareTo(other.isSetSrcPartitions());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcPartitions()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcPartitions, other.srcPartitions);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetSrcModifiedTime()).compareTo(other.isSetSrcModifiedTime());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetSrcModifiedTime()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcModifiedTime, other.srcModifiedTime);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetRenameToDb()).compareTo(other.isSetRenameToDb());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetRenameToDb()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.renameToDb, other.renameToDb);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetRenameToTable()).compareTo(other.isSetRenameToTable());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetRenameToTable()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.renameToTable, other.renameToTable);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetRenameToPath()).compareTo(other.isSetRenameToPath());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetRenameToPath()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.renameToPath, other.renameToPath);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetExtras()).compareTo(other.isSetExtras());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetExtras()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.extras, other.extras);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    lastComparison = Boolean.valueOf(isSetWaitingOnJobs()).compareTo(other.isSetWaitingOnJobs());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (isSetWaitingOnJobs()) {
      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.waitingOnJobs, other.waitingOnJobs);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
  public _Fields fieldForId(int fieldId) {
    return _Fields.findByThriftId(fieldId);
  }
  // Serialization entry points: dispatch to the scheme (standard/tuple) that
  // matches the protocol in use.
  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
  }
  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
  }
@Override
public String toString() {
StringBuilder sb = new StringBuilder("TReplicationJob(");
boolean first = true;
sb.append("id:");
sb.append(this.id);
first = false;
if (!first) sb.append(", ");
sb.append("createTime:");
sb.append(this.createTime);
first = false;
if (!first) sb.append(", ");
sb.append("updateTime:");
sb.append(this.updateTime);
first = false;
if (!first) sb.append(", ");
sb.append("operation:");
if (this.operation == null) {
sb.append("null");
} else {
sb.append(this.operation);
}
first = false;
if (!first) sb.append(", ");
sb.append("status:");
if (this.status == null) {
sb.append("null");
} else {
sb.append(this.status);
}
first = false;
if (!first) sb.append(", ");
sb.append("srcPath:");
if (this.srcPath == null) {
sb.append("null");
} else {
sb.append(this.srcPath);
}
first = false;
if (!first) sb.append(", ");
sb.append("srcCluster:");
if (this.srcCluster == null) {
sb.append("null");
} else {
sb.append(this.srcCluster);
}
first = false;
if (!first) sb.append(", ");
sb.append("srcDb:");
if (this.srcDb == null) {
sb.append("null");
} else {
sb.append(this.srcDb);
}
first = false;
if (!first) sb.append(", ");
sb.append("srcTable:");
if (this.srcTable == null) {
sb.append("null");
} else {
sb.append(this.srcTable);
}
first = false;
if (!first) sb.append(", ");
sb.append("srcPartitions:");
if (this.srcPartitions == null) {
sb.append("null");
} else {
sb.append(this.srcPartitions);
}
first = false;
if (!first) sb.append(", ");
sb.append("srcModifiedTime:");
if (this.srcModifiedTime == null) {
sb.append("null");
} else {
sb.append(this.srcModifiedTime);
}
first = false;
if (!first) sb.append(", ");
sb.append("renameToDb:");
if (this.renameToDb == null) {
sb.append("null");
} else {
sb.append(this.renameToDb);
}
first = false;
if (!first) sb.append(", ");
sb.append("renameToTable:");
if (this.renameToTable == null) {
sb.append("null");
} else {
sb.append(this.renameToTable);
}
first = false;
if (!first) sb.append(", ");
sb.append("renameToPath:");
if (this.renameToPath == null) {
sb.append("null");
} else {
sb.append(this.renameToPath);
}
first = false;
if (!first) sb.append(", ");
sb.append("extras:");
if (this.extras == null) {
sb.append("null");
} else {
sb.append(this.extras);
}
first = false;
if (!first) sb.append(", ");
sb.append("waitingOnJobs:");
if (this.waitingOnJobs == null) {
sb.append("null");
} else {
sb.append(this.waitingOnJobs);
}
first = false;
sb.append(")");
return sb.toString();
}
  /**
   * Validates required fields and nested structs before serialization.
   * This struct declares no required fields and no sub-structs, so the method
   * is intentionally a no-op kept for the Thrift serialization contract.
   */
  public void validate() throws org.apache.thrift.TException {
    // check for required fields
    // check for sub-struct validity
  }
  /**
   * Java serialization hook: encodes this struct with the Thrift compact
   * protocol over the object stream, wrapping any Thrift failure in an
   * IOException as required by the writeObject contract.
   */
  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
    try {
      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  /**
   * Java deserialization hook: resets the primitive-field isset bitfield
   * (Java serialization bypasses the default constructor) and then decodes
   * the struct with the Thrift compact protocol, wrapping Thrift failures
   * in an IOException.
   */
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    try {
      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
      __isset_bitfield = 0;
      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
    } catch (org.apache.thrift.TException te) {
      throw new java.io.IOException(te);
    }
  }
  /** Supplies standard-protocol serializers; looked up via the schemes map in read/write. */
  private static class TReplicationJobStandardSchemeFactory implements SchemeFactory {
    public TReplicationJobStandardScheme getScheme() {
      return new TReplicationJobStandardScheme();
    }
  }
  /**
   * Standard (field-tagged) protocol serialization for TReplicationJob.
   * Reading loops over tagged fields until STOP and skips unknown field ids or
   * mismatched types for forward compatibility; writing emits the primitive
   * i64 fields unconditionally and object fields only when non-null.
   */
  private static class TReplicationJobStandardScheme extends StandardScheme<TReplicationJob> {

    public void read(org.apache.thrift.protocol.TProtocol iprot, TReplicationJob struct) throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TField schemeField;
      iprot.readStructBegin();
      while (true)
      {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
          break;
        }
        // Dispatch on the wire field id; each case verifies the wire type
        // before reading and otherwise skips the value.
        switch (schemeField.id) {
          case 1: // ID
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.id = iprot.readI64();
              struct.setIdIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 2: // CREATE_TIME
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.createTime = iprot.readI64();
              struct.setCreateTimeIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 3: // UPDATE_TIME
            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
              struct.updateTime = iprot.readI64();
              struct.setUpdateTimeIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 4: // OPERATION
            // Enums travel on the wire as i32 values.
            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
              struct.operation = TReplicationOperation.findByValue(iprot.readI32());
              struct.setOperationIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 5: // STATUS
            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
              struct.status = TReplicationStatus.findByValue(iprot.readI32());
              struct.setStatusIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 6: // SRC_PATH
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcPath = iprot.readString();
              struct.setSrcPathIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 7: // SRC_CLUSTER
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcCluster = iprot.readString();
              struct.setSrcClusterIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 8: // SRC_DB
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcDb = iprot.readString();
              struct.setSrcDbIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 9: // SRC_TABLE
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcTable = iprot.readString();
              struct.setSrcTableIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 10: // SRC_PARTITIONS
            // List of partition name strings; sized from the list header.
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
                struct.srcPartitions = new ArrayList<String>(_list0.size);
                for (int _i1 = 0; _i1 < _list0.size; ++_i1)
                {
                  String _elem2;
                  _elem2 = iprot.readString();
                  struct.srcPartitions.add(_elem2);
                }
                iprot.readListEnd();
              }
              struct.setSrcPartitionsIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 11: // SRC_MODIFIED_TIME
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.srcModifiedTime = iprot.readString();
              struct.setSrcModifiedTimeIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 12: // RENAME_TO_DB
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.renameToDb = iprot.readString();
              struct.setRenameToDbIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 13: // RENAME_TO_TABLE
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.renameToTable = iprot.readString();
              struct.setRenameToTableIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 14: // RENAME_TO_PATH
            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
              struct.renameToPath = iprot.readString();
              struct.setRenameToPathIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 15: // EXTRAS
            // String->string map; presized at 2x to keep the load factor low.
            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
                org.apache.thrift.protocol.TMap _map3 = iprot.readMapBegin();
                struct.extras = new HashMap<String,String>(2*_map3.size);
                for (int _i4 = 0; _i4 < _map3.size; ++_i4)
                {
                  String _key5;
                  String _val6;
                  _key5 = iprot.readString();
                  _val6 = iprot.readString();
                  struct.extras.put(_key5, _val6);
                }
                iprot.readMapEnd();
              }
              struct.setExtrasIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          case 16: // WAITING_ON_JOBS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
                org.apache.thrift.protocol.TList _list7 = iprot.readListBegin();
                struct.waitingOnJobs = new ArrayList<Long>(_list7.size);
                for (int _i8 = 0; _i8 < _list7.size; ++_i8)
                {
                  long _elem9;
                  _elem9 = iprot.readI64();
                  struct.waitingOnJobs.add(_elem9);
                }
                iprot.readListEnd();
              }
              struct.setWaitingOnJobsIsSet(true);
            } else {
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
          default:
            // Unknown field id: skip for forward compatibility.
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
      }
      iprot.readStructEnd();

      // check for required fields of primitive type, which can't be checked in the validate method
      struct.validate();
    }

    public void write(org.apache.thrift.protocol.TProtocol oprot, TReplicationJob struct) throws org.apache.thrift.TException {
      struct.validate();

      oprot.writeStructBegin(STRUCT_DESC);
      // Primitive i64 fields are always written; object fields only when non-null.
      oprot.writeFieldBegin(ID_FIELD_DESC);
      oprot.writeI64(struct.id);
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
      oprot.writeI64(struct.createTime);
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(UPDATE_TIME_FIELD_DESC);
      oprot.writeI64(struct.updateTime);
      oprot.writeFieldEnd();
      if (struct.operation != null) {
        oprot.writeFieldBegin(OPERATION_FIELD_DESC);
        oprot.writeI32(struct.operation.getValue());
        oprot.writeFieldEnd();
      }
      if (struct.status != null) {
        oprot.writeFieldBegin(STATUS_FIELD_DESC);
        oprot.writeI32(struct.status.getValue());
        oprot.writeFieldEnd();
      }
      if (struct.srcPath != null) {
        oprot.writeFieldBegin(SRC_PATH_FIELD_DESC);
        oprot.writeString(struct.srcPath);
        oprot.writeFieldEnd();
      }
      if (struct.srcCluster != null) {
        oprot.writeFieldBegin(SRC_CLUSTER_FIELD_DESC);
        oprot.writeString(struct.srcCluster);
        oprot.writeFieldEnd();
      }
      if (struct.srcDb != null) {
        oprot.writeFieldBegin(SRC_DB_FIELD_DESC);
        oprot.writeString(struct.srcDb);
        oprot.writeFieldEnd();
      }
      if (struct.srcTable != null) {
        oprot.writeFieldBegin(SRC_TABLE_FIELD_DESC);
        oprot.writeString(struct.srcTable);
        oprot.writeFieldEnd();
      }
      if (struct.srcPartitions != null) {
        oprot.writeFieldBegin(SRC_PARTITIONS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.srcPartitions.size()));
          for (String _iter10 : struct.srcPartitions)
          {
            oprot.writeString(_iter10);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      if (struct.srcModifiedTime != null) {
        oprot.writeFieldBegin(SRC_MODIFIED_TIME_FIELD_DESC);
        oprot.writeString(struct.srcModifiedTime);
        oprot.writeFieldEnd();
      }
      if (struct.renameToDb != null) {
        oprot.writeFieldBegin(RENAME_TO_DB_FIELD_DESC);
        oprot.writeString(struct.renameToDb);
        oprot.writeFieldEnd();
      }
      if (struct.renameToTable != null) {
        oprot.writeFieldBegin(RENAME_TO_TABLE_FIELD_DESC);
        oprot.writeString(struct.renameToTable);
        oprot.writeFieldEnd();
      }
      if (struct.renameToPath != null) {
        oprot.writeFieldBegin(RENAME_TO_PATH_FIELD_DESC);
        oprot.writeString(struct.renameToPath);
        oprot.writeFieldEnd();
      }
      if (struct.extras != null) {
        oprot.writeFieldBegin(EXTRAS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.extras.size()));
          for (Map.Entry<String, String> _iter11 : struct.extras.entrySet())
          {
            oprot.writeString(_iter11.getKey());
            oprot.writeString(_iter11.getValue());
          }
          oprot.writeMapEnd();
        }
        oprot.writeFieldEnd();
      }
      if (struct.waitingOnJobs != null) {
        oprot.writeFieldBegin(WAITING_ON_JOBS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.waitingOnJobs.size()));
          for (long _iter12 : struct.waitingOnJobs)
          {
            oprot.writeI64(_iter12);
          }
          oprot.writeListEnd();
        }
        oprot.writeFieldEnd();
      }
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }

  }
  /** Supplies tuple-protocol serializers; looked up via the schemes map in read/write. */
  private static class TReplicationJobTupleSchemeFactory implements SchemeFactory {
    public TReplicationJobTupleScheme getScheme() {
      return new TReplicationJobTupleScheme();
    }
  }
  /**
   * Compact tuple-protocol serialization for TReplicationJob: a 16-bit
   * presence bitset is written first, then the values of the set fields in
   * field-id order. Reader and writer must agree on this exact order, so the
   * field sequence below is load-bearing.
   */
  private static class TReplicationJobTupleScheme extends TupleScheme<TReplicationJob> {

    @Override
    public void write(org.apache.thrift.protocol.TProtocol prot, TReplicationJob struct) throws org.apache.thrift.TException {
      TTupleProtocol oprot = (TTupleProtocol) prot;
      // Bit i marks field i+1 as present; bits must match the value order below.
      BitSet optionals = new BitSet();
      if (struct.isSetId()) {
        optionals.set(0);
      }
      if (struct.isSetCreateTime()) {
        optionals.set(1);
      }
      if (struct.isSetUpdateTime()) {
        optionals.set(2);
      }
      if (struct.isSetOperation()) {
        optionals.set(3);
      }
      if (struct.isSetStatus()) {
        optionals.set(4);
      }
      if (struct.isSetSrcPath()) {
        optionals.set(5);
      }
      if (struct.isSetSrcCluster()) {
        optionals.set(6);
      }
      if (struct.isSetSrcDb()) {
        optionals.set(7);
      }
      if (struct.isSetSrcTable()) {
        optionals.set(8);
      }
      if (struct.isSetSrcPartitions()) {
        optionals.set(9);
      }
      if (struct.isSetSrcModifiedTime()) {
        optionals.set(10);
      }
      if (struct.isSetRenameToDb()) {
        optionals.set(11);
      }
      if (struct.isSetRenameToTable()) {
        optionals.set(12);
      }
      if (struct.isSetRenameToPath()) {
        optionals.set(13);
      }
      if (struct.isSetExtras()) {
        optionals.set(14);
      }
      if (struct.isSetWaitingOnJobs()) {
        optionals.set(15);
      }
      oprot.writeBitSet(optionals, 16);
      // Values of set fields only, in the same order as the bitset.
      if (struct.isSetId()) {
        oprot.writeI64(struct.id);
      }
      if (struct.isSetCreateTime()) {
        oprot.writeI64(struct.createTime);
      }
      if (struct.isSetUpdateTime()) {
        oprot.writeI64(struct.updateTime);
      }
      if (struct.isSetOperation()) {
        oprot.writeI32(struct.operation.getValue());
      }
      if (struct.isSetStatus()) {
        oprot.writeI32(struct.status.getValue());
      }
      if (struct.isSetSrcPath()) {
        oprot.writeString(struct.srcPath);
      }
      if (struct.isSetSrcCluster()) {
        oprot.writeString(struct.srcCluster);
      }
      if (struct.isSetSrcDb()) {
        oprot.writeString(struct.srcDb);
      }
      if (struct.isSetSrcTable()) {
        oprot.writeString(struct.srcTable);
      }
      if (struct.isSetSrcPartitions()) {
        {
          // Containers are written as an explicit size followed by elements.
          oprot.writeI32(struct.srcPartitions.size());
          for (String _iter13 : struct.srcPartitions)
          {
            oprot.writeString(_iter13);
          }
        }
      }
      if (struct.isSetSrcModifiedTime()) {
        oprot.writeString(struct.srcModifiedTime);
      }
      if (struct.isSetRenameToDb()) {
        oprot.writeString(struct.renameToDb);
      }
      if (struct.isSetRenameToTable()) {
        oprot.writeString(struct.renameToTable);
      }
      if (struct.isSetRenameToPath()) {
        oprot.writeString(struct.renameToPath);
      }
      if (struct.isSetExtras()) {
        {
          oprot.writeI32(struct.extras.size());
          for (Map.Entry<String, String> _iter14 : struct.extras.entrySet())
          {
            oprot.writeString(_iter14.getKey());
            oprot.writeString(_iter14.getValue());
          }
        }
      }
      if (struct.isSetWaitingOnJobs()) {
        {
          oprot.writeI32(struct.waitingOnJobs.size());
          for (long _iter15 : struct.waitingOnJobs)
          {
            oprot.writeI64(_iter15);
          }
        }
      }
    }

    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, TReplicationJob struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      // Read the presence bitset, then the values for each set bit in order.
      BitSet incoming = iprot.readBitSet(16);
      if (incoming.get(0)) {
        struct.id = iprot.readI64();
        struct.setIdIsSet(true);
      }
      if (incoming.get(1)) {
        struct.createTime = iprot.readI64();
        struct.setCreateTimeIsSet(true);
      }
      if (incoming.get(2)) {
        struct.updateTime = iprot.readI64();
        struct.setUpdateTimeIsSet(true);
      }
      if (incoming.get(3)) {
        struct.operation = TReplicationOperation.findByValue(iprot.readI32());
        struct.setOperationIsSet(true);
      }
      if (incoming.get(4)) {
        struct.status = TReplicationStatus.findByValue(iprot.readI32());
        struct.setStatusIsSet(true);
      }
      if (incoming.get(5)) {
        struct.srcPath = iprot.readString();
        struct.setSrcPathIsSet(true);
      }
      if (incoming.get(6)) {
        struct.srcCluster = iprot.readString();
        struct.setSrcClusterIsSet(true);
      }
      if (incoming.get(7)) {
        struct.srcDb = iprot.readString();
        struct.setSrcDbIsSet(true);
      }
      if (incoming.get(8)) {
        struct.srcTable = iprot.readString();
        struct.setSrcTableIsSet(true);
      }
      if (incoming.get(9)) {
        {
          // List headers carry no type byte in tuple encoding; the element
          // type is fixed by the schema and the size read explicitly.
          org.apache.thrift.protocol.TList _list16 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
          struct.srcPartitions = new ArrayList<String>(_list16.size);
          for (int _i17 = 0; _i17 < _list16.size; ++_i17)
          {
            String _elem18;
            _elem18 = iprot.readString();
            struct.srcPartitions.add(_elem18);
          }
        }
        struct.setSrcPartitionsIsSet(true);
      }
      if (incoming.get(10)) {
        struct.srcModifiedTime = iprot.readString();
        struct.setSrcModifiedTimeIsSet(true);
      }
      if (incoming.get(11)) {
        struct.renameToDb = iprot.readString();
        struct.setRenameToDbIsSet(true);
      }
      if (incoming.get(12)) {
        struct.renameToTable = iprot.readString();
        struct.setRenameToTableIsSet(true);
      }
      if (incoming.get(13)) {
        struct.renameToPath = iprot.readString();
        struct.setRenameToPathIsSet(true);
      }
      if (incoming.get(14)) {
        {
          org.apache.thrift.protocol.TMap _map19 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
          struct.extras = new HashMap<String,String>(2*_map19.size);
          for (int _i20 = 0; _i20 < _map19.size; ++_i20)
          {
            String _key21;
            String _val22;
            _key21 = iprot.readString();
            _val22 = iprot.readString();
            struct.extras.put(_key21, _val22);
          }
        }
        struct.setExtrasIsSet(true);
      }
      if (incoming.get(15)) {
        {
          org.apache.thrift.protocol.TList _list23 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
          struct.waitingOnJobs = new ArrayList<Long>(_list23.size);
          for (int _i24 = 0; _i24 < _list23.size; ++_i24)
          {
            long _elem25;
            _elem25 = iprot.readI64();
            struct.waitingOnJobs.add(_elem25);
          }
        }
        struct.setWaitingOnJobsIsSet(true);
      }
    }

  }
}
| 9,455 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/CopyPartitionTaskTest.java | package test;
import static org.junit.Assert.assertEquals;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.Test;
import java.io.IOException;
import java.util.Optional;
public class CopyPartitionTaskTest extends MockClusterTest {
private static final Log LOG = LogFactory.getLog(CopyPartitionTaskTest.class);
@Test
public void testCopyPartition()
throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
// Create a partitioned table in the source
HiveObjectSpec tableSpec = new HiveObjectSpec("test_db", "test_table");
Table srcTable = ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
TableType.MANAGED_TABLE, srcWarehouseRoot);
// Create a partition in the source table
HiveObjectSpec partitionSpec = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=1");
Partition srcPartition =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec);
// Copy the partition
CopyPartitionTask copyPartitionTask = new CopyPartitionTask(conf, destinationObjectFactory,
conflictHandler, srcCluster, destCluster, partitionSpec,
ReplicationUtils.getLocation(srcPartition), Optional.empty(), directoryCopier, true);
RunInfo status = copyPartitionTask.runTask();
// Verify that the partition got copied
assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
assertEquals(9, status.getBytesCopied());
// Copying a new partition without a data copy should not succeed.
partitionSpec = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=2");
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec);
copyPartitionTask = new CopyPartitionTask(conf, destinationObjectFactory, conflictHandler,
srcCluster, destCluster, partitionSpec, ReplicationUtils.getLocation(srcPartition),
Optional.<Path>empty(), directoryCopier, false);
status = copyPartitionTask.runTask();
assertEquals(RunInfo.RunStatus.NOT_COMPLETABLE, status.getRunStatus());
assertEquals(0, status.getBytesCopied());
}
@Test
public void testCopyPartitionView()
throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
// Create a partitioned table in the source
HiveObjectSpec tableSpec = new HiveObjectSpec("test_db", "test_table_view");
ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
TableType.VIRTUAL_VIEW, srcWarehouseRoot);
// Create a partition in the source table
HiveObjectSpec partitionSpec = new HiveObjectSpec("test_db", "test_table_view", "ds=1/hr=1");
Partition srcPartition =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec);
// Copy the partition
CopyPartitionTask copyPartitionTask = new CopyPartitionTask(conf, destinationObjectFactory,
conflictHandler, srcCluster, destCluster, partitionSpec,
ReplicationUtils.getLocation(srcPartition), Optional.<Path>empty(), directoryCopier, true);
RunInfo status = copyPartitionTask.runTask();
// Verify that the partition got copied
assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
assertEquals(0, status.getBytesCopied());
}
}
| 9,456 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/TaskEstimatorTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveParameterKeys;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.incremental.primitives.CopyPartitionedTableTask;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import com.airbnb.reair.incremental.primitives.TaskEstimate;
import com.airbnb.reair.incremental.primitives.TaskEstimator;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Optional;
public class TaskEstimatorTest extends MockClusterTest {
  private static final Log LOG = LogFactory.getLog(TaskEstimatorTest.class);

  // Common names to use for testing
  private static final String HIVE_DB = "test_db";
  private static final String HIVE_TABLE = "test_table";
  private static final String HIVE_PARTITION = "ds=1/hr=1";

  /** Brings up the shared mock source/destination clusters once for the suite. */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    MockClusterTest.setupClass();
  }
  /**
   * Walks an unpartitioned table through the estimator's decision states in
   * sequence: absent at the destination (full copy with data), in sync
   * (no-op), metadata drift (metadata-only copy), data drift (metadata + data
   * copy), and dropped at the source (drop table). The steps are
   * order-dependent: each one mutates the metastore state the next asserts on.
   */
  @Test
  public void testEstimatesForUnpartitionedTable()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    final DirectoryCopier directoryCopier =
        new DirectoryCopier(conf, srcCluster.getTmpDir(), false);
    // Create an unpartitioned table in the source
    final HiveObjectSpec spec = new HiveObjectSpec(HIVE_DB, HIVE_TABLE);
    final Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore, spec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);

    final TaskEstimator estimator =
        new TaskEstimator(conf, destinationObjectFactory, srcCluster, destCluster, directoryCopier);

    // Table exists in source, but not in dest. It should copy the table.
    TaskEstimate estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_UNPARTITIONED_TABLE);
    assertTrue(estimate.isUpdateMetadata());
    assertTrue(estimate.isUpdateData());
    assertTrue(estimate.getSrcPath().get().equals(new Path(srcTable.getSd().getLocation())));

    // Replicate the table
    final CopyUnpartitionedTableTask copyJob =
        new CopyUnpartitionedTableTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
            destCluster, spec, ReplicationUtils.getLocation(srcTable), directoryCopier, true);
    final RunInfo status = copyJob.runTask();
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());

    // A copy has been made on the destination. Now it shouldn't need to do
    // anything.
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.NO_OP);

    // Change the the source metadata. It should now require a metadata
    // update.
    srcTable.getParameters().put("foo", "bar");
    srcMetastore.alterTable(HIVE_DB, HIVE_TABLE, srcTable);
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_UNPARTITIONED_TABLE);
    assertTrue(estimate.isUpdateMetadata());
    assertFalse(estimate.isUpdateData());

    // Change the source data. It should now require a data update as well.
    ReplicationTestUtils.createTextFile(conf, new Path(srcTable.getSd().getLocation()),
        "new_file.txt", "456");
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_UNPARTITIONED_TABLE);
    assertTrue(estimate.isUpdateMetadata());
    assertTrue(estimate.isUpdateData());
    assertTrue(estimate.getSrcPath().get().equals(new Path(srcTable.getSd().getLocation())));

    // Drop the source. It should now be a drop.
    srcMetastore.dropTable(HIVE_DB, HIVE_TABLE, true);
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.DROP_TABLE);
  }
  /**
   * Same state walk as the unpartitioned case but for a partitioned table:
   * absent at destination (metadata-only copy — partition data is copied per
   * partition, not at the table level), in sync (no-op), metadata drift
   * (metadata-only copy), and dropped at the source (drop table).
   */
  @Test
  public void testEstimatesForPartitionedTable()
      throws IOException, HiveMetastoreException, DistCpException {
    final DirectoryCopier directoryCopier =
        new DirectoryCopier(conf, srcCluster.getTmpDir(), false);
    // Create an partitioned table in the source
    final HiveObjectSpec spec = new HiveObjectSpec(HIVE_DB, HIVE_TABLE);
    final Table srcTable = ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, spec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);

    final TaskEstimator estimator =
        new TaskEstimator(conf, destinationObjectFactory, srcCluster, destCluster, directoryCopier);

    // Table exists in source, but not in dest. It should copy the table.
    TaskEstimate estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_PARTITIONED_TABLE);
    assertTrue(estimate.isUpdateMetadata());
    assertFalse(estimate.isUpdateData());

    // Replicate the table
    final CopyPartitionedTableTask copyJob =
        new CopyPartitionedTableTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
            destCluster, spec, ReplicationUtils.getLocation(srcTable));
    final RunInfo status = copyJob.runTask();
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());

    // A copy has been made on the destination. Now it shouldn't need to do
    // anything.
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.NO_OP);

    // Change the the source metadata. It should now require a metadata
    // update.
    srcTable.getParameters().put("foo", "bar");
    srcMetastore.alterTable(HIVE_DB, HIVE_TABLE, srcTable);
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_PARTITIONED_TABLE);
    assertTrue(estimate.isUpdateMetadata());
    assertFalse(estimate.isUpdateData());

    // Drop the source. It should now be a drop.
    srcMetastore.dropTable(HIVE_DB, HIVE_TABLE, true);
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.DROP_TABLE);
  }
  /**
   * Walks a single partition through the estimator's decision states: absent
   * at the destination (copy with data), in sync (no-op), metadata drift
   * (metadata-only copy), data drift (metadata + data copy), and the parent
   * table dropped at the source (drop partition).
   */
  @Test
  public void testEstimatesForPartition()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    final DirectoryCopier directoryCopier =
        new DirectoryCopier(conf, srcCluster.getTmpDir(), false);
    // Create an partitioned table in the source
    final HiveObjectSpec tableSpec = new HiveObjectSpec(HIVE_DB, HIVE_TABLE);
    final Table srcTable =
        ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
            TableType.MANAGED_TABLE, srcWarehouseRoot);

    // Create a partition in the source
    final HiveObjectSpec spec = new HiveObjectSpec(HIVE_DB, HIVE_TABLE, HIVE_PARTITION);
    final Partition srcPartition = ReplicationTestUtils.createPartition(conf, srcMetastore, spec);

    TaskEstimator estimator =
        new TaskEstimator(conf, destinationObjectFactory, srcCluster, destCluster, directoryCopier);

    // Partition exists in source, but not in dest. It should copy the
    // partition.
    TaskEstimate estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_PARTITION);
    assertTrue(estimate.isUpdateMetadata());
    assertTrue(estimate.isUpdateData());
    assertTrue(estimate.getSrcPath().get().equals(new Path(srcPartition.getSd().getLocation())));

    // Replicate the partition
    // NOTE(review): this passes the table's location rather than
    // srcPartition's — confirm that is intended for CopyPartitionTask.
    final CopyPartitionTask copyJob = new CopyPartitionTask(conf, destinationObjectFactory,
        conflictHandler, srcCluster, destCluster, spec, ReplicationUtils.getLocation(srcTable),
        Optional.<Path>empty(), directoryCopier, true);
    final RunInfo status = copyJob.runTask();
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());

    // A copy has been made on the destination. Now it shouldn't need to do
    // anything.
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.NO_OP);

    // Change the the source metadata. It should now require a metadata
    // update.
    srcPartition.getParameters().put("foo", "bar");
    srcMetastore.alterPartition(HIVE_DB, HIVE_TABLE, srcPartition);
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_PARTITION);
    assertTrue(estimate.isUpdateMetadata());
    assertFalse(estimate.isUpdateData());

    // Change the source data. It should now require a data update as well.
    ReplicationTestUtils.createTextFile(conf, new Path(srcPartition.getSd().getLocation()),
        "new_file.txt", "456");
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.COPY_PARTITION);
    assertTrue(estimate.isUpdateMetadata());
    assertTrue(estimate.isUpdateData());
    assertTrue(estimate.getSrcPath().get().equals(new Path(srcPartition.getSd().getLocation())));

    // Drop the source. It should now be a drop.
    srcMetastore.dropTable(HIVE_DB, HIVE_TABLE, true);
    estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.DROP_PARTITION);
  }
  /**
   * With {@code BATCH_JOB_OVERWRITE_NEWER=false}, the estimator should leave a
   * destination table whose last-modified time is newer than the source
   * untouched (no-op), but should still plan a copy when the destination is
   * older. The destination's age is manipulated via the TLDT parameter.
   */
  @Test
  public void testEstimatesForUnpartitionedTableOverwriteNewer()
      throws IOException, HiveMetastoreException, DistCpException {
    // Overriding the default configuration, and make overwrite_newer = false.
    // Note this local deliberately shadows MockClusterTest.conf.
    YarnConfiguration conf = new YarnConfiguration(MockClusterTest.conf);
    conf.set(ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER, Boolean.FALSE.toString());

    final DirectoryCopier directoryCopier =
        new DirectoryCopier(conf, srcCluster.getTmpDir(), false);
    // Create an unpartitioned table in the source
    final HiveObjectSpec spec = new HiveObjectSpec(HIVE_DB, HIVE_TABLE);
    final Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore, spec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);
    final long srcLmt = ReplicationUtils.getLastModifiedTime(srcTable);

    // Create an unpartitioned table in the destination that is newer
    final Table destTable = ReplicationTestUtils.createUnpartitionedTable(conf, destMetastore, spec,
        TableType.MANAGED_TABLE, destWarehouseRoot);
    destTable.putToParameters(HiveParameterKeys.TLDT, Long.toString(srcLmt + 1));
    destMetastore.alterTable(HIVE_DB, HIVE_TABLE, destTable);

    // Confirm that we won't overwrite newer tables
    final TaskEstimator estimator =
        new TaskEstimator(conf, destinationObjectFactory, srcCluster, destCluster, directoryCopier);
    TaskEstimate estimate = estimator.analyze(spec);
    assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.NO_OP);

    // Modify the dest table to be older
    destTable.putToParameters(HiveParameterKeys.TLDT, Long.toString(srcLmt - 1));
    destMetastore.alterTable(HIVE_DB, HIVE_TABLE, destTable);

    // Confirm that we will still overwrite older tables
    TaskEstimate estimate2 = estimator.analyze(spec);
    assertTrue(estimate2.getTaskType() == TaskEstimate.TaskType.COPY_UNPARTITIONED_TABLE);
    assertTrue(estimate2.isUpdateMetadata());
    assertTrue(estimate2.getSrcPath().get().equals(new Path(srcTable.getSd().getLocation())));

    // Drop the source and destination
    srcMetastore.dropTable(HIVE_DB, HIVE_TABLE, true);
    destMetastore.dropTable(HIVE_DB, HIVE_TABLE, true);
  }
/**
 * Verifies estimator behavior for partitions when overwrite_newer is disabled: a
 * destination partition with a newer last-modified time is left alone (NO_OP), while
 * an older destination partition is still scheduled for a copy.
 */
@Test
public void testEstimatesForPartitionOverwriteNewer()
    throws IOException, HiveMetastoreException, DistCpException {
  // Overriding the default configuration, and make overwrite_newer = false.
  YarnConfiguration conf = new YarnConfiguration(MockClusterTest.conf);
  conf.set(ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER, Boolean.FALSE.toString());
  final DirectoryCopier directoryCopier =
      new DirectoryCopier(conf, srcCluster.getTmpDir(), false);
  // Create a partitioned table in the source.
  final HiveObjectSpec tableSpec = new HiveObjectSpec(HIVE_DB, HIVE_TABLE);
  final Table srcTable =
      ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
          TableType.MANAGED_TABLE, srcWarehouseRoot);
  // Create a partition in the source
  final HiveObjectSpec spec = new HiveObjectSpec(HIVE_DB, HIVE_TABLE, HIVE_PARTITION);
  final Partition srcPartition = ReplicationTestUtils.createPartition(conf, srcMetastore, spec);
  final long srcLmt = ReplicationUtils.getLastModifiedTime(srcPartition);
  // Create a partitioned table in the destination whose partition is stamped strictly
  // newer than the source's last-modified time.
  final Table destTable =
      ReplicationTestUtils.createPartitionedTable(conf, destMetastore, tableSpec,
          TableType.MANAGED_TABLE, destWarehouseRoot);
  final Partition destPartition = ReplicationTestUtils.createPartition(conf, destMetastore, spec);
  destPartition.putToParameters(HiveParameterKeys.TLDT, Long.toString(srcLmt + 1));
  destMetastore.alterPartition(HIVE_DB, HIVE_TABLE, destPartition);
  // Confirm that we won't overwrite newer partitions
  final TaskEstimator estimator =
      new TaskEstimator(conf, destinationObjectFactory, srcCluster, destCluster, directoryCopier);
  TaskEstimate estimate = estimator.analyze(spec);
  assertTrue(estimate.getTaskType() == TaskEstimate.TaskType.NO_OP);
  // Modify the dest partition to be older
  destPartition.putToParameters(HiveParameterKeys.TLDT, Long.toString(srcLmt - 1));
  destMetastore.alterPartition(HIVE_DB, HIVE_TABLE, destPartition);
  // Confirm that we will still overwrite older partitions
  TaskEstimate estimate2 = estimator.analyze(spec);
  assertTrue(estimate2.getTaskType() == TaskEstimate.TaskType.COPY_PARTITION);
  assertTrue(estimate2.isUpdateMetadata());
  assertTrue(estimate2.getSrcPath().get().equals(new Path(srcPartition.getSd().getLocation())));
  // Drop the source and destination tables to leave a clean state for other tests.
  srcMetastore.dropTable(HIVE_DB, HIVE_TABLE, true);
  destMetastore.dropTable(HIVE_DB, HIVE_TABLE, true);
}
}
| 9,457 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/DbKeyValueStoreTest.java | package test;
import static org.junit.Assert.assertEquals;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.DbKeyValueStore;
import com.airbnb.reair.db.EmbeddedMySqlDb;
import com.airbnb.reair.db.StaticDbConnectionFactory;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Optional;
/**
 * Tests for {@link DbKeyValueStore} backed by an embedded MySQL instance.
 */
public class DbKeyValueStoreTest {
  // Fixed: was registered under DbKeyValueStore.class (copy/paste), which misattributed
  // log lines from this test.
  private static final Log LOG = LogFactory.getLog(DbKeyValueStoreTest.class);

  private static EmbeddedMySqlDb embeddedMySqlDb;

  private static final String MYSQL_TEST_DB_NAME = "replication_test";
  private static final String MYSQL_TEST_TABLE_NAME = "key_value";

  /**
   * Configures this class for testing by setting up the embedded DB and creating a test database.
   *
   * @throws ClassNotFoundException if there's an error instantiating the JDBC driver
   * @throws SQLException if there's an error querying the embedded DB
   */
  @BeforeClass
  public static void setupClass() throws ClassNotFoundException, SQLException {
    // Create the MySQL process
    embeddedMySqlDb = new EmbeddedMySqlDb();
    embeddedMySqlDb.startDb();

    // Create the DB within MySQL. try-with-resources ensures the bootstrap connection
    // and statement are closed even if the DDL fails.
    Class.forName("com.mysql.jdbc.Driver");
    String username = embeddedMySqlDb.getUsername();
    String password = embeddedMySqlDb.getPassword();
    try (Connection connection = DriverManager
            .getConnection(ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb), username, password);
        Statement statement = connection.createStatement()) {
      statement.executeUpdate("CREATE DATABASE " + MYSQL_TEST_DB_NAME);
    }
  }

  @Test
  public void testSetAndChangeKey() throws SQLException {
    DbConnectionFactory dbConnectionFactory = new StaticDbConnectionFactory(
        ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb, MYSQL_TEST_DB_NAME),
        embeddedMySqlDb.getUsername(), embeddedMySqlDb.getPassword());

    // Create the key-value table using the shared table-name constant (was a duplicated
    // "key_value" literal). Only the statement is closed here — the factory owns the
    // connection, which the store continues to use below.
    String createTableSql = DbKeyValueStore.getCreateTableSql(MYSQL_TEST_TABLE_NAME);
    try (Statement statement = dbConnectionFactory.getConnection().createStatement()) {
      statement.execute(createTableSql);
    }

    DbKeyValueStore kvStore = new DbKeyValueStore(dbConnectionFactory, MYSQL_TEST_TABLE_NAME);

    // Set a key, and make sure you get the same value back
    kvStore.set("foo", "bar");
    assertEquals(Optional.of("bar"), kvStore.get("foo"));

    // Change a key, make sure you get the new value
    kvStore.set("foo", "baz");
    assertEquals(Optional.of("baz"), kvStore.get("foo"));

    // Make sure that you get empty for invalid keys
    assertEquals(Optional.empty(), kvStore.get("baz"));
  }

  @AfterClass
  public static void tearDownClass() {
    embeddedMySqlDb.stopDb();
  }
}
| 9,458 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/PersistedJobInfoStoreTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.EmbeddedMySqlDb;
import com.airbnb.reair.db.StaticDbConnectionFactory;
import com.airbnb.reair.incremental.ReplicationOperation;
import com.airbnb.reair.incremental.ReplicationStatus;
import com.airbnb.reair.incremental.StateUpdateException;
import com.airbnb.reair.incremental.db.PersistedJobInfo;
import com.airbnb.reair.incremental.db.PersistedJobInfoStore;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * Tests for {@link PersistedJobInfoStore} backed by an embedded MySQL instance.
 */
public class PersistedJobInfoStoreTest {
  private static final Log LOG = LogFactory.getLog(PersistedJobInfoStoreTest.class);

  private static EmbeddedMySqlDb embeddedMySqlDb;

  private static final String MYSQL_TEST_DB_NAME = "replication_test";
  private static final String MYSQL_TEST_TABLE_NAME = "replication_jobs";

  private static DbConnectionFactory dbConnectionFactory;
  private static PersistedJobInfoStore jobStore;

  /**
   * Sets up this class for testing: starts an embedded MySQL, creates the test database,
   * and creates the jobs table used by the store.
   *
   * @throws ClassNotFoundException if there's an error initializing the JDBC driver
   * @throws SQLException if there's an error querying the database
   */
  @BeforeClass
  public static void setupClass() throws ClassNotFoundException, SQLException {
    // Create the MySQL process
    embeddedMySqlDb = new EmbeddedMySqlDb();
    embeddedMySqlDb.startDb();

    // Create the DB within MySQL, closing the bootstrap connection when done
    // (previously leaked).
    Class.forName("com.mysql.jdbc.Driver");
    String username = embeddedMySqlDb.getUsername();
    String password = embeddedMySqlDb.getPassword();
    try (Connection connection = DriverManager
            .getConnection(ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb), username, password);
        Statement statement = connection.createStatement()) {
      statement.executeUpdate("CREATE DATABASE " + MYSQL_TEST_DB_NAME);
    }

    dbConnectionFactory = new StaticDbConnectionFactory(
        ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb, MYSQL_TEST_DB_NAME),
        embeddedMySqlDb.getUsername(), embeddedMySqlDb.getPassword());
    jobStore =
        new PersistedJobInfoStore(new Configuration(), dbConnectionFactory, MYSQL_TEST_TABLE_NAME);

    // Create the jobs table using the shared constant (was a duplicated literal). The
    // factory owns its connection, so only the statement is closed here.
    try (Statement statement = dbConnectionFactory.getConnection().createStatement()) {
      statement.execute(PersistedJobInfoStore.getCreateTableSql(MYSQL_TEST_TABLE_NAME));
    }
  }

  /**
   * Creates a job, verifies it reads back equal, then updates it and verifies the update
   * is visible on re-read.
   */
  @Test
  public void testCreateAndUpdate() throws StateUpdateException, SQLException, Exception {
    // Test out creation
    List<String> partitionNames = new ArrayList<>();
    partitionNames.add("ds=1/hr=1");
    Map<String, String> extras = new HashMap<>();
    extras.put("foo", "bar");
    PersistedJobInfo testJob = PersistedJobInfo.createDeferred(
        ReplicationOperation.COPY_UNPARTITIONED_TABLE,
        ReplicationStatus.PENDING,
        Optional.of(new Path("file:///tmp/test_table")),
        "src_cluster",
        new HiveObjectSpec("test_db", "test_table", "ds=1/hr=1"),
        partitionNames,
        Optional.of("1"),
        Optional.of(new HiveObjectSpec("test_db", "renamed_table", "ds=1/hr=1")),
        Optional.of(new Path("file://tmp/a/b/c")),
        extras);
    jobStore.createMany(Arrays.asList(testJob));

    // Test out retrieval. (An unused, never-closed Connection was removed here.)
    Map<Long, PersistedJobInfo> idToJob = new HashMap<>();
    for (PersistedJobInfo persistedJobInfo : jobStore.getRunnableFromDb()) {
      idToJob.put(persistedJobInfo.getId(), persistedJobInfo);
    }

    // Make sure that the job that was created is the same as the job that was retrieved
    assertEquals(testJob, idToJob.get(testJob.getId()));

    // Try modifying the job
    testJob.setStatus(ReplicationStatus.RUNNING);
    jobStore.persist(testJob);

    // Verify that the change is retrieved
    idToJob.clear();
    for (PersistedJobInfo persistedJobInfo : jobStore.getRunnableFromDb()) {
      idToJob.put(persistedJobInfo.getId(), persistedJobInfo);
    }
    assertEquals(testJob, idToJob.get(testJob.getId()));
  }

  /** Verifies that a single minimal job can be persisted via createMany. */
  @Test
  public void testCreateOne() throws Exception {
    HiveObjectSpec hiveObjectSpec = new HiveObjectSpec("a", "b");
    List<String> srcPartitionNames = new ArrayList<>();
    // Renamed from persistedJobInfoCompletableFuture: createDeferred returns a
    // PersistedJobInfo, not a future.
    PersistedJobInfo persistedJobInfo =
        PersistedJobInfo.createDeferred(
            ReplicationOperation.COPY_PARTITION,
            ReplicationStatus.PENDING,
            Optional.empty(),
            "a",
            hiveObjectSpec,
            srcPartitionNames,
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            new HashMap<>());
    jobStore.createMany(Arrays.asList(persistedJobInfo));
  }

  /**
   * Persists several batches worth of jobs in one createMany call, checks that the
   * constructed jobs line up with the expected source-cluster names, and verifies that
   * re-creating the same jobs fails with a StateUpdateException.
   */
  @Test
  public void testCreateManyWithMany() throws Exception {
    List<List<String>> expectedResults = new ArrayList<>();
    expectedResults.add(new ArrayList<>(Arrays.asList("aa", "ab", "ac", "ad")));
    expectedResults.add(new ArrayList<>(Arrays.asList("ba", "bb", "bc")));
    expectedResults.add(new ArrayList<>(Arrays.asList("cc")));
    expectedResults.add(new ArrayList<>());
    expectedResults.add(new ArrayList<>(Arrays.asList("ea", "eb")));

    HiveObjectSpec hiveObjectSpec = new HiveObjectSpec("a", "b");
    List<List<PersistedJobInfo>> actualResults = new ArrayList<>();
    List<PersistedJobInfo> jobs = new ArrayList<>();
    for (List<String> srcClusters : expectedResults) {
      List<PersistedJobInfo> subResults = new ArrayList<>();
      for (String srcCluster : srcClusters) {
        PersistedJobInfo persistedJobInfo =
            PersistedJobInfo.createDeferred(
                ReplicationOperation.COPY_PARTITION,
                ReplicationStatus.PENDING,
                Optional.empty(),
                srcCluster,
                hiveObjectSpec,
                new ArrayList<>(),
                Optional.empty(),
                Optional.empty(),
                Optional.empty(),
                new HashMap<>());
        subResults.add(persistedJobInfo);
        jobs.add(persistedJobInfo);
      }
      actualResults.add(subResults);
    }
    jobStore.createMany(jobs);

    // The job objects must line up, batch by batch, with the expected cluster names.
    for (int i = 0; i < expectedResults.size(); i++) {
      assertEquals(expectedResults.get(i).size(), actualResults.get(i).size());
      for (int j = 0; j < expectedResults.get(i).size(); j++) {
        assertEquals(
            expectedResults.get(i).get(j),
            actualResults.get(i).get(j).getSrcClusterName());
      }
    }

    // Creating the same jobs again must fail.
    boolean exceptionThrown = false;
    try {
      jobStore.createMany(jobs);
    } catch (StateUpdateException e) {
      exceptionThrown = true;
    }
    assertTrue(exceptionThrown);
  }

  /** createMany with an empty list should be a no-op rather than an error. */
  @Test
  public void testCreateNone() throws StateUpdateException {
    jobStore.createMany(new ArrayList<>());
  }

  @AfterClass
  public static void tearDownClass() {
    embeddedMySqlDb.stopDb();
  }
}
| 9,459 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/DropPartitionTest.java | package test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.incremental.primitives.DropPartitionTask;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.Test;
import java.io.IOException;
import java.util.Optional;
public class DropPartitionTest extends MockClusterTest {
@Test
public void testDrop()
throws ConfigurationException, DistCpException, HiveMetastoreException, IOException {
String dbName = "test_db";
String tableName = "test_Table";
// Create a partitioned table in the source
HiveObjectSpec tableSpec = new HiveObjectSpec(dbName, tableName);
Table srcTable = ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
TableType.MANAGED_TABLE, srcWarehouseRoot);
// Create a partition in the source table
String partitionName = "ds=1/hr=2";
HiveObjectSpec partitionSpec = new HiveObjectSpec(dbName, tableName, partitionName);
Partition srcPartition =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec);
// Copy the partition
CopyPartitionTask copyPartitionTask = new CopyPartitionTask(conf, destinationObjectFactory,
conflictHandler, srcCluster, destCluster, partitionSpec,
ReplicationUtils.getLocation(srcPartition), Optional.empty(), directoryCopier, true);
RunInfo status = copyPartitionTask.runTask();
// Verify that the table exists on the destination
assertTrue(destMetastore.existsTable(dbName, tableName));
// Pretend that a drop operation needs to be performed
DropPartitionTask dropPartitionTask = new DropPartitionTask(srcCluster, destCluster,
partitionSpec, ReplicationUtils.getTldt(srcPartition));
dropPartitionTask.runTask();
// Verify that the table exists, but the partition doest
assertTrue(destMetastore.existsTable(dbName, tableName));
assertFalse(destMetastore.existsPartition(dbName, tableName, partitionName));
// Create a different partition on the destination, but with the same name
Partition destPartition =
ReplicationTestUtils.createPartition(conf, destMetastore, partitionSpec);
// Pretend that a drop operation needs to be performed
dropPartitionTask.runTask();
// Verify that the partition still exists on the destination
assertTrue(destMetastore.existsPartition(dbName, tableName, partitionName));
}
}
| 9,460 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/RenamePartitionTaskTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.incremental.primitives.RenamePartitionTask;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
 * Tests that RenamePartitionTask propagates a source-side partition rename to the
 * destination metastore.
 */
public class RenamePartitionTaskTest extends MockClusterTest {

  private static ParallelJobExecutor jobExecutor = new ParallelJobExecutor(1);

  /**
   * Sets up this class for testing.
   *
   * @throws IOException if there's an error accessing the local filesystem
   * @throws SQLException if there's an error querying the embedded DB
   */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    MockClusterTest.setupClass();
    jobExecutor.start();
  }

  @Test
  public void testRenamePartition()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    verifyRenamePropagation();
  }

  // This test body was byte-for-byte identical to testRenamePartition, so both now share
  // one scenario helper. NOTE(review): if the "by Thrift" variant was intended to exercise
  // a different code path, the helper should be parameterized accordingly — confirm.
  @Test
  public void testRenamePartitionByThrift()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    verifyRenamePropagation();
  }

  /**
   * Creates partition ds=1/hr=1 on the source, copies it to the destination, renames it on
   * the source to ds=1/hr=2, propagates the rename with a RenamePartitionTask, and verifies
   * the destination reflects the rename (and matching modified times).
   */
  private void verifyRenamePropagation()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    final String dbName = "test_db";
    final String tableName = "test_table";
    final String oldPartitionName = "ds=1/hr=1";
    final String newPartitionName = "ds=1/hr=2";

    // Create a partitioned table plus the old partition in the source
    final HiveObjectSpec originalTableSpec = new HiveObjectSpec(dbName, tableName);
    final HiveObjectSpec oldPartitionSpec = new HiveObjectSpec(dbName, tableName, oldPartitionName);
    final HiveObjectSpec newPartitionSpec = new HiveObjectSpec(dbName, tableName, newPartitionName);
    ReplicationTestUtils.createPartitionedTable(conf, srcMetastore,
        originalTableSpec, TableType.MANAGED_TABLE, srcWarehouseRoot);
    final Partition oldPartition =
        ReplicationTestUtils.createPartition(conf, srcMetastore, oldPartitionSpec);

    // Copy the partition to the destination
    final Configuration testConf = new Configuration(conf);
    final CopyPartitionTask copyJob = new CopyPartitionTask(testConf, destinationObjectFactory,
        conflictHandler, srcCluster, destCluster, oldPartitionSpec,
        ReplicationUtils.getLocation(oldPartition), Optional.empty(), directoryCopier, true);
    copyJob.runTask();

    // Rename the source partition
    final Partition newPartition = new Partition(oldPartition);
    final List<String> newValues = new ArrayList<>();
    newValues.add("1");
    newValues.add("2");
    newPartition.setValues(newValues);
    srcMetastore.renamePartition(dbName, tableName, oldPartition.getValues(), newPartition);

    // Propagate the rename
    final RenamePartitionTask task = new RenamePartitionTask(testConf, destinationObjectFactory,
        conflictHandler, srcCluster, destCluster, oldPartitionSpec, newPartitionSpec,
        ReplicationUtils.getLocation(oldPartition), ReplicationUtils.getLocation(newPartition),
        ReplicationUtils.getTldt(oldPartition), directoryCopier);
    final RunInfo runInfo = task.runTask();

    // Check to make sure that the rename has succeeded
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, runInfo.getRunStatus());
    assertTrue(destMetastore.existsPartition(newPartitionSpec.getDbName(),
        newPartitionSpec.getTableName(), newPartitionSpec.getPartitionName()));
    assertFalse(destMetastore.existsPartition(oldPartitionSpec.getDbName(),
        oldPartitionSpec.getTableName(), oldPartitionSpec.getPartitionName()));
    assertEquals(ReplicationTestUtils.getModifiedTime(srcMetastore, newPartitionSpec),
        ReplicationTestUtils.getModifiedTime(destMetastore, newPartitionSpec));
  }
}
| 9,461 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/MockHiveMetastoreClient.java | package test;
import com.google.common.collect.Lists;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* Simulates a Hive metastore client connected to a Hive metastore Thrift server.
*/
public class MockHiveMetastoreClient implements HiveMetastoreClient {
private Map<String, Database> dbNameToDatabase;
private Map<HiveObjectSpec, Table> specToTable;
private Map<HiveObjectSpec, Partition> specToPartition;
/**
 * Creates an empty mock metastore client; all state lives in the in-memory maps.
 */
public MockHiveMetastoreClient() {
  this.dbNameToDatabase = new HashMap<>();
  this.specToTable = new HashMap<>();
  this.specToPartition = new HashMap<>();
}
/**
 * Derives the partition name (e.g. {@code ds=1/hr=2}) for a partition of the given table.
 * For simplicity, special characters in values are not escaped.
 *
 * @param table the table that the partition belongs to
 * @param partition the partition to get the name for
 * @return the name of the partition
 * @throws HiveMetastoreException if the table and partition column counts do not match
 */
private String getPartitionName(Table table, Partition partition) throws HiveMetastoreException {
  List<FieldSchema> partitionKeys = table.getPartitionKeys();
  List<String> partitionValues = partition.getValues();
  if (partitionKeys.size() != partitionValues.size()) {
    throw new HiveMetastoreException(
        "Partition column mismatch: " + "table has " + partitionKeys.size()
            + " columns " + "while partition has " + partitionValues.size() + " values");
  }
  StringBuilder name = new StringBuilder();
  for (int i = 0; i < partitionKeys.size(); i++) {
    if (i > 0) {
      name.append("/");
    }
    name.append(partitionKeys.get(i).getName()).append("=").append(partitionValues.get(i));
  }
  return name.toString();
}
// Builds the partition name from the table's partition keys and the given values, in key
// order (no escaping of special characters).
private String getPartitionName(Table table, List<String> values) {
  List<String> keyValuePairs = new ArrayList<>();
  int index = 0;
  for (FieldSchema fs : table.getPartitionKeys()) {
    keyValuePairs.add(fs.getName() + "=" + values.get(index));
    index++;
  }
  return String.join("/", keyValuePairs);
}
// Registers a new partition; fails if its table is unknown or the partition already exists.
@Override
public Partition addPartition(Partition partition) throws HiveMetastoreException {
  HiveObjectSpec tableSpec = new HiveObjectSpec(partition.getDbName(), partition.getTableName());
  Table table = specToTable.get(tableSpec);
  if (table == null) {
    throw new HiveMetastoreException("Unknown table: " + tableSpec);
  }
  HiveObjectSpec partitionSpec = new HiveObjectSpec(
      tableSpec.getDbName(), tableSpec.getTableName(), getPartitionName(table, partition));
  if (specToPartition.containsKey(partitionSpec)) {
    throw new HiveMetastoreException("Partition already exists: " + partitionSpec);
  }
  specToPartition.put(partitionSpec, partition);
  return partition;
}
// Looks up a table; returns null when the table is not registered.
@Override
public Table getTable(String dbName, String tableName) throws HiveMetastoreException {
  HiveObjectSpec spec = new HiveObjectSpec(dbName, tableName);
  return specToTable.get(spec);
}
// Looks up a partition by name; returns null when it is not registered.
@Override
public Partition getPartition(String dbName, String tableName, String partitionName)
    throws HiveMetastoreException {
  HiveObjectSpec spec = new HiveObjectSpec(dbName, tableName, partitionName);
  return specToPartition.get(spec);
}
// Replaces an existing partition's metadata. Note that, as before, the db/table used are
// taken from the partition object itself; the dbName/tableName arguments are not consulted.
@Override
public void alterPartition(String dbName, String tableName, Partition partition)
    throws HiveMetastoreException {
  HiveObjectSpec tableSpec = new HiveObjectSpec(partition.getDbName(), partition.getTableName());
  Table table = specToTable.get(tableSpec);
  if (table == null) {
    throw new HiveMetastoreException("Unknown table: " + tableSpec);
  }
  HiveObjectSpec partitionSpec = new HiveObjectSpec(
      tableSpec.getDbName(), tableSpec.getTableName(), getPartitionName(table, partition));
  if (!specToPartition.containsKey(partitionSpec)) {
    throw new HiveMetastoreException("Partition does not exist: " + partitionSpec);
  }
  specToPartition.put(partitionSpec, partition);
}
// Registers a database; fails if one with the same name already exists.
@Override
public void createDatabase(Database db) throws HiveMetastoreException {
  String name = db.getName();
  if (dbNameToDatabase.containsKey(name)) {
    throw new HiveMetastoreException("DB " + name + " already exists!");
  }
  dbNameToDatabase.put(name, db);
}
/**
 * Returns the database with the given name, or {@code null} if it has not been created.
 */
@Override
public Database getDatabase(String dbName) throws HiveMetastoreException {
  return dbNameToDatabase.get(dbName);
}
// A database exists iff getDatabase finds it.
@Override
public boolean existsDb(String dbName) throws HiveMetastoreException {
  Database db = getDatabase(dbName);
  return db != null;
}
// Registers a table; its database must already exist and the table must not.
@Override
public void createTable(Table table) throws HiveMetastoreException {
  if (!existsDb(table.getDbName())) {
    throw new HiveMetastoreException("DB " + table.getDbName() + " does not exist!");
  }
  HiveObjectSpec spec = new HiveObjectSpec(table.getDbName(), table.getTableName());
  if (specToTable.containsKey(spec)) {
    throw new HiveMetastoreException("Table already exists: " + spec);
  }
  specToTable.put(spec, table);
}
/**
 * Replaces the table stored under (dbName, tableName) with the given table, which may carry
 * a new db/table name (i.e. this also models renames).
 *
 * @throws HiveMetastoreException if no table is registered under the old name
 */
@Override
public void alterTable(
    String dbName,
    String tableName,
    Table table) throws HiveMetastoreException {
  HiveObjectSpec existingTableSpec = new HiveObjectSpec(dbName, tableName);
  HiveObjectSpec newTableSpec = new HiveObjectSpec(table.getDbName(), table.getTableName());
  // Remove under the old name and re-insert under the (possibly renamed) new name.
  // Simplified from a containsKey check followed by a remove plus a dead "Shouldn't
  // happen!" null re-check: remove() itself tells us whether the entry existed.
  if (specToTable.remove(existingTableSpec) == null) {
    throw new HiveMetastoreException("Unknown table: " + existingTableSpec);
  }
  specToTable.put(newTableSpec, table);
}
// A table is partitioned iff it declares at least one partition key.
// NOTE(review): assumes the table exists — a missing table NPEs here, same as before.
@Override
public boolean isPartitioned(String dbName, String tableName) throws HiveMetastoreException {
  Table table = getTable(dbName, tableName);
  return !table.getPartitionKeys().isEmpty();
}
// A partition exists iff getPartition finds it.
@Override
public boolean existsPartition(String dbName, String tableName, String partitionName)
    throws HiveMetastoreException {
  Partition partition = getPartition(dbName, tableName, partitionName);
  return partition != null;
}
// A table exists iff getTable finds it.
@Override
public boolean existsTable(String dbName, String tableName) throws HiveMetastoreException {
  Table table = getTable(dbName, tableName);
  return table != null;
}
/**
 * Drops the table and every partition registered under it. For safety, the
 * {@code deleteData} flag is ignored and no data is ever deleted.
 */
@Override
public void dropTable(String dbName, String tableName, boolean deleteData)
    throws HiveMetastoreException {
  HiveObjectSpec tableSpec = new HiveObjectSpec(dbName, tableName);
  if (!existsTable(dbName, tableName)) {
    throw new HiveMetastoreException("Missing table: " + tableSpec);
  }
  // Remove the table itself, then all partitions whose table spec matches.
  specToTable.remove(tableSpec);
  specToPartition.keySet().removeIf(spec -> spec.getTableSpec().equals(tableSpec));
  // For safety, don't delete data.
}
/**
 * Drops the partition. For safety, the {@code deleteData} flag is ignored and no data
 * is ever deleted.
 */
@Override
public void dropPartition(
    String dbName,
    String tableName,
    String partitionName,
    boolean deleteData) throws HiveMetastoreException {
  HiveObjectSpec partitionSpec = new HiveObjectSpec(dbName, tableName, partitionName);
  // remove() returns null when the partition was never registered.
  if (specToPartition.remove(partitionSpec) == null) {
    throw new HiveMetastoreException("Missing partition: " + partitionSpec);
  }
}
// Returns the names of all partitions registered under the given table (in no
// particular order).
@Override
public List<String> getPartitionNames(String dbName, String tableName)
    throws HiveMetastoreException {
  HiveObjectSpec tableSpec = new HiveObjectSpec(dbName, tableName);
  List<String> partitionNames = new ArrayList<>();
  for (HiveObjectSpec spec : specToPartition.keySet()) {
    if (tableSpec.equals(spec.getTableSpec())) {
      partitionNames.add(spec.getPartitionName());
    }
  }
  return partitionNames;
}
/**
 * Parses a partition name like {@code ds=1/hr=2} into an ordered key/value map.
 *
 * @param partitionName name of the form {@code key1=val1/key2=val2/...}
 * @return a map preserving the order of the components in the name
 */
@Override
public Map<String, String> partitionNameToMap(String partitionName)
    throws HiveMetastoreException {
  LinkedHashMap<String, String> partitionKeyToValue = new LinkedHashMap<>();
  for (String keyValue : partitionName.split("/")) {
    // Split on the first '=' only: the previous split("=") silently dropped everything
    // after a second '=' in the value, and threw on a component with no '=' at all.
    String[] keyValueSplit = keyValue.split("=", 2);
    String key = keyValueSplit[0];
    String value = keyValueSplit.length > 1 ? keyValueSplit[1] : "";
    partitionKeyToValue.put(key, value);
  }
  return partitionKeyToValue;
}
// Lists all table names in the given database. Only the "*" wildcard pattern is
// supported by this mock.
@Override
public List<String> getTables(String dbName, String tableName) throws HiveMetastoreException {
  if (!tableName.equals("*")) {
    throw new RuntimeException("Only * (wildcard) is supported in the mock client");
  }
  List<String> tableNames = new ArrayList<>();
  for (HiveObjectSpec spec : specToTable.keySet()) {
    if (spec.getDbName().equals(dbName)) {
      tableNames.add(spec.getTableName());
    }
  }
  return tableNames;
}
/**
 * Converts a map of partition key-value pairs to a name like {@code ds=1/hr=2}. Note that
 * special characters are not escaped unlike in production, and the component order follows
 * the map's iteration order.
 */
private static String partitionSpecToName(Map<String, String> spec) {
  List<String> components = new ArrayList<>();
  for (Map.Entry<String, String> entry : spec.entrySet()) {
    components.add(entry.getKey() + "=" + entry.getValue());
  }
  return String.join("/", components);
}
  /**
   * Moves a partition from the source table to the destination table, mimicking Hive's
   * EXCHANGE PARTITION operation. The same Partition object is re-pointed at the destination
   * database and table.
   *
   * @param partitionSpecs partition key-value pairs identifying the partition to move
   * @return the moved partition, now registered under the destination table
   * @throws HiveMetastoreException if the source partition or destination table is missing
   */
  @Override
  public Partition exchangePartition(
      Map<String, String> partitionSpecs,
      String sourceDb,
      String sourceTable,
      String destDb,
      String destinationTableName)
      throws HiveMetastoreException {
    final String partitionName = partitionSpecToName(partitionSpecs);
    final HiveObjectSpec exchangeFromPartitionSpec =
        new HiveObjectSpec(sourceDb, sourceTable, partitionName);
    final HiveObjectSpec exchangeToPartitionSpec =
        new HiveObjectSpec(destDb, destinationTableName, partitionName);
    if (!existsPartition(sourceDb, sourceTable, partitionName)) {
      throw new HiveMetastoreException(
          String.format("Unknown source partition %s.%s/%s", sourceDb, sourceTable, partitionName));
    }
    if (!existsTable(destDb, destinationTableName)) {
      throw new HiveMetastoreException(
          String.format("Unknown destination table %s.%s", destDb, destinationTableName));
    }
    // Detach from the source spec, retarget the object, then re-register under the
    // destination spec. Order matters: remove before mutating the shared Partition.
    Partition partition = specToPartition.remove(exchangeFromPartitionSpec);
    partition.setDbName(destDb);
    partition.setTableName(destinationTableName);
    specToPartition.put(exchangeToPartitionSpec, partition);
    return partition;
  }
  /**
   * Renames a partition of the given table: the partition currently identified by
   * {@code partitionValues} takes on the values carried by {@code partition}.
   *
   * @throws HiveMetastoreException if the target name already exists or the original partition
   *         is missing
   */
  @Override
  public void renamePartition(
      String db,
      String tableName,
      List<String> partitionValues,
      Partition partition)
      throws HiveMetastoreException {
    HiveObjectSpec tableSpec = new HiveObjectSpec(db, tableName);
    Table table = specToTable.get(tableSpec);
    // Derive the before/after partition names from the table's partition keys.
    String renameFromPartitionName = getPartitionName(table, partitionValues);
    String renameToPartitionName = getPartitionName(table, partition.getValues());
    HiveObjectSpec renameFromSpec = new HiveObjectSpec(db, tableName, renameFromPartitionName);
    HiveObjectSpec renameToSpec = new HiveObjectSpec(db, tableName, renameToPartitionName);
    if (specToPartition.containsKey(renameToSpec)) {
      throw new HiveMetastoreException("Partition already exists: " + renameToSpec);
    }
    if (!specToPartition.containsKey(renameFromSpec)) {
      throw new HiveMetastoreException("Partition doesn't exist: " + renameFromPartitionName);
    }
    // Re-key the stored partition; copy the value list so later mutation of the caller's
    // Partition does not affect the stored state.
    Partition removed = specToPartition.remove(renameFromSpec);
    removed.setValues(new ArrayList<>(partition.getValues()));
    specToPartition.put(renameToSpec, removed);
  }
@Override
public List<String> getAllDatabases() throws HiveMetastoreException {
return Lists.newArrayList(dbNameToDatabase.keySet());
}
@Override
public List<String> getAllTables(final String dbName) throws HiveMetastoreException {
ArrayList<String> tables = new ArrayList<>();
for (HiveObjectSpec spec : specToTable.keySet()) {
if (spec.getDbName().equals(dbName)) {
tables.add(spec.getTableName());
}
}
return tables;
}
  /** No-op: the mock client holds no external resources to release. */
  @Override
  public void close() {
  }
}
| 9,462 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/RenameTableTaskTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import com.airbnb.reair.incremental.primitives.RenameTableTask;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.sql.SQLException;
/**
 * Tests for {@link RenameTableTask}: propagating a source-side table rename to the destination
 * cluster, both when the destination already has the table and when it must be copied first.
 */
public class RenameTableTaskTest extends MockClusterTest {
  // Single-threaded executor shared by all tests in this class; started once in setupClass().
  private static ParallelJobExecutor jobExecutor = new ParallelJobExecutor(1);
  /**
   * Sets up this class before running tests.
   *
   * @throws IOException if there's an error accessing the local filesystem
   * @throws SQLException if there's an error querying the embedded DB
   */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    MockClusterTest.setupClass();
    jobExecutor.start();
  }
  /**
   * Renames a table that has already been replicated to the destination and verifies the
   * rename is propagated: new name exists, old name is gone, modified times match.
   */
  @Test
  public void testRenameTable()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    final String dbName = "test_db";
    final String tableName = "test_table";
    final String newTableName = "new_test_table";
    // Create an unpartitioned table in the source
    final HiveObjectSpec originalTableSpec = new HiveObjectSpec(dbName, tableName);
    final Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore,
        originalTableSpec, TableType.MANAGED_TABLE, srcWarehouseRoot);
    // Copy the table
    final CopyUnpartitionedTableTask copyJob = new CopyUnpartitionedTableTask(conf,
        destinationObjectFactory, conflictHandler, srcCluster, destCluster, originalTableSpec,
        ReplicationUtils.getLocation(srcTable), directoryCopier, true);
    // NOTE(review): the copy's RunInfo is intentionally unused; only its side effect matters.
    final RunInfo status = copyJob.runTask();
    // Rename the source table (keep a copy of the pre-rename Thrift object for the task).
    final Table originalSrcTable = new Table(srcTable);
    srcTable.setTableName(newTableName);
    srcMetastore.alterTable(dbName, tableName, srcTable);
    final HiveObjectSpec newTableSpec = new HiveObjectSpec(dbName, newTableName);
    ReplicationTestUtils.updateModifiedTime(srcMetastore, newTableSpec);
    // Propagate the rename
    final RenameTableTask job = new RenameTableTask(conf, srcCluster, destCluster,
        destinationObjectFactory, conflictHandler, originalTableSpec, newTableSpec,
        ReplicationUtils.getLocation(originalSrcTable), ReplicationUtils.getLocation(srcTable),
        ReplicationUtils.getTldt(originalSrcTable), jobExecutor, directoryCopier);
    final RunInfo runInfo = job.runTask();
    // Check to make sure that the rename has succeeded
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, runInfo.getRunStatus());
    assertTrue(destMetastore.existsTable(newTableSpec.getDbName(), newTableSpec.getTableName()));
    assertFalse(
        destMetastore.existsTable(originalTableSpec.getDbName(), originalTableSpec.getTableName()));
    assertEquals(ReplicationTestUtils.getModifiedTime(srcMetastore, newTableSpec),
        ReplicationTestUtils.getModifiedTime(destMetastore, newTableSpec));
  }
  /**
   * Renames a table that was never copied to the destination and verifies the rename task
   * still results in the new table existing on the destination (i.e. it falls back to a copy).
   *
   * <p>NOTE(review): method name contains a typo ("Reqiring"); left as-is since JUnit
   * discovers tests by annotation, not by name.
   */
  @Test
  public void testRenameTableReqiringCopy()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    final String dbName = "test_db";
    final String tableName = "test_table";
    final String newTableName = "new_test_table";
    // Create an unpartitioned table in the source
    final HiveObjectSpec originalTableSpec = new HiveObjectSpec(dbName, tableName);
    final Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore,
        originalTableSpec, TableType.MANAGED_TABLE, srcWarehouseRoot);
    // Rename the source table. Note that the source table wasn't copied to
    // the destination.
    final Table originalSrcTable = new Table(srcTable);
    srcTable.setTableName(newTableName);
    srcMetastore.alterTable(dbName, tableName, srcTable);
    final HiveObjectSpec newTableSpec = new HiveObjectSpec(dbName, newTableName);
    ReplicationTestUtils.updateModifiedTime(srcMetastore, newTableSpec);
    // Propagate the rename
    final RenameTableTask job = new RenameTableTask(conf, srcCluster, destCluster,
        destinationObjectFactory, conflictHandler, originalTableSpec, newTableSpec,
        ReplicationUtils.getLocation(originalSrcTable), ReplicationUtils.getLocation(srcTable),
        ReplicationUtils.getTldt(originalSrcTable), jobExecutor, directoryCopier);
    final RunInfo runInfo = job.runTask();
    // Check to make sure that the expected table exists has succeeded
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, runInfo.getRunStatus());
    assertTrue(destMetastore.existsTable(newTableSpec.getDbName(), newTableSpec.getTableName()));
    assertFalse(
        destMetastore.existsTable(originalTableSpec.getDbName(), originalTableSpec.getTableName()));
  }
}
| 9,463 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/MockCluster.java | package test;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.incremental.configuration.Cluster;
import org.apache.hadoop.fs.Path;
/**
 * A {@link Cluster} implementation backed by fixed values supplied at construction, for use
 * in unit tests.
 */
public class MockCluster implements Cluster {
  // All fields are assigned once in the constructor; this object is effectively immutable.
  private final String name;
  private final HiveMetastoreClient client;
  private final Path fsRoot;
  private final Path tmpDir;

  /**
   * Constructs a mock cluster with static values.
   *
   * @param name name of the cluster
   * @param client the Hive metastore client to use
   * @param fsRoot the root of the warehouse directory associated with the supplied metastore
   * @param tmpDir the root of the directory to use for temporary files
   */
  public MockCluster(String name, HiveMetastoreClient client, Path fsRoot, Path tmpDir) {
    this.name = name;
    this.client = client;
    this.fsRoot = fsRoot;
    this.tmpDir = tmpDir;
  }

  /** Returns the metastore client supplied at construction. */
  @Override
  public HiveMetastoreClient getMetastoreClient() throws HiveMetastoreException {
    return client;
  }

  /** Returns the warehouse filesystem root. */
  @Override
  public Path getFsRoot() {
    return fsRoot;
  }

  /** Returns the root of the temporary-file directory. */
  @Override
  public Path getTmpDir() {
    return tmpDir;
  }

  /** Returns the cluster name. */
  @Override
  public String getName() {
    return name;
  }
}
| 9,464 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/DistCpWrapperOptionsTest.java | package test;
import static org.junit.Assert.assertEquals;
import com.airbnb.reair.common.DistCpWrapperOptions;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
/** Tests for the timeout and mapper-load calculations in {@code DistCpWrapperOptions}. */
public class DistCpWrapperOptionsTest {

  /** The static timeout is returned unchanged whether or not the dynamic flag is disabled. */
  @Test
  public void testGetDistCpTimeout() {
    DistCpWrapperOptions options = new DistCpWrapperOptions(null, null, null, null);
    options.setDistCpJobTimeout(1_000L);
    assertEquals(1_000L, options.getDistcpTimeout(Arrays.asList(), 100L));
    options.setDistcpDynamicJobTimeoutEnabled(false);
    assertEquals(1_000L, options.getDistcpTimeout(Arrays.asList(), 100L));
  }

  /** Exercises the longest-mapper estimate over various file-size lists and mapper counts. */
  @Test
  public void testComputeLongestMapper() {
    DistCpWrapperOptions options = new DistCpWrapperOptions(null, null, null, null);
    // Zero-byte files contribute nothing regardless of mapper count.
    assertEquals(0L, options.computeLongestMapper(Arrays.asList(0L, 0L, 0L), 500L));
    // An empty file list yields zero, even with zero mappers.
    assertEquals(0L, options.computeLongestMapper(Arrays.asList(), 2L));
    assertEquals(0L, options.computeLongestMapper(Arrays.asList(), 0L));
    // A single file lands on a single mapper, whatever the mapper count.
    assertEquals(100L, options.computeLongestMapper(Arrays.asList(100L), 1L));
    assertEquals(100L, options.computeLongestMapper(Arrays.asList(100L), 2L));
    assertEquals(100L, options.computeLongestMapper(Arrays.asList(100L, 1L), 2L));
    // With one mapper, all file sizes sum together.
    assertEquals(300L, options.computeLongestMapper(Arrays.asList(100L, 100L, 100L), 1L));
    // With several mappers, the busiest mapper's total is returned.
    assertEquals(101L, options.computeLongestMapper(Arrays.asList(100L, 50L, 51L), 2L));
    assertEquals(101L, options.computeLongestMapper(Arrays.asList(100L, 50L, 50L, 50L, 51L), 3L));
    assertEquals(150L, options.computeLongestMapper(Arrays.asList(100L, 50L, 50L, 50L, 50L), 2L));
  }

  /** Dynamic timeouts start from the base, grow with the data volume, and are capped at max. */
  @Test
  public void testGetDistCpTimeout_Dynamic() {
    DistCpWrapperOptions options = new DistCpWrapperOptions(null, null, null, null);
    options.setDistcpDynamicJobTimeoutEnabled(true);
    options.setDistcpDynamicJobTimeoutBase(100L);
    options.setDistcpDynamicJobTimeoutMax(9999L);
    options.setDistcpDynamicJobTimeoutMsPerGbPerMapper(10L);
    // Empty files fall back to the base timeout.
    assertEquals(100L, options.getDistcpTimeout(Arrays.asList(0L, 0L), 2L));
    // So does an empty file list.
    assertEquals(100L, options.getDistcpTimeout(Arrays.asList(), 2L));
    // A single 1 GB file adds one per-GB increment.
    assertEquals(110L, options.getDistcpTimeout(Arrays.asList(1_000_000_000L), 2L));
    // Multiple files: the timeout grows with the busiest mapper's load.
    assertEquals(120L, options.getDistcpTimeout(
        Arrays.asList(1_000_000_000L, 500_000_000L, 500_000_001L), 2L));
    // Very large inputs are capped at the configured maximum.
    assertEquals(9999L, options.getDistcpTimeout(Arrays.asList(Long.MAX_VALUE), 1L));
  }
}
| 9,465 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/MockHiveMetastoreClientTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import com.google.common.collect.Lists;
import com.airbnb.reair.common.HiveMetastoreException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* Test for the fake metastore client that we'll use later for testing.
*/
public class MockHiveMetastoreClientTest {
  // Fresh mock client per test; created in setUp() so tests do not share state.
  private static MockHiveMetastoreClient mockHiveMetastoreClient;
  @Before
  public void setUp() {
    mockHiveMetastoreClient = new MockHiveMetastoreClient();
  }
  /** Creating then dropping a table should leave no trace of it in the mock. */
  @Test
  public void testCreateAndDropTable() throws HiveMetastoreException {
    final String dbName = "test_db";
    final String tableName = "test_table";
    // First create a db and a table
    mockHiveMetastoreClient.createDatabase(new Database(dbName, null, null, null));
    Table table = new Table();
    table.setDbName(dbName);
    table.setTableName(tableName);
    mockHiveMetastoreClient.createTable(table);
    // Verify that you get the same table back
    assertEquals(mockHiveMetastoreClient.getTable(dbName, tableName), table);
    // Drop it
    mockHiveMetastoreClient.dropTable(dbName, tableName, false);
    // Verify that you can't get the table any more
    assertNull(mockHiveMetastoreClient.getTable(dbName, tableName));
    assertFalse(mockHiveMetastoreClient.existsTable(dbName, tableName));
  }
  /**
   * Creating and dropping a partition should behave like the real metastore; dropping the
   * whole table should also remove its partitions.
   */
  @Test
  public void testCreateAndDropPartition() throws HiveMetastoreException {
    final String dbName = "test_db";
    final String tableName = "test_table";
    final String partitionName = "ds=1/hr=2";
    final List<String> partitionValues = new ArrayList<>();
    partitionValues.add("1");
    partitionValues.add("2");
    // First create the db and a partitioned table
    mockHiveMetastoreClient.createDatabase(new Database(dbName, null, null, null));
    Table table = new Table();
    table.setDbName(dbName);
    table.setTableName(tableName);
    List<FieldSchema> partitionCols = new ArrayList<>();
    partitionCols.add(new FieldSchema("ds", "string", "my ds comment"));
    partitionCols.add(new FieldSchema("hr", "string", "my hr comment"));
    table.setPartitionKeys(partitionCols);
    mockHiveMetastoreClient.createTable(table);
    // Then try adding a partition
    Partition partition = new Partition();
    partition.setDbName(dbName);
    partition.setTableName(tableName);
    partition.setValues(partitionValues);
    mockHiveMetastoreClient.addPartition(partition);
    // Verify that you get back the same partition
    assertEquals(mockHiveMetastoreClient.getPartition(dbName, tableName, partitionName), partition);
    // Try dropping the partition and verify that it doesn't exist
    mockHiveMetastoreClient.dropPartition(dbName, tableName, partitionName, false);
    assertNull(mockHiveMetastoreClient.getPartition(dbName, tableName, partitionName));
    assertFalse(mockHiveMetastoreClient.existsPartition(dbName, tableName, partitionName));
    // Try adding a partition again
    mockHiveMetastoreClient.addPartition(partition);
    // Drop the table
    mockHiveMetastoreClient.dropTable(dbName, tableName, false);
    // Verify that the partition doesn't exist
    assertNull(mockHiveMetastoreClient.getPartition(dbName, tableName, partitionName));
  }
  /** Partition-name parsing must preserve key order (ds, hr, min, sec). */
  @Test
  public void testPartitionNameToMap() throws HiveMetastoreException {
    String partitionName = "ds=1/hr=2/min=3/sec=4";
    LinkedHashMap<String, String> expectedKeyValueMap = new LinkedHashMap<>();
    expectedKeyValueMap.put("ds", "1");
    expectedKeyValueMap.put("hr", "2");
    expectedKeyValueMap.put("min", "3");
    expectedKeyValueMap.put("sec", "4");
    Map<String, String> keyValueMap = mockHiveMetastoreClient.partitionNameToMap(partitionName);
    // Double check if iteration over the keySet / values is defined
    assertEquals(expectedKeyValueMap.keySet(), keyValueMap.keySet());
    assertEquals(Lists.newArrayList(expectedKeyValueMap.values()),
        Lists.newArrayList(keyValueMap.values()));
  }
  /** Renaming via alterTable should remove the old name and register the new one. */
  @Test
  public void testRenameTable() throws HiveMetastoreException {
    final String dbName = "test_db";
    final String tableName = "test_table";
    final String newTableName = "new_test_table";
    // First create the DB and the table
    mockHiveMetastoreClient.createDatabase(new Database(dbName, null, null, null));
    final Table table = new Table();
    table.setDbName(dbName);
    table.setTableName(tableName);
    mockHiveMetastoreClient.createTable(table);
    // Verify that you get the same table back
    assertEquals(mockHiveMetastoreClient.getTable(dbName, tableName), table);
    // Rename it
    final Table newTable = new Table(table);
    newTable.setTableName(newTableName);
    mockHiveMetastoreClient.alterTable(dbName, tableName, newTable);
    // Verify that you can't get the old table any more
    assertNull(mockHiveMetastoreClient.getTable(dbName, tableName));
    // Verify that you can get the new table
    assertEquals(mockHiveMetastoreClient.getTable(dbName, newTableName), newTable);
  }
}
| 9,466 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/CopyPartitionedTableTaskTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.primitives.CopyPartitionedTableTask;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.Test;
import java.io.IOException;
/** Tests for {@link CopyPartitionedTableTask}: copying partitioned-table metadata. */
public class CopyPartitionedTableTaskTest extends MockClusterTest {
  private static final Log LOG = LogFactory.getLog(CopyPartitionedTableTaskTest.class);
  /**
   * Copies a partitioned table to the destination and verifies that the table's metadata is
   * created there with a destination-rooted location, that zero bytes are copied (partitioned
   * table copies move no data at the table level), and that a re-run is a no-op.
   */
  @Test
  public void testCopyPartitionedTable()
      throws IOException, HiveMetastoreException, DistCpException {
    // Create a partitioned table in the source
    HiveObjectSpec spec = new HiveObjectSpec("test_db", "test_table");
    Table srcTable = ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, spec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);
    // Copy the table
    CopyPartitionedTableTask copyJob = new CopyPartitionedTableTask(conf, destinationObjectFactory,
        conflictHandler, srcCluster, destCluster, spec, ReplicationUtils.getLocation(srcTable));
    RunInfo status = copyJob.runTask();
    // Verify that the table exists on the destination, the location is
    // within the destination filesystem, and no data was copied.
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
    Table destTable = destMetastore.getTable(spec.getDbName(), spec.getTableName());
    assertNotNull(destTable);
    assertTrue(destTable.getSd().getLocation().startsWith(destCluster.getFsRoot() + "/"));
    assertEquals(0, status.getBytesCopied());
    // Verify that doing a copy again is a no-op
    RunInfo rerunStatus = copyJob.runTask();
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, rerunStatus.getRunStatus());
    assertEquals(0, rerunStatus.getBytesCopied());
  }
  // Additional test cases - copying of other table types such as views?
}
| 9,467 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/CopyUnpartitionedTableTaskTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.Test;
import java.io.IOException;
/** Tests for {@link CopyUnpartitionedTableTask}: copying unpartitioned tables and views. */
public class CopyUnpartitionedTableTaskTest extends MockClusterTest {
  private static final Log LOG = LogFactory.getLog(CopyUnpartitionedTableTaskTest.class);
  /**
   * Copies an unpartitioned managed table (with data), verifies metadata and data on the
   * destination, confirms a re-run is a no-op, and checks that a copy with data copying
   * disabled reports NOT_COMPLETABLE.
   */
  @Test
  public void testCopyUnpartitionedTable()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    // Create an unpartitioned table in the source
    HiveObjectSpec spec = new HiveObjectSpec("test_db", "test_table");
    Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore, spec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);
    // Copy the table
    CopyUnpartitionedTableTask copyJob =
        new CopyUnpartitionedTableTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
            destCluster, spec, ReplicationUtils.getLocation(srcTable), directoryCopier, true);
    RunInfo status = copyJob.runTask();
    // Verify that the table exists on the destination, the location is
    // within the destination filesystem, the data is the same,
    // and the right number of bytes were copied.
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
    Table destTable = destMetastore.getTable(spec.getDbName(), spec.getTableName());
    assertNotNull(destTable);
    assertTrue(destTable.getSd().getLocation().startsWith(destCluster.getFsRoot() + "/"));
    assertTrue(FsUtils.equalDirs(conf, new Path(srcTable.getSd().getLocation()),
        new Path(destTable.getSd().getLocation())));
    assertEquals(9, status.getBytesCopied());
    // Verify that doing a copy again is a no-op
    RunInfo rerunStatus = copyJob.runTask();
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, rerunStatus.getRunStatus());
    assertEquals(0, rerunStatus.getBytesCopied());
    // Trying to copy a new table without a data copy should not succeed.
    HiveObjectSpec spec2 = new HiveObjectSpec("test_db", "test_table_2");
    ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore, spec2,
        TableType.MANAGED_TABLE, srcWarehouseRoot);
    CopyUnpartitionedTableTask copyJob2 =
        new CopyUnpartitionedTableTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
            destCluster, spec2, ReplicationUtils.getLocation(srcTable), directoryCopier, false);
    RunInfo status2 = copyJob2.runTask();
    // Verify that the table exists on the destination, the location is
    // within the destination filesystem, the data is the same,
    // and the right number of bytes were copied.
    assertEquals(RunInfo.RunStatus.NOT_COMPLETABLE, status2.getRunStatus());
  }
  /**
   * Copies a view: the destination should get the metadata, a null location (views have no
   * backing data directory), and zero bytes copied.
   */
  @Test
  public void testCopyUnpartitionedTableView()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    HiveObjectSpec spec = new HiveObjectSpec("test_db", "test_table_view");
    Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore, spec,
        TableType.VIRTUAL_VIEW, srcWarehouseRoot);
    // Copy the table
    CopyUnpartitionedTableTask copyJob =
        new CopyUnpartitionedTableTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
            destCluster, spec, ReplicationUtils.getLocation(srcTable), directoryCopier, true);
    RunInfo status = copyJob.runTask();
    // Verify that the table exists on the destination, the location is
    // within the destination filesystem, the data is the same,
    // and the right number of bytes were copied.
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
    Table destTable = destMetastore.getTable(spec.getDbName(), spec.getTableName());
    assertNotNull(destTable);
    assertNull(destTable.getSd().getLocation());
    assertEquals(0, status.getBytesCopied());
  }
}
| 9,468 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/ReplicationServerTest.java | package test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveParameterKeys;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.DbKeyValueStore;
import com.airbnb.reair.db.EmbeddedMySqlDb;
import com.airbnb.reair.db.StaticDbConnectionFactory;
import com.airbnb.reair.db.TestDbCredentials;
import com.airbnb.reair.hive.hooks.AuditLogHookUtils;
import com.airbnb.reair.hive.hooks.CliAuditLogHook;
import com.airbnb.reair.hive.hooks.HiveOperation;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationServer;
import com.airbnb.reair.incremental.auditlog.AuditLogReader;
import com.airbnb.reair.incremental.db.PersistedJobInfoStore;
import com.airbnb.reair.incremental.filter.PassThoughReplicationFilter;
import com.airbnb.reair.incremental.filter.ReplicationFilter;
import com.airbnb.reair.utils.ReplicationTestUtils;
import com.timgroup.statsd.NoOpStatsDClient;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Optional;
public class ReplicationServerTest extends MockClusterTest {
private static final Log LOG = LogFactory.getLog(
ReplicationServerTest.class);
private static EmbeddedMySqlDb embeddedMySqlDb;
private static final String AUDIT_LOG_DB_NAME = "audit_log_db";
private static final String AUDIT_LOG_TABLE_NAME = "audit_log";
private static final String AUDIT_LOG_OBJECTS_TABLE_NAME = "audit_objects";
private static final String AUDIT_LOG_MAP_RED_STATS_TABLE_NAME =
"mapred_stats";
private static final String REPLICATION_STATE_DB_NAME =
"replication_state_db";
private static final String KEY_VALUE_TABLE_NAME = "key_value";
private static final String REPLICATION_JOB_STATE_TABLE_NAME =
"replication_state";
private static final String HIVE_DB = "test_db";
// To speed up execution of the test, specify this poll interval for the
// replication server
private static final long TEST_POLL_TIME = 500;
private static CliAuditLogHook cliAuditLogHook;
private static AuditLogReader auditLogReader;
private static DbKeyValueStore dbKeyValueStore;
private static PersistedJobInfoStore persistedJobInfoStore;
private static ReplicationFilter replicationFilter;
  /**
   * Sets up this class for testing: initializes the mock clusters, starts an embedded MySQL
   * instance, and creates the audit log and replication state databases via resetState().
   *
   * @throws IOException if there's an error accessing the local filesystem
   * @throws SQLException if there's an error querying the DB
   */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    MockClusterTest.setupClass();
    embeddedMySqlDb = new EmbeddedMySqlDb();
    embeddedMySqlDb.startDb();
    resetState();
  }
  /**
   * Drops and recreates the audit log and replication-state databases in the embedded MySQL
   * instance, then rebuilds the readers/stores that the replication server uses. Called before
   * each test that needs a clean slate.
   */
  private static void resetState() throws IOException, SQLException {
    TestDbCredentials testDbCredentials = new TestDbCredentials();
    // Factory for connections without a default database (used for CREATE/DROP DATABASE).
    DbConnectionFactory dbConnectionFactory = new StaticDbConnectionFactory(
        ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb),
        testDbCredentials.getReadWriteUsername(),
        testDbCredentials.getReadWritePassword());
    // Drop the databases to start fresh
    ReplicationTestUtils.dropDatabase(dbConnectionFactory,
        AUDIT_LOG_DB_NAME);
    ReplicationTestUtils.dropDatabase(dbConnectionFactory,
        REPLICATION_STATE_DB_NAME);
    // Create the audit log DB and tables
    AuditLogHookUtils.setupAuditLogTables(dbConnectionFactory,
        AUDIT_LOG_DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME,
        AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);
    // Recreate the connection factory so that it uses the database
    DbConnectionFactory auditLogDbConnectionFactory =
        new StaticDbConnectionFactory(
            ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb,
                AUDIT_LOG_DB_NAME),
            testDbCredentials.getReadWriteUsername(),
            testDbCredentials.getReadWritePassword());
    cliAuditLogHook = new CliAuditLogHook(testDbCredentials);
    // Setup the DB and tables needed to store replication state
    setupReplicationServerStateTables(dbConnectionFactory,
        REPLICATION_STATE_DB_NAME,
        KEY_VALUE_TABLE_NAME,
        REPLICATION_JOB_STATE_TABLE_NAME);
    // Connection factory scoped to the replication state DB.
    DbConnectionFactory replicationStateDbConnectionFactory =
        new StaticDbConnectionFactory(
            ReplicationTestUtils.getJdbcUrl(embeddedMySqlDb,
                REPLICATION_STATE_DB_NAME),
            testDbCredentials.getReadWriteUsername(),
            testDbCredentials.getReadWritePassword());
    // Reader starts at audit log id 0 so each test sees all simulated entries.
    auditLogReader = new AuditLogReader(
        conf,
        auditLogDbConnectionFactory,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME,
        AUDIT_LOG_MAP_RED_STATS_TABLE_NAME,
        0);
    dbKeyValueStore = new DbKeyValueStore(
        replicationStateDbConnectionFactory,
        KEY_VALUE_TABLE_NAME);
    persistedJobInfoStore =
        new PersistedJobInfoStore(
            conf,
            replicationStateDbConnectionFactory,
            REPLICATION_JOB_STATE_TABLE_NAME);
    // Filter that lets every entry through, so tests replicate everything.
    replicationFilter = new PassThoughReplicationFilter();
    replicationFilter.setConf(conf);
  }
  /** Drops all tables (and their partitions, via deleteData=true) from both metastores. */
  private static void clearMetastores() throws HiveMetastoreException {
    // Drop all tables from the source metastore
    for (String tableName : srcMetastore.getTables(HIVE_DB, "*")) {
      srcMetastore.dropTable(HIVE_DB, tableName, true);
    }
    // Drop all tables from the destination metastore
    for (String tableName : destMetastore.getTables(HIVE_DB, "*")) {
      destMetastore.dropTable(HIVE_DB, tableName, true);
    }
  }
private static void setupReplicationServerStateTables(
DbConnectionFactory dbConnectionFactory,
String dbName,
String keyValueTableName,
String persistedJobInfoTableName) throws SQLException {
String createDbSql = String.format("CREATE DATABASE %s", dbName);
String createKeyValueTableSql =
DbKeyValueStore.getCreateTableSql(keyValueTableName);
String createPersistedJobInfoTable =
PersistedJobInfoStore.getCreateTableSql(
persistedJobInfoTableName);
Connection connection = dbConnectionFactory.getConnection();
Statement statement = connection.createStatement();
// Create the tables
try {
statement.execute(createDbSql);
connection.setCatalog(dbName);
statement = connection.createStatement();
statement.execute(createKeyValueTableSql);
statement.execute(createPersistedJobInfoTable);
} finally {
statement.close();
connection.close();
}
}
  /**
   * Verifies that a table creation recorded in the audit log is picked up by the replication
   * server and copied to the destination metastore.
   */
  @Test
  public void testTableReplication() throws Exception {
    // Reset the state
    resetState();
    clearMetastores();
    // Create an unpartitioned table in the source and a corresponding
    // entry in the audit log
    String dbName = "test_db";
    String tableName = "test_table";
    simulatedCreateUnpartitionedTable(dbName, tableName);
    // Have the replication server copy it.
    ReplicationServer replicationServer = createReplicationServer();
    replicationServer.run(1);
    // Verify that the object was copied
    assertTrue(destMetastore.existsTable(dbName, tableName));
  }
  /**
   * Verifies that a partitioned table and one of its partitions, each recorded in the audit
   * log, are both replicated to the destination metastore.
   */
  @Test
  public void testPartitionReplication() throws Exception {
    // Reset the state
    resetState();
    clearMetastores();
    // Create an partitioned table in the source and a corresponding
    // entry in the audit log
    String dbName = "test_db";
    String tableName = "test_table";
    String partitionName = "ds=1/hr=2";
    simulateCreatePartitionedTable(dbName, tableName);
    simulateCreatePartition(dbName, tableName, partitionName);
    // Have the replication server copy it.
    ReplicationServer replicationServer = createReplicationServer();
    // Two audit log entries (table + partition) -> process two batches.
    replicationServer.run(2);
    // Verify that the object was copied
    assertTrue(destMetastore.existsTable(dbName, tableName));
    assertTrue(destMetastore.existsPartition(
        dbName,
        tableName,
        partitionName));
  }
@Test
public void testOptimizedPartitionReplication() throws Exception {
// Reset the state
resetState();
clearMetastores();
// Create an partitioned table in the source and a corresponding
// entry in the audit log
final String dbName = "test_db";
final String tableName = "test_table";
final List<String> partitionNames = new ArrayList<>();
partitionNames.add("ds=1/hr=1");
partitionNames.add("ds=1/hr=2");
partitionNames.add("ds=1/hr=3");
simulateCreatePartitionedTable(dbName, tableName);
simulateCreatePartitions(dbName, tableName, partitionNames);
// Have the replication server copy it.
final ReplicationServer replicationServer = createReplicationServer();
replicationServer.run(2);
LOG.error("Server stopped");
// Verify that the object was copied
assertTrue(destMetastore.existsTable(dbName, tableName));
for (String partitionName : partitionNames) {
assertTrue(destMetastore.existsPartition(
dbName,
tableName,
partitionName));
}
}
private void removeTableAttributes(
List<org.apache.hadoop.hive.ql.metadata.Table> tables) {
for (org.apache.hadoop.hive.ql.metadata.Table table : tables) {
final Table newTable = new Table(table.getTTable());
newTable.setParameters(Collections.emptyMap());
table.setTTable(newTable);
}
}
  /**
   * Clears the parameters on each ql Partition and on its associated table by swapping in
   * copies of the backing Thrift objects with their parameter maps unset.
   *
   * <p>NOTE(review): this sets the parameters to {@code null}, whereas
   * {@code removeTableAttributes} uses an empty map — presumably equivalent for the audit
   * log serialization, but worth confirming.
   */
  private void removePartitionAttributes(
      List<org.apache.hadoop.hive.ql.metadata.Partition> partitions) {
    for (org.apache.hadoop.hive.ql.metadata.Partition p : partitions) {
      final Table newTable = new Table(p.getTable().getTTable());
      Partition newPartition = new Partition(p.getTPartition());
      newTable.setParameters(null);
      newPartition.setParameters(null);
      p.getTable().setTTable(newTable);
      p.setTPartition(newPartition);
    }
  }
  /**
   * Creates an unpartitioned table in the source metastore and writes the matching
   * "create table" entry into the audit log, as the CLI hook would have done.
   *
   * @param dbName database to create the table in
   * @param tableName name of the table to create
   * @throws Exception if the metastore or audit log operations fail
   */
  private void simulatedCreateUnpartitionedTable(String dbName, String tableName) throws Exception {
    // Create an unpartitioned table in the source and a corresponding entry in the audit log
    HiveObjectSpec unpartitionedTable = new HiveObjectSpec(dbName,
        tableName);

    Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf,
        srcMetastore,
        unpartitionedTable,
        TableType.MANAGED_TABLE,
        srcWarehouseRoot);

    // The log entry has no inputs; the new table is the only output. Attributes are
    // stripped to match what the hook records.
    List<org.apache.hadoop.hive.ql.metadata.Table> inputTables =
        new ArrayList<>();
    List<org.apache.hadoop.hive.ql.metadata.Table> outputTables =
        new ArrayList<>();
    outputTables.add(new org.apache.hadoop.hive.ql.metadata.Table(srcTable));
    removeTableAttributes(outputTables);

    // Build a HiveConf pointing at the embedded MySQL audit log tables
    HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
        embeddedMySqlDb,
        AUDIT_LOG_DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME,
        AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);

    AuditLogHookUtils.insertAuditLogEntry(
        cliAuditLogHook,
        HiveOperation.QUERY,
        "Example query string",
        inputTables,
        new ArrayList<>(),
        outputTables,
        new ArrayList<>(),
        new HashMap<>(),
        hiveConf);
  }
  /**
   * Renames a table in the source metastore and writes the corresponding rename entry into
   * the audit log.
   *
   * @param dbName database containing the table
   * @param oldTableName current name of the table
   * @param newTableName name to rename the table to
   * @param isThriftAuditLog if true, write a metastore (Thrift) style log entry; otherwise
   *     write a CLI-hook style entry
   * @throws Exception if the metastore or audit log operations fail
   */
  private void simulatedRenameTable(String dbName,
                                    String oldTableName,
                                    String newTableName,
                                    boolean isThriftAuditLog)
      throws Exception {
    // Perform the actual rename on the source metastore
    Table srcTable = srcMetastore.getTable(dbName, oldTableName);
    Table renamedTable = new Table(srcTable);
    renamedTable.setTableName(newTableName);
    srcMetastore.alterTable(dbName, oldTableName, renamedTable);

    // For the CLI-style entry: the old table is an input, and both the old and the
    // renamed table appear as outputs.
    List<org.apache.hadoop.hive.ql.metadata.Table> inputTables =
        new ArrayList<>();
    org.apache.hadoop.hive.ql.metadata.Table qlSrcTable =
        new org.apache.hadoop.hive.ql.metadata.Table(srcTable);
    inputTables.add(qlSrcTable);

    List<org.apache.hadoop.hive.ql.metadata.Table> outputTables =
        new ArrayList<>();
    outputTables.add(qlSrcTable);
    org.apache.hadoop.hive.ql.metadata.Table qlRenamedTable =
        new org.apache.hadoop.hive.ql.metadata.Table(renamedTable);
    outputTables.add(qlRenamedTable);

    if (isThriftAuditLog) {
      // Metastore-side (Thrift) log entry: only needs the old and new table objects
      HiveConf hiveConf = AuditLogHookUtils.getMetastoreHiveConf(
          embeddedMySqlDb,
          AUDIT_LOG_DB_NAME,
          AUDIT_LOG_TABLE_NAME,
          AUDIT_LOG_OBJECTS_TABLE_NAME
      );

      AuditLogHookUtils.insertThriftRenameTableLogEntry(
          srcTable,
          renamedTable,
          hiveConf
      );
    } else {
      // CLI-side log entry written via the audit log hook
      HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
          embeddedMySqlDb,
          AUDIT_LOG_DB_NAME,
          AUDIT_LOG_TABLE_NAME,
          AUDIT_LOG_OBJECTS_TABLE_NAME,
          AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);

      AuditLogHookUtils.insertAuditLogEntry(
          cliAuditLogHook,
          HiveOperation.ALTERTABLE_RENAME,
          "Example query string",
          inputTables,
          new ArrayList<>(),
          outputTables,
          new ArrayList<>(),
          new HashMap<>(),
          hiveConf);
    }
  }
  /**
   * Simulates a Thrift-style rename of a partition by writing the rename-partition entry
   * into the audit log. A mocked HMSHandler supplies the table object for the entry.
   *
   * <p>NOTE(review): unlike {@code simulatedRenameTable}, this does not alter the source
   * metastore itself — only the audit log entry is produced.
   *
   * @param dbName database containing the table
   * @param tableName table containing the partition
   * @param oldPartitionName current partition name (e.g. {@code ds=1/hr=1})
   * @param newPartitionValues partition values after the rename, in partition column order
   * @throws Exception if reading the metastore or writing the log entry fails
   */
  private void simulatedRenamePartition(String dbName,
                                        String tableName,
                                        String oldPartitionName,
                                        List<String> newPartitionValues) throws Exception {
    Partition oldPartition = srcMetastore.getPartition(dbName, tableName, oldPartitionName);
    Partition newPartition = new Partition(oldPartition);
    newPartition.setValues(newPartitionValues);

    HiveConf hiveConf = AuditLogHookUtils.getMetastoreHiveConf(
        embeddedMySqlDb,
        AUDIT_LOG_DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME
    );

    // The log-entry writer asks the handler for the table; mock it to return the source table
    HiveMetaStore.HMSHandler handler = Mockito.mock(HiveMetaStore.HMSHandler.class);
    Mockito.when(
        handler.get_table(dbName, tableName)
    ).thenReturn(srcMetastore.getTable(dbName, tableName));

    AuditLogHookUtils.insertThriftRenamePartitionLogEntry(
        handler,
        oldPartition,
        newPartition,
        hiveConf
    );
  }
private void simulateCreatePartitionedTable(String dbName, String tableName) throws Exception {
// Create an unpartitioned table in the source and a corresponding entry in the audit log
HiveObjectSpec unpartitionedTable = new HiveObjectSpec(dbName,
tableName);
Table srcTable = ReplicationTestUtils.createPartitionedTable(conf,
srcMetastore,
unpartitionedTable,
TableType.MANAGED_TABLE,
srcWarehouseRoot);
List<org.apache.hadoop.hive.ql.metadata.Table> inputTables =
new ArrayList<>();
List<org.apache.hadoop.hive.ql.metadata.Table> outputTables =
new ArrayList<>();
outputTables.add(new org.apache.hadoop.hive.ql.metadata.Table(srcTable));
removeTableAttributes(outputTables);
HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
embeddedMySqlDb,
AUDIT_LOG_DB_NAME,
AUDIT_LOG_TABLE_NAME,
AUDIT_LOG_OBJECTS_TABLE_NAME,
AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);
AuditLogHookUtils.insertAuditLogEntry(
cliAuditLogHook,
HiveOperation.QUERY,
"Example query string",
inputTables,
new ArrayList<>(),
outputTables,
new ArrayList<>(),
new HashMap<>(),
hiveConf);
}
  /**
   * Creates one partition of an existing source table and writes the matching audit log
   * entry (table as input, new partition as output).
   *
   * @param dbName database containing the table
   * @param tableName table to add the partition to
   * @param partitionName partition name of the form {@code ds=1/hr=2}
   * @throws Exception if the metastore or audit log operations fail
   */
  private void simulateCreatePartition(String dbName,
                                       String tableName,
                                       String partitionName)
      throws Exception {
    HiveObjectSpec partitionSpec = new HiveObjectSpec(dbName, tableName,
        partitionName);

    Table srcTable = srcMetastore.getTable(dbName, tableName);
    Partition srcPartition = ReplicationTestUtils.createPartition(conf,
        srcMetastore,
        partitionSpec);

    // Table is the input of the query; the new partition is the output
    List<org.apache.hadoop.hive.ql.metadata.Table> inputTables =
        new ArrayList<>();
    List<org.apache.hadoop.hive.ql.metadata.Partition> outputPartitions =
        new ArrayList<>();
    inputTables.add(new org.apache.hadoop.hive.ql.metadata.Table(srcTable));
    outputPartitions.add(new org.apache.hadoop.hive.ql.metadata.Partition(
        new org.apache.hadoop.hive.ql.metadata.Table(srcTable),
        srcPartition));
    // Strip attributes to match what the audit log hook records
    removeTableAttributes(inputTables);
    removePartitionAttributes(outputPartitions);

    HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
        embeddedMySqlDb,
        AUDIT_LOG_DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME,
        AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);

    AuditLogHookUtils.insertAuditLogEntry(
        cliAuditLogHook,
        HiveOperation.QUERY,
        "Example query string",
        inputTables,
        new ArrayList<>(),
        new ArrayList<>(),
        outputPartitions,
        new HashMap<>(),
        hiveConf);
  }
  /**
   * Creates several partitions of an existing source table and writes a single audit log
   * entry covering all of them (table as input, all new partitions as outputs). This mirrors
   * a multi-partition insert and is what the optimized copy path consumes.
   *
   * @param dbName database containing the table
   * @param tableName table to add the partitions to
   * @param partitionNames partition names of the form {@code ds=1/hr=2}
   * @throws Exception if the metastore or audit log operations fail
   */
  private void simulateCreatePartitions(String dbName,
                                        String tableName,
                                        List<String> partitionNames)
      throws Exception {
    List<org.apache.hadoop.hive.ql.metadata.Table> inputTables =
        new ArrayList<>();
    List<org.apache.hadoop.hive.ql.metadata.Partition> outputPartitions =
        new ArrayList<>();

    Table srcTable = srcMetastore.getTable(dbName, tableName);
    inputTables.add(new org.apache.hadoop.hive.ql.metadata.Table(srcTable));

    // Create each partition on the source and record it as an output of the entry
    for (String partitionName : partitionNames) {
      HiveObjectSpec partitionSpec = new HiveObjectSpec(dbName, tableName,
          partitionName);
      Partition srcPartition = ReplicationTestUtils.createPartition(conf,
          srcMetastore,
          partitionSpec);
      outputPartitions.add(new org.apache.hadoop.hive.ql.metadata.Partition(
          new org.apache.hadoop.hive.ql.metadata.Table(srcTable),
          srcPartition));
    }

    // Strip attributes to match what the audit log hook records
    removeTableAttributes(inputTables);
    removePartitionAttributes(outputPartitions);

    HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
        embeddedMySqlDb,
        AUDIT_LOG_DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME,
        AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);

    AuditLogHookUtils.insertAuditLogEntry(
        cliAuditLogHook,
        HiveOperation.QUERY,
        "Example query string",
        inputTables,
        new ArrayList<>(),
        new ArrayList<>(),
        outputPartitions,
        new HashMap<>(),
        hiveConf);
  }
private void simulateDropTable(String dbName, String tableName) throws Exception {
// Drop the specified table from the source and also generate the appropriate audit log entry
Table srcTable = srcMetastore.getTable(dbName, tableName);
srcMetastore.dropTable(dbName, tableName, false);
List<org.apache.hadoop.hive.ql.metadata.Table> outputTables = new ArrayList<>();
outputTables.add(new org.apache.hadoop.hive.ql.metadata.Table(srcTable));
HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
embeddedMySqlDb,
AUDIT_LOG_DB_NAME,
AUDIT_LOG_TABLE_NAME,
AUDIT_LOG_OBJECTS_TABLE_NAME,
AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);
AuditLogHookUtils.insertAuditLogEntry(
cliAuditLogHook,
HiveOperation.DROPTABLE,
"Example query string",
new ArrayList<>(),
new ArrayList<>(),
outputTables,
new ArrayList<>(),
new HashMap<>(),
hiveConf);
}
  /**
   * Drops a partition from the source metastore and writes the matching
   * ALTER TABLE ... DROP PARTITION entry into the audit log.
   *
   * @param dbName database containing the table
   * @param tableName table containing the partition
   * @param partitionName partition name of the form {@code ds=1/hr=2}
   * @throws Exception if the metastore or audit log operations fail
   */
  private void simulateDropPartition(String dbName, String tableName, String partitionName)
      throws Exception {
    // Drop the specified partition from the source and also generate the
    // appropriate audit log entry
    Table srcTable = srcMetastore.getTable(dbName, tableName);
    Partition srcPartition = srcMetastore.getPartition(dbName, tableName,
        partitionName);
    srcMetastore.dropPartition(dbName, tableName, partitionName, false);

    // Table is the input; the dropped partition is the output of the entry
    List<org.apache.hadoop.hive.ql.metadata.Table> inputTables =
        new ArrayList<>();
    org.apache.hadoop.hive.ql.metadata.Table qlTable =
        new org.apache.hadoop.hive.ql.metadata.Table(srcTable);
    inputTables.add(qlTable);

    List<org.apache.hadoop.hive.ql.metadata.Partition> outputPartitions =
        new ArrayList<>();
    outputPartitions.add(
        new org.apache.hadoop.hive.ql.metadata.Partition(qlTable,
            srcPartition));

    HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
        embeddedMySqlDb,
        AUDIT_LOG_DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME,
        AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);

    AuditLogHookUtils.insertAuditLogEntry(
        cliAuditLogHook,
        HiveOperation.ALTERTABLE_DROPPARTS,
        "Example query string",
        inputTables,
        new ArrayList<>(),
        new ArrayList<>(),
        outputPartitions,
        new HashMap<>(),
        hiveConf);
  }
/**
* Converts a partition name into a spec used for DDL commands. For example,
* ds=1/hr=2 -> PARTITION(ds='1', hr='2'). Note the special characters are
* not escapsed as they are in production.
*/
public static String partitionNameToDdlSpec(String partitionName) {
String[] partitionNameSplit = partitionName.split("/");
List<String> columnExpressions = new ArrayList<>();
for (String columnValue : partitionNameSplit) {
String[] columnValueSplit = columnValue.split("=");
if (columnValueSplit.length != 2) {
throw new RuntimeException("Invalid partition name "
+ partitionName);
}
columnExpressions.add(columnValueSplit[0] + "='"
+ columnValueSplit[1] + "'");
}
return "PARTITION(" + StringUtils.join(columnExpressions, ", ") + ")";
}
  /**
   * Performs an EXCHANGE PARTITION between two source tables and writes the (known broken)
   * audit log entry that Hive currently produces for it.
   *
   * @param exchangeFromDbName database of the table the partition moves out of
   * @param exchangeFromTableName table the partition moves out of
   * @param exchangeToDbName database of the table the partition moves into
   * @param exchangeToTableName table the partition moves into
   * @param partitionName partition name of the form {@code ds=1/hr=2}
   * @throws Exception if the metastore or audit log operations fail
   */
  private void simulateExchangePartition(String exchangeFromDbName,
                                         String exchangeFromTableName,
                                         String exchangeToDbName,
                                         String exchangeToTableName,
                                         String partitionName)
      throws Exception {
    // Do the exchange
    srcMetastore.exchangePartition(
        srcMetastore.partitionNameToMap(partitionName),
        exchangeFromDbName,
        exchangeFromTableName,
        exchangeToDbName,
        exchangeToTableName);

    // Reconstruct the query text so the replication server can parse the exchange out of it
    String query = String.format("ALTER TABLE %s.%s EXCHANGE "
            + "%s WITH TABLE %s.%s",
        exchangeToDbName,
        exchangeToTableName,
        partitionNameToDdlSpec(partitionName),
        exchangeFromDbName,
        exchangeFromTableName);

    // Generate the broken audit log entry. Hive should be fixed to have the
    // correct entry. It's broken in that the command type is null and
    // inputs and outputs are empty
    HiveConf hiveConf = AuditLogHookUtils.getHiveConf(
        embeddedMySqlDb,
        AUDIT_LOG_DB_NAME,
        AUDIT_LOG_TABLE_NAME,
        AUDIT_LOG_OBJECTS_TABLE_NAME,
        AUDIT_LOG_MAP_RED_STATS_TABLE_NAME);

    AuditLogHookUtils.insertAuditLogEntry(
        cliAuditLogHook,
        null,
        query,
        new ArrayList<>(),
        new ArrayList<>(),
        new ArrayList<>(),
        new ArrayList<>(),
        new HashMap<>(),
        hiveConf);
  }
  /**
   * Builds a ReplicationServer wired to the test fixtures (mock clusters, audit log reader,
   * key/value store, and persisted job info store) with a short poll interval so tests
   * run quickly.
   *
   * <p>NOTE(review): the trailing positional arguments (1, 2, Optional.of(0L)) are
   * configuration values — presumably concurrency settings and the audit log ID to start
   * after; confirm against the ReplicationServer constructor signature.
   */
  private ReplicationServer createReplicationServer() {
    ReplicationServer replicationServer = new ReplicationServer(
        conf,
        srcCluster,
        destCluster,
        auditLogReader,
        dbKeyValueStore,
        persistedJobInfoStore,
        Arrays.asList(replicationFilter),
        new DirectoryCopier(conf, srcCluster.getTmpDir(), false),
        new NoOpStatsDClient(),
        1,
        2,
        Optional.of(0L));
    // Short poll interval keeps the tests fast
    replicationServer.setPollWaitTimeMs(TEST_POLL_TIME);
    return replicationServer;
  }
/**
* Tests to make sure that entries that were not completed in the previous
* invocation of the server are picked up and run on a subsequent
* invocation.
*
* @throws Exception if there is an error setting up or running this test
*/
@Test
public void testResumeJobs() throws Exception {
// Reset the state
resetState();
clearMetastores();
String dbName = "test_db";
String firstTableName = "test_table_1";
String secondTableName = "test_table_2";
// Create the objects and the audit log entry
simulatedCreateUnpartitionedTable(dbName, firstTableName);
simulatedCreateUnpartitionedTable(dbName, secondTableName);
// Have the replication server copy the first table
ReplicationServer replicationServer = createReplicationServer();
replicationServer.run(1);
// Verify that the object was copied
assertTrue(destMetastore.existsTable(dbName, firstTableName));
assertFalse(destMetastore.existsTable(dbName, secondTableName));
// Re-run. Since the last run finished the first entry, the second run
// should copy the second entry.
replicationServer.run(1);
// Verify that the second object was copied
assertTrue(destMetastore.existsTable(dbName, firstTableName));
assertTrue(destMetastore.existsTable(dbName, secondTableName));
}
@Test
public void testDropPartition() throws Exception {
// Reset the state
resetState();
clearMetastores();
final String dbName = "test_db";
final String tableName = "test_table";
final String partitionName = "ds=1/hr=2";
final HiveObjectSpec partitionSpec = new HiveObjectSpec(dbName, tableName,
partitionName);
// Create a partitioned table and a partition on the source, and
// replicate it.
simulateCreatePartitionedTable(dbName, tableName);
simulateCreatePartition(dbName, tableName, partitionName);
ReplicationServer replicationServer = createReplicationServer();
replicationServer.run(2);
// Verify that the partition is on the destination
assertTrue(destMetastore.existsPartition(dbName, tableName,
partitionName));
// Simulate the drop
LOG.debug("Dropping " + partitionSpec);
simulateDropPartition(dbName, tableName, partitionName);
// Run replication so that it picks up the drop command
replicationServer.setStartAfterAuditLogId(2);
replicationServer.run(1);
// Verify that the partition is gone from the destination
assertFalse(destMetastore.existsPartition(dbName, tableName,
partitionName));
}
@Test
public void testDropTable() throws Exception {
// Reset the state
resetState();
clearMetastores();
final String dbName = "test_db";
final String tableName = "test_table";
// Create a table on the source, and replicate it
simulatedCreateUnpartitionedTable(dbName, tableName);
ReplicationServer replicationServer = createReplicationServer();
replicationServer.run(1);
// Verify that the partition is on the destination
assertTrue(destMetastore.existsTable(dbName, tableName));
// Simulate the drop
simulateDropTable(dbName, tableName);
// Run replication so that it picks up the drop command.
replicationServer.setStartAfterAuditLogId(1);
replicationServer.run(1);
// Verify that the partition is gone from the destination
assertFalse(destMetastore.existsTable(dbName, tableName));
}
  /**
   * Test to make sure that the drop table command does not get replicated
   * if the table is modified on the destination.
   *
   * @throws Exception if there is an error setting up or running this test
   */
  @Test
  public void testDropTableNoOp() throws Exception {
    // Reset the state
    resetState();
    clearMetastores();

    final String dbName = "test_db";
    final String tableName = "test_table";
    final String secondTableName = "test_table_2";

    // Create a table on the source, and replicate it
    simulatedCreateUnpartitionedTable(dbName, tableName);
    ReplicationServer replicationServer = createReplicationServer();
    replicationServer.run(1);

    // Verify that the table is on the destination
    assertTrue(destMetastore.existsTable(dbName, tableName));

    // Simulate the drop
    simulateDropTable(dbName, tableName);

    // Update the modified time on the destination table so the drop should be skipped
    final Table table = destMetastore.getTable(dbName, tableName);
    table.getParameters().put(HiveParameterKeys.TLDT,
        Long.toString(System.currentTimeMillis()));
    destMetastore.alterTable(dbName, tableName, table);

    // Create another table on the source so that replication has something
    // to do on the next invocation if it skips the drop command
    simulatedCreateUnpartitionedTable(dbName, secondTableName);

    // Run replication so that it picks up the drop command
    replicationServer.run(1);

    // Verify that the table is still there on the destination (drop was a no-op)
    assertTrue(destMetastore.existsTable(dbName, tableName));
  }
/**
* Test whether the rename table operation is properly propagated.
*
* @throws Exception if there is an error setting up or running this test
*/
@Test
public void testRenameTable() throws Exception {
// Reset the state
resetState();
clearMetastores();
final String dbName = "test_db";
final String tableName = "test_table";
final String newTableName = "new_test_table";
// Create a table on the source, and replicate it
simulatedCreateUnpartitionedTable(dbName, tableName);
final ReplicationServer replicationServer = createReplicationServer();
replicationServer.run(1);
// Verify that the table is on the destination
assertTrue(destMetastore.existsTable(dbName, tableName));
// Simulate the rename
simulatedRenameTable(dbName, tableName, newTableName, false);
// Propagate the rename
replicationServer.setStartAfterAuditLogId(1);
replicationServer.run(1);
// Verify that the table is renamed on the destination
assertFalse(destMetastore.existsTable(dbName, tableName));
assertTrue(destMetastore.existsTable(dbName, newTableName));
}
/**
* Test whether the rename table operation from THRIFT audit log is properly propagated.
*
* @throws Exception if there is an error setting up or running this test
*/
@Test
public void testRenameTableByThrift() throws Exception {
// Reset the state
resetState();
clearMetastores();
final String dbName = "test_db";
final String tableName = "test_table";
final String newTableName = "new_test_table";
// Create a table on the source, and replicate it
simulatedCreateUnpartitionedTable(dbName, tableName);
final ReplicationServer replicationServer = createReplicationServer();
replicationServer.run(1);
// Verify that the table is on the destination
assertTrue(destMetastore.existsTable(dbName, tableName));
// Simulate the rename
simulatedRenameTable(dbName, tableName, newTableName, true);
// Propagate the rename
replicationServer.setStartAfterAuditLogId(1);
replicationServer.run(1);
// Verify that the table is renamed on the destination
assertFalse(destMetastore.existsTable(dbName, tableName));
assertTrue(destMetastore.existsTable(dbName, newTableName));
}
  /**
   * Test whether the rename partition operation from THRIFT audit log is properly propagated.
   *
   * @throws Exception if there is an error setting up or running this test
   */
  @Test
  public void testRenamePartitionByThrift() throws Exception {
    // Reset the state
    resetState();
    clearMetastores();

    // Create a partitioned table and a corresponding entry in the audit log
    final String dbName = "test_db";
    final String tableName = "test_table";
    final String newPartitionName = "ds=1/hr=2";
    final String oldPartitionName = "ds=1/hr=1";
    final List<String> newPartitionValues = new ArrayList<>();
    newPartitionValues.add("1"); // for `ds` partition
    newPartitionValues.add("2"); // for `hr` partition

    simulateCreatePartitionedTable(dbName, tableName);
    simulateCreatePartition(dbName, tableName, oldPartitionName);

    // Have the replication server copy the table and partition first.
    ReplicationServer replicationServer = createReplicationServer();
    replicationServer.run(2);

    // Simulate the rename (Thrift-style audit log entry)
    simulatedRenamePartition(dbName,
        tableName,
        oldPartitionName,
        newPartitionValues);
    // Replicate again, starting after the two entries already processed
    replicationServer.setStartAfterAuditLogId(2);
    replicationServer.run(1);

    // Verify that the partition was renamed on the destination
    assertFalse(destMetastore.existsPartition(dbName, tableName, oldPartitionName));
    assertTrue(destMetastore.existsPartition(dbName, tableName, newPartitionName));
  }
/**
* Test whether the rename table operation is properly propagated in case
* when the table is updated on the destination. In such a case, the
* table should be copied over.
*
* @throws Exception if there is an error setting up or running this test
*/
@Test
public void testRenameTableCopy() throws Exception {
// Reset the state
resetState();
clearMetastores();
final String dbName = "test_db";
final String tableName = "test_table";
final String newTableName = "new_test_table";
// Create a table on the source, and replicate it
simulatedCreateUnpartitionedTable(dbName, tableName);
final ReplicationServer replicationServer = createReplicationServer();
replicationServer.run(1);
// Verify that the table is on the destination
assertTrue(destMetastore.existsTable(dbName, tableName));
// Update the modified time on the destination table
final Table table = destMetastore.getTable(dbName, tableName);
table.getParameters().put(HiveParameterKeys.TLDT,
Long.toString(System.currentTimeMillis()));
destMetastore.alterTable(dbName, tableName, table);
// Simulate the rename
simulatedRenameTable(dbName, tableName, newTableName, false);
// Propagate the rename
replicationServer.setStartAfterAuditLogId(1);
replicationServer.run(1);
// Verify that the renamed table was copied over, and the modified table
// remains.
assertTrue(destMetastore.existsTable(dbName, tableName));
assertTrue(destMetastore.existsTable(dbName, newTableName));
}
  /**
   * Verifies that an EXCHANGE PARTITION performed on the source is propagated, moving the
   * partition into the destination's exchange-to table.
   *
   * @throws Exception if there is an error setting up or running this test
   */
  @Test
  public void testExchangePartition() throws Exception {
    // Reset the state
    resetState();
    clearMetastores();

    // Create a partitioned table in the source and a corresponding
    // entry in the audit log
    String dbName = "test_db";
    String exchangeFromTableName = "test_table_exchange_from";
    String exchangeToTableName = "test_table_exchange_to";
    String partitionName = "ds=1/hr=2";

    simulateCreatePartitionedTable(dbName, exchangeFromTableName);
    simulateCreatePartition(dbName, exchangeFromTableName, partitionName);
    simulateCreatePartitionedTable(dbName, exchangeToTableName);

    // Have the replication server copy it.
    ReplicationServer replicationServer = createReplicationServer();
    replicationServer.run(3);

    // Simulate the exchange
    simulateExchangePartition(dbName,
        exchangeFromTableName,
        dbName,
        exchangeToTableName,
        partitionName);
    // Replicate again, starting after the three entries already processed
    replicationServer.setStartAfterAuditLogId(3);
    replicationServer.run(1);

    // Verify that both tables exist and the partition moved to the exchange-to table
    assertTrue(destMetastore.existsTable(dbName, exchangeFromTableName));
    assertTrue(destMetastore.existsTable(dbName, exchangeToTableName));
    assertTrue(destMetastore.existsPartition(
        dbName,
        exchangeToTableName,
        partitionName));
  }
  /** Tears down the mock cluster fixtures and stops the embedded MySQL instance. */
  @AfterClass
  public static void tearDownClass() {
    MockClusterTest.tearDownClass();
    embeddedMySqlDb.stopDb();
  }
// Additional cases to test
// * Copy partition after a restart
// * Copy unpartitioned table
// * Different rename cases with filters
// * Copying partitions after a table schema change
}
| 9,469 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/DropTableTest.java | package test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import com.airbnb.reair.incremental.primitives.DropTableTask;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.Test;
import java.io.IOException;
public class DropTableTest extends MockClusterTest {
private static final Log LOG = LogFactory.getLog(DropTableTest.class);
@Test
public void testDrop()
throws ConfigurationException, DistCpException, HiveMetastoreException, IOException {
String dbName = "test_db";
String tableName = "test_Table";
// Create an unpartitioned table in the source
HiveObjectSpec spec = new HiveObjectSpec(dbName, tableName);
Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore, spec,
TableType.MANAGED_TABLE, srcWarehouseRoot);
// Copy the table
CopyUnpartitionedTableTask copyJob =
new CopyUnpartitionedTableTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
destCluster, spec, ReplicationUtils.getLocation(srcTable), directoryCopier, true);
RunInfo status = copyJob.runTask();
// Verify that the table exists on the destination
assertTrue(destMetastore.existsTable(dbName, tableName));
// Pretend that a drop operation needs to be performed
DropTableTask dropTableTask =
new DropTableTask(srcCluster, destCluster, spec, ReplicationUtils.getTldt(srcTable));
dropTableTask.runTask();
// Verify that the table doesn't exist on the destination
assertFalse(destMetastore.existsTable(dbName, tableName));
// Create a different table on the destination, but with the same name
Table destTable = ReplicationTestUtils.createUnpartitionedTable(conf, destMetastore, spec,
TableType.MANAGED_TABLE, destWarehouseRoot);
// Pretend that a drop operation needs to be performed
dropTableTask.runTask();
// Verify that the table still exists on the destination
assertTrue(destMetastore.existsTable(dbName, tableName));
}
}
| 9,470 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/MockClusterFactory.java | package test;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import org.apache.hadoop.conf.Configuration;
/**
 * Returns static instances of Clusters for testing.
 *
 * <p>All methods delegate to the static fixtures created by {@link MockClusterTest}, so code
 * configured with this factory operates on the in-process mock clusters.
 */
public class MockClusterFactory implements ClusterFactory {

  // No configuration is needed — the clusters are fixed test instances.
  @Override
  public void setConf(Configuration conf) {
  }

  /** Returns the shared mock source cluster. */
  @Override
  public Cluster getSrcCluster() {
    return MockClusterTest.srcCluster;
  }

  /** Returns the shared mock destination cluster. */
  @Override
  public Cluster getDestCluster() {
    return MockClusterTest.destCluster;
  }

  /** Returns the shared directory copier used by the mock clusters. */
  @Override
  public DirectoryCopier getDirectoryCopier() throws ConfigurationException {
    return MockClusterTest.directoryCopier;
  }
}
| 9,471 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/CopyPartitionsTaskTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyPartitionsTask;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
public class CopyPartitionsTaskTest extends MockClusterTest {
private static ParallelJobExecutor jobExecutor = new ParallelJobExecutor(1);
  /** Brings up the mock cluster fixtures and starts the shared single-threaded job executor. */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    MockClusterTest.setupClass();
    jobExecutor.start();
  }
  /**
   * Verifies that a CopyPartitionsTask copies all named partitions of a table to the
   * destination and reports the expected number of bytes copied.
   */
  @Test
  public void testCopyPartitions()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    // Create a partitioned table in the source
    HiveObjectSpec tableSpec = new HiveObjectSpec("test_db", "test_table");
    ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);

    // Create several partitions in the source table
    HiveObjectSpec partitionSpec1 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=1");
    HiveObjectSpec partitionSpec2 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=2");
    HiveObjectSpec partitionSpec3 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=3");

    Partition srcPartition1 =
        ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec1);
    Partition srcPartition2 =
        ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec2);
    Partition srcPartition3 =
        ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec3);

    Map<HiveObjectSpec, Partition> specToPartition = new HashMap<>();
    specToPartition.put(partitionSpec1, srcPartition1);
    specToPartition.put(partitionSpec2, srcPartition2);
    specToPartition.put(partitionSpec3, srcPartition3);

    List<String> partitionNames = new ArrayList<>();
    partitionNames.add("ds=1/hr=1");
    partitionNames.add("ds=1/hr=2");
    partitionNames.add("ds=1/hr=3");

    // Find the common path for these partitions
    Optional<Path> commonDirectory =
        CopyPartitionsTask.findCommonDirectory(tableSpec, specToPartition);

    // Copy the partitions
    CopyPartitionsTask copyPartitionsTask =
        new CopyPartitionsTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
            destCluster, tableSpec, partitionNames, commonDirectory, jobExecutor, directoryCopier);
    RunInfo status = copyPartitionsTask.runTask();

    // Verify that every partition got copied
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec1));
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec2));
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec3));
    // 27 bytes total — presumably the fixture data written by
    // ReplicationTestUtils.createPartition for three partitions; confirm against the
    // test utility if the fixture size changes.
    assertEquals(27, status.getBytesCopied());
  }
/**
* This ensures that the common directory isn't copied in cases where copying the common directory
* would copy too much data.
*
* @throws IOException if there's an error writing to the local file system
* @throws HiveMetastoreException if there's an error querying the metastore
* @throws DistCpException if there's an error copying data
*/
@Test
public void testCopyPartitionsWithoutCopyingCommon()
throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
// Create a partitioned table in the source
HiveObjectSpec tableSpec = new HiveObjectSpec("test_db", "test_table");
ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
TableType.MANAGED_TABLE, srcWarehouseRoot);
// Create several partitions in the source table
HiveObjectSpec partitionSpec1 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=1");
HiveObjectSpec partitionSpec2 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=2");
HiveObjectSpec partitionSpec3 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=3");
HiveObjectSpec partitionSpec4 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=4");
HiveObjectSpec partitionSpec5 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=5");
Partition srcPartition1 =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec1);
Partition srcPartition2 =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec2);
Partition srcPartition3 =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec3);
Partition srcPartition4 =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec4);
Partition srcPartition5 =
ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec5);
Map<HiveObjectSpec, Partition> specToPartition = new HashMap<>();
specToPartition.put(partitionSpec1, srcPartition1);
specToPartition.put(partitionSpec2, srcPartition2);
specToPartition.put(partitionSpec3, srcPartition3);
specToPartition.put(partitionSpec4, srcPartition4);
specToPartition.put(partitionSpec3, srcPartition5);
// Copy only two partitions
List<String> partitionNames = new ArrayList<>();
partitionNames.add("ds=1/hr=1");
partitionNames.add("ds=1/hr=2");
// Find the common path for these partitions
Optional<Path> commonDirectory =
CopyPartitionsTask.findCommonDirectory(tableSpec, specToPartition);
ParallelJobExecutor jobExecutor = new ParallelJobExecutor(1);
jobExecutor.start();
// Copy the partition
CopyPartitionsTask copyPartitionsTask =
new CopyPartitionsTask(conf, destinationObjectFactory, conflictHandler, srcCluster,
destCluster, tableSpec, partitionNames, commonDirectory, jobExecutor, directoryCopier);
RunInfo status = copyPartitionsTask.runTask();
// Verify that the partition got copied
assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec1));
assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec2));
assertFalse(ReplicationUtils.exists(destMetastore, partitionSpec3));
assertFalse(ReplicationUtils.exists(destMetastore, partitionSpec4));
assertFalse(ReplicationUtils.exists(destMetastore, partitionSpec5));
assertEquals(18, status.getBytesCopied());
}
}
| 9,472 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/CopyCompleteTableTaskTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.primitives.CopyCompleteTableTask;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
public class CopyCompleteTableTaskTest extends MockClusterTest {
  // Single-threaded executor shared across tests; started once in setupClass().
  private static ParallelJobExecutor jobExecutor = new ParallelJobExecutor(1);

  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    MockClusterTest.setupClass();
    jobExecutor.start();
  }

  /**
   * Verifies that copying an unpartitioned table replicates both the metadata and the data
   * directory, and that re-running the same copy is a no-op.
   */
  @Test
  public void testCopyUnpartitionedTable()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    // Create an unpartitioned table in the source
    HiveObjectSpec spec = new HiveObjectSpec("test_db", "test_table");
    Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf, srcMetastore, spec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);
    // Copy the table
    CopyCompleteTableTask copyJob = new CopyCompleteTableTask(conf, destinationObjectFactory,
        conflictHandler, srcCluster, destCluster, spec, ReplicationUtils.getLocation(srcTable),
        jobExecutor, directoryCopier);
    RunInfo status = copyJob.runTask();
    // Verify that the table exists on the destination, the location is
    // within the destination filesystem, the data is the same,
    // and the right number of bytes were copied.
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
    Table destTable = destMetastore.getTable(spec.getDbName(), spec.getTableName());
    assertNotNull(destTable);
    assertTrue(destTable.getSd().getLocation().startsWith(destCluster.getFsRoot() + "/"));
    assertTrue(FsUtils.equalDirs(conf, new Path(srcTable.getSd().getLocation()),
        new Path(destTable.getSd().getLocation())));
    assertEquals(9, status.getBytesCopied());
    // Verify that doing a copy again is a no-op
    RunInfo rerunStatus = copyJob.runTask();
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, rerunStatus.getRunStatus());
    assertEquals(0, rerunStatus.getBytesCopied());
  }

  /**
   * Verifies that copying a partitioned table replicates the table along with all of its
   * partitions.
   */
  @Test
  public void testCopyPartitionedTable()
      throws ConfigurationException, IOException, HiveMetastoreException, DistCpException {
    // Create a partitioned table in the source
    HiveObjectSpec tableSpec = new HiveObjectSpec("test_db", "test_table");
    Table srcTable = ReplicationTestUtils.createPartitionedTable(conf, srcMetastore, tableSpec,
        TableType.MANAGED_TABLE, srcWarehouseRoot);
    // Create several partitions in the source table. The returned Partition objects are not
    // needed; the calls are made for their side effect of registering the partitions in the
    // source metastore. (A spec-to-partition map built here previously was dead code.)
    HiveObjectSpec partitionSpec1 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=1");
    HiveObjectSpec partitionSpec2 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=2");
    HiveObjectSpec partitionSpec3 = new HiveObjectSpec("test_db", "test_table", "ds=1/hr=3");
    ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec1);
    ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec2);
    ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec3);
    // Copy the whole table, partitions included
    CopyCompleteTableTask copyCompleteTableTask = new CopyCompleteTableTask(conf,
        destinationObjectFactory, conflictHandler, srcCluster, destCluster, tableSpec,
        ReplicationUtils.getLocation(srcTable), jobExecutor, directoryCopier);
    RunInfo status = copyCompleteTableTask.runTask();
    // Verify that the partitions got copied
    assertEquals(RunInfo.RunStatus.SUCCESSFUL, status.getRunStatus());
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec1));
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec2));
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec3));
    assertEquals(27, status.getBytesCopied());
  }
}
| 9,473 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/MockClusterTest.java | package test;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.sql.SQLException;
public abstract class MockClusterTest {
  private static final Log LOG = LogFactory.getLog(MockClusterTest.class);
  // Mock metastore clients for the source and destination clusters. Although static, they are
  // recreated for each test in setUp(), so tests in a subclass do not share metastore state.
  protected static MockHiveMetastoreClient srcMetastore;
  protected static MockHiveMetastoreClient destMetastore;
  // YARN mini-cluster and configuration initialized once per class in setupClass().
  protected static YarnConfiguration conf;
  protected static MiniYARNCluster miniCluster;
  // Cluster abstractions recreated per test in setUp().
  protected static Cluster srcCluster;
  protected static Cluster destCluster;
  protected static DirectoryCopier directoryCopier;
  protected static ObjectConflictHandler conflictHandler;
  protected static DestinationObjectFactory destinationObjectFactory;
  // Temporary directories on the local filesystem that we'll treat as the
  // source and destination filesystems
  @Rule
  public TemporaryFolder srcLocalTmp = new TemporaryFolder();
  @Rule
  public TemporaryFolder destLocalTmp = new TemporaryFolder();
  // Warehouse directories under the temp folders; assigned in setUp().
  protected Path srcWarehouseRoot;
  protected Path destWarehouseRoot;
  /**
   * Sets up this class for use in unit testing. It spins up the YARN mini-cluster and also sets up
   * various default classes.
   *
   * <p>Subclasses that define their own @BeforeClass must call this method first.
   *
   * @throws IOException if there's an error accessing the local filesystem
   * @throws SQLException if there's an error querying the embedded DB
   */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniCluster = new MiniYARNCluster("test", 1, 1, 1);
    miniCluster.init(conf);
    miniCluster.start();
    conflictHandler = new ObjectConflictHandler();
    conflictHandler.setConf(conf);
    destinationObjectFactory = new DestinationObjectFactory();
    destinationObjectFactory.setConf(conf);
  }
  /**
   * Setup to do for each unit test. Creates fresh mock metastores, warehouse and tmp directories
   * on the local filesystem, and mock source/destination clusters backed by them.
   *
   * @throws IOException if there's an error accessing the local file system
   */
  @Before
  public void setUp() throws IOException {
    srcMetastore = new MockHiveMetastoreClient();
    destMetastore = new MockHiveMetastoreClient();
    srcLocalTmp.create();
    destLocalTmp.create();
    final Path srcFsRoot = new Path("file://" + srcLocalTmp.getRoot().getAbsolutePath());
    final Path destFsRoot = new Path("file://" + destLocalTmp.getRoot().getAbsolutePath());
    srcWarehouseRoot = new Path(makeFileUri(srcLocalTmp), "warehouse");
    destWarehouseRoot = new Path(makeFileUri(destLocalTmp), "warehouse");
    srcWarehouseRoot.getFileSystem(conf).mkdirs(srcWarehouseRoot);
    destWarehouseRoot.getFileSystem(conf).mkdirs(destWarehouseRoot);
    LOG.info(String.format("src root: %s, dest root: %s", srcWarehouseRoot, destWarehouseRoot));
    final Path srcTmp = new Path(makeFileUri(this.srcLocalTmp), "tmp");
    final Path destTmp = new Path(makeFileUri(this.destLocalTmp), "tmp");
    srcCluster = new MockCluster("src_cluster", srcMetastore, srcFsRoot, srcTmp);
    destCluster = new MockCluster("dest_cluster", destMetastore, destFsRoot, destTmp);
    // Disable checking of modified times as the local filesystem does not
    // support this
    directoryCopier = new DirectoryCopier(conf, destCluster.getTmpDir(), false);
  }
  // Shuts down the YARN mini-cluster started in setupClass().
  @AfterClass
  public static void tearDownClass() {
    miniCluster.stop();
  }
  // Returns a file:// URI Path pointing at the root of the given temporary folder.
  protected static Path makeFileUri(TemporaryFolder directory) {
    return new Path("file://" + directory.getRoot().getAbsolutePath());
  }
}
| 9,474 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/ExchangePartitionParserTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ExchangePartitionParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
public class ExchangePartitionParserTest {
  private static final Log LOG = LogFactory.getLog(ExchangePartitionParserTest.class);

  /**
   * Parses the given exchange-partition query and verifies the extracted from/to table specs and
   * the partition name against the expected values.
   */
  private static void verifyParse(
      String query,
      HiveObjectSpec expectedFromSpec,
      HiveObjectSpec expectedToSpec,
      String expectedPartitionName) {
    ExchangePartitionParser parser = new ExchangePartitionParser();
    assertTrue(parser.parse(query));
    assertEquals(expectedFromSpec, parser.getExchangeFromSpec());
    assertEquals(expectedToSpec, parser.getExchangeToSpec());
    assertEquals(expectedPartitionName, parser.getPartitionName());
  }

  @Test
  public void testParse1() {
    // Fully qualified table names with extra whitespace inside the partition spec.
    verifyParse(
        "ALTER TABLE test_db.test_table_exchange_to EXCHANGE "
            + "PARTITION( ds='1', hr = '2') "
            + "WITH TABLE test_db.test_table_exchange_from",
        new HiveObjectSpec("test_db", "test_table_exchange_from"),
        new HiveObjectSpec("test_db", "test_table_exchange_to"),
        "ds=1/hr=2");
  }

  @Test
  public void testParse2() {
    // Unqualified table names should resolve to the default database.
    verifyParse(
        "ALTER TABLE test_table_exchange_to EXCHANGE " + "PARTITION(ds='1', hr='2') "
            + "WITH TABLE test_table_exchange_from",
        new HiveObjectSpec("default", "test_table_exchange_from"),
        new HiveObjectSpec("default", "test_table_exchange_to"),
        "ds=1/hr=2");
  }
}
| 9,475 |
0 | Create_ds/reair/main/src/test/java | Create_ds/reair/main/src/test/java/test/BatchReplicationTest.java | package test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import com.airbnb.reair.batch.hdfs.ReplicationJob;
import com.airbnb.reair.batch.hive.MetastoreReplicationJob;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.utils.ReplicationTestUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.util.ToolRunner;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.sql.SQLException;
/**
 * Unit test for batch replication.
 */
public class BatchReplicationTest extends MockClusterTest {
  /**
   * Sets up this class to have the right defaults for the batch replication test.
   *
   * @throws IOException if there's an error writing to files
   * @throws SQLException if there's an error querying the embedded DB
   */
  @BeforeClass
  public static void setupClass() throws IOException, SQLException {
    MockClusterTest.setupClass();
    // Speculative execution could run a copy task twice; keep runs deterministic.
    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
    conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
  }

  /**
   * Verifies that the metastore batch replication job copies new tables and partitions to the
   * destination, and that a second run propagates source-side deletions.
   */
  @Test
  public void testCopyNewTables() throws Exception {
    // Create an unpartitioned table in the source
    final HiveObjectSpec spec = new HiveObjectSpec("test", "test_table");
    final Table srcTable = ReplicationTestUtils.createUnpartitionedTable(conf,
        srcMetastore,
        spec,
        TableType.MANAGED_TABLE,
        srcWarehouseRoot);
    // Create a partitioned table in the source. The returned Table object is not needed; the
    // call registers the table in the source metastore.
    final HiveObjectSpec tableSpec = new HiveObjectSpec("test", "partitioned_table");
    ReplicationTestUtils.createPartitionedTable(conf,
        srcMetastore,
        tableSpec,
        TableType.MANAGED_TABLE,
        srcWarehouseRoot);
    // Create several partitions in the source table
    HiveObjectSpec partitionSpec1 = new HiveObjectSpec("test",
        "partitioned_table", "ds=1/hr=1");
    HiveObjectSpec partitionSpec2 = new HiveObjectSpec("test",
        "partitioned_table", "ds=1/hr=2");
    HiveObjectSpec partitionSpec3 = new HiveObjectSpec("test",
        "partitioned_table", "ds=1/hr=3");
    final Partition srcPartition1 = ReplicationTestUtils.createPartition(conf,
        srcMetastore, partitionSpec1);
    final Partition srcPartition2 = ReplicationTestUtils.createPartition(conf,
        srcMetastore, partitionSpec2);
    final Partition srcPartition3 = ReplicationTestUtils.createPartition(conf,
        srcMetastore, partitionSpec3);
    JobConf jobConf = new JobConf(conf);
    String[] args = {};
    jobConf.set(ConfigurationKeys.BATCH_JOB_OUTPUT_DIR,
        new Path(destCluster.getFsRoot(), "test_output").toString());
    jobConf.set(ConfigurationKeys.BATCH_JOB_CLUSTER_FACTORY_CLASS,
        MockClusterFactory.class.getName());
    // Fix: previously the exit code of this first run was silently ignored.
    assertEquals(0, ToolRunner.run(jobConf, new MetastoreReplicationJob(), args));
    // The unpartitioned table and its data directory should have been replicated.
    assertTrue(ReplicationUtils.exists(destMetastore, spec));
    Table dstTable = destMetastore.getTable(spec.getDbName(), spec.getTableName());
    assertTrue(directoryCopier.equalDirs(new Path(srcTable.getSd().getLocation()),
        new Path(dstTable.getSd().getLocation())));
    // Each partition's metadata and data should have been replicated as well.
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec1));
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec2));
    assertTrue(ReplicationUtils.exists(destMetastore, partitionSpec3));
    Partition dstPartition1 = destMetastore.getPartition(partitionSpec1.getDbName(),
        partitionSpec1.getTableName(),
        partitionSpec1.getPartitionName());
    assertTrue(directoryCopier.equalDirs(new Path(srcPartition1.getSd().getLocation()),
        new Path(dstPartition1.getSd().getLocation())));
    Partition dstPartition2 = destMetastore.getPartition(partitionSpec2.getDbName(),
        partitionSpec2.getTableName(),
        partitionSpec2.getPartitionName());
    assertTrue(directoryCopier.equalDirs(new Path(srcPartition2.getSd().getLocation()),
        new Path(dstPartition2.getSd().getLocation())));
    Partition dstPartition3 = destMetastore.getPartition(partitionSpec3.getDbName(),
        partitionSpec3.getTableName(),
        partitionSpec3.getPartitionName());
    assertTrue(directoryCopier.equalDirs(new Path(srcPartition3.getSd().getLocation()),
        new Path(dstPartition3.getSd().getLocation())));
    // Drop a table and a partition in the source; a second run should propagate the deletions.
    ReplicationTestUtils.dropTable(srcMetastore, spec);
    ReplicationTestUtils.dropPartition(srcMetastore, partitionSpec2);
    assertEquals(0, ToolRunner.run(jobConf, new MetastoreReplicationJob(), args));
    assertTrue(!ReplicationUtils.exists(destMetastore, partitionSpec2));
  }

  /**
   * Verifies that the HDFS batch copy job makes the destination warehouse directory identical to
   * the source warehouse directory.
   */
  @Test
  public void testHdfsCopy() throws Exception {
    // Create an unpartitioned table in the source. The returned metastore objects below are not
    // needed; the calls are made for their side effect of creating warehouse data directories.
    HiveObjectSpec spec = new HiveObjectSpec("test", "test_table");
    ReplicationTestUtils.createUnpartitionedTable(conf,
        srcMetastore,
        spec,
        TableType.MANAGED_TABLE,
        srcWarehouseRoot);
    // Create a partitioned table in the source
    HiveObjectSpec tableSpec = new HiveObjectSpec("test", "partitioned_table");
    ReplicationTestUtils.createPartitionedTable(conf,
        srcMetastore,
        tableSpec,
        TableType.MANAGED_TABLE,
        srcWarehouseRoot);
    // Create several partitions in the source table
    HiveObjectSpec partitionSpec1 = new HiveObjectSpec("test",
        "partitioned_table", "ds=1/hr=1");
    HiveObjectSpec partitionSpec2 = new HiveObjectSpec("test",
        "partitioned_table", "ds=1/hr=2");
    HiveObjectSpec partitionSpec3 = new HiveObjectSpec("test",
        "partitioned_table", "ds=1/hr=3");
    ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec1);
    ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec2);
    ReplicationTestUtils.createPartition(conf, srcMetastore, partitionSpec3);
    JobConf jobConf = new JobConf(conf);
    String[] args = {"--" + ReplicationJob.SOURCE_DIRECTORY_ARG, srcWarehouseRoot.toString(),
        "--" + ReplicationJob.DESTINATION_DIRECTORY_ARG, destWarehouseRoot.toString(),
        "--" + ReplicationJob.LOG_DIRECTORY_ARG,
        new Path(destCluster.getFsRoot(), "log").toString(),
        "--" + ReplicationJob.TEMP_DIRECTORY_ARG, destCluster.getTmpDir().toString(),
        "--" + ReplicationJob.OPERATIONS_ARG, "a,d,u"};
    assertEquals(0, ToolRunner.run(jobConf, new ReplicationJob(), args));
    assertTrue(directoryCopier.equalDirs(srcWarehouseRoot, destWarehouseRoot));
  }
}
| 9,476 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/OnStateChangeHandler.java | package com.airbnb.reair.incremental;
/**
 * Handler that fires when replication jobs change states.
 *
 * <p>Both callbacks may throw {@link StateUpdateException}, so implementations are expected to
 * record the state change somewhere that can fail (e.g. a persistent store).
 */
public interface OnStateChangeHandler {
  /**
   * Method to run when the job starts.
   *
   * @param replicationJob job that started
   *
   * @throws StateUpdateException if there's an error updating the state
   */
  void onStart(ReplicationJob replicationJob) throws StateUpdateException;
  /**
   * Method to run when the job completes.
   *
   * @param runInfo information about how the job ran
   * @param replicationJob the job that completed
   *
   * @throws StateUpdateException if there's an error updating the state
   */
  void onComplete(RunInfo runInfo, ReplicationJob replicationJob) throws StateUpdateException;
}
| 9,477 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationStatus.java | package com.airbnb.reair.incremental;
/**
 * States that a replication job can be in.
 */
public enum ReplicationStatus {
  /** Created, but not yet running. */
  PENDING,
  /** Executing operations. */
  RUNNING,
  /** Finished running with a success. */
  SUCCESSFUL,
  /** Finished running but with a failure. */
  FAILED,
  /** A job that is not possible to complete. For example, trying to copy a non-existent table. */
  NOT_COMPLETABLE,
  /** A job that was aborted and should not be run again. */
  ABORTED,
}
| 9,478 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ExchangePartitionParser.java | package com.airbnb.reair.incremental;
import com.airbnb.reair.common.HiveObjectSpec;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Parses exchange partition query commands. Since it uses a regex, there are cases that won't be
 * handled properly. See warnings below for details.
 *
 * <p>TODO: Deprecate once HIVE-12865 is resolved
 */
public class ExchangePartitionParser {
  // Compiled once: Pattern compilation is relatively expensive and the regex is constant.
  private static final Pattern EXCHANGE_PATTERN = Pattern.compile(
      "\\s*ALTER\\s+TABLE\\s+" + "(?<exchangeToTable>\\S+)\\s+EXCHANGE\\s+PARTITION\\s*\\(\\s*"
          + "(?<partitionSpec>.*)\\)\\s+WITH\\s+TABLE\\s+" + "(?<exchangeFromTable>\\S+)\\s*",
      Pattern.CASE_INSENSITIVE);

  private HiveObjectSpec exchangeFromTableSpec;
  private HiveObjectSpec exchangeToTableSpec;
  private String partitionName;
  private List<String> partitionValues;

  /**
   * Parse the query and store relevant attributes.
   *
   * @param query the query to parse
   * @return whether query was successfully parsed
   */
  public boolean parse(String query) {
    Matcher matcher = EXCHANGE_PATTERN.matcher(query);
    if (!matcher.matches()) {
      return false;
    }
    String exchangeFromTable = matcher.group("exchangeFromTable");
    String exchangeToTable = matcher.group("exchangeToTable");
    String partitionSpec = matcher.group("partitionSpec");
    exchangeFromTableSpec = getSpec(exchangeFromTable);
    exchangeToTableSpec = getSpec(exchangeToTable);
    partitionName = getPartitionName(partitionSpec);
    partitionValues = getPartitionValues(partitionSpec);
    return true;
  }

  /**
   * Parse the string representation of a Hive object specification into HiveObjectSpec.
   *
   * @param spec table specification in the form "db.table"
   * @return HiveObjectSpec represented by the input string
   */
  private HiveObjectSpec getSpec(String spec) {
    String[] specSplit = spec.split("\\.");
    if (specSplit.length == 1) {
      // Warning! Assumes default DB.
      return new HiveObjectSpec("default", specSplit[0]);
    } else if (specSplit.length == 2) {
      return new HiveObjectSpec(specSplit[0], specSplit[1]);
    } else {
      throw new RuntimeException(
          "Unexpected split from " + spec + " to " + Arrays.asList(specSplit));
    }
  }

  // Strips spaces, tabs, and newlines from both ends of the string.
  private String trimWhitespace(String str) {
    str = StringUtils.stripEnd(str, " \t\n");
    str = StringUtils.stripStart(str, " \t\n");
    return str;
  }

  /**
   * Splits a single column spec of the form "ds = '1'" into a trimmed {name, value} pair with
   * the surrounding quotes removed from the value.
   *
   * @param columnSpec a single column specification, e.g. "ds='1'"
   * @return a two element array of {name, value}
   */
  private String[] splitColumnSpec(String columnSpec) {
    columnSpec = trimWhitespace(columnSpec);
    // columnSpec should be something of the form ds='1'
    String[] columnSpecSplit = columnSpec.split("=");
    if (columnSpecSplit.length != 2) {
      throw new RuntimeException("Unexpected column spec " + columnSpec);
    }
    String name = trimWhitespace(columnSpecSplit[0]);
    String value = trimWhitespace(columnSpecSplit[1]).replace("'", "");
    return new String[] {name, value};
  }

  /**
   * Get the partition that is being exchanged by this query.
   *
   * @return the name of the partition that is exchanged by the previously parsed query
   */
  public String getPartitionName() {
    return partitionName;
  }

  /**
   * Get the partition name from a partition spec. E.g. "ds='1'" => "ds=1"
   *
   * @param partitionSpec a partition specification in the form "ds=1, hr=2"
   *
   * @return the partition spec converted to that name
   */
  private String getPartitionName(String partitionSpec) {
    // Warning - incorrect for cases where there are commas in values and
    // other corner cases.
    StringBuilder sb = new StringBuilder();
    for (String columnSpec : partitionSpec.split(",")) {
      String[] nameAndValue = splitColumnSpec(columnSpec);
      if (sb.length() != 0) {
        sb.append("/");
      }
      sb.append(nameAndValue[0]);
      sb.append("=");
      sb.append(nameAndValue[1]);
    }
    return sb.toString();
  }

  /**
   * Get the partition that is being exchanged by this query.
   *
   * @return the partition values of the partition that is exchanged by the previously parsed query
   */
  public List<String> getPartitionValues() {
    return partitionValues;
  }

  private List<String> getPartitionValues(String partitionSpec) {
    // Warning - incorrect for cases where there are commas in values and
    // other corner cases.
    List<String> values = new ArrayList<>();
    for (String columnSpec : partitionSpec.split(",")) {
      // Bug fix: the value was previously not trimmed, so "hr = '2'" yielded " 2" here while
      // getPartitionName() yielded "2". Both paths now share splitColumnSpec() for consistency.
      values.add(splitColumnSpec(columnSpec)[1]);
    }
    return values;
  }

  /**
   * Get the specification for the object that the query exchanged from.
   *
   * @return specification for the object that the query exchanged from.
   */
  public HiveObjectSpec getExchangeFromSpec() {
    return exchangeFromTableSpec;
  }

  /**
   * Get the specification for the object that the query exchanged to.
   *
   * @return specification for the object that the query exchanged to.
   */
  public HiveObjectSpec getExchangeToSpec() {
    return exchangeToTableSpec;
  }
}
| 9,479 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationJobRegistry.java | package com.airbnb.reair.incremental;
import static com.airbnb.reair.incremental.auditlog.MetricNames.REPLICATION_JOBS_AGE_COUNT;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.timgroup.statsd.StatsDClient;
import org.apache.hadoop.conf.Configuration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collector;
/**
 * Keeps track of a set of jobs.
 */
public class ReplicationJobRegistry {
  // Report number of jobs above each threshold (in seconds)
  private static final long[] DEFAULT_THRESHOLDS = {1800, 3600, 7200, 10800, 21600};
  // Upper bound on how many retired jobs are kept for inspection.
  private static final long MAX_RETIRED_JOBS = 200;

  private StatsDClient statsDClient;
  private long[] thresholds;

  // Active jobs keyed by id; a TreeMap so the smallest (oldest) id is cheap to find.
  TreeMap<Long, ReplicationJob> idToReplicationJob = new TreeMap<>();
  // Recently retired jobs, oldest first, bounded by MAX_RETIRED_JOBS.
  LinkedList<ReplicationJob> retiredJobs = new LinkedList<>();

  /**
   * Creates a registry, reading the metric age thresholds from the configuration if present and
   * falling back to DEFAULT_THRESHOLDS otherwise.
   *
   * @param conf Configuration for Reair.
   * @param statsDClient A statsDClient.
   */
  public ReplicationJobRegistry(Configuration conf, StatsDClient statsDClient) {
    this.statsDClient = statsDClient;
    String thresholdString = conf.get(ConfigurationKeys.REPLICATION_JOB_METRIC_THRESHOLDS, null);
    if (thresholdString != null) {
      String[] splitString = thresholdString.trim().split(",");
      thresholds = Arrays.stream(splitString).mapToLong(Long::parseLong).toArray();
    } else {
      this.thresholds = DEFAULT_THRESHOLDS;
    }
  }

  public synchronized void registerJob(ReplicationJob job) {
    idToReplicationJob.put(job.getId(), job);
  }

  public synchronized ReplicationJob getJob(long id) {
    return idToReplicationJob.get(id);
  }

  /**
   * Get the job with the smallest ID value in the registry. The job with the smallest ID is
   * generally the oldest job.
   *
   * @return the job with the smallest ID in the registry, or null if the registry is empty
   */
  public synchronized ReplicationJob getJobWithSmallestId() {
    if (idToReplicationJob.size() == 0) {
      return null;
    } else {
      return idToReplicationJob.firstEntry().getValue();
    }
  }

  /**
   * Get all active jobs.
   *
   * @return a collection containing all the active replication jobs. The jobs are returned ordered
   *         by id ascending.
   */
  public synchronized Collection<ReplicationJob> getActiveJobs() {
    return new ArrayList<>(idToReplicationJob.values());
  }

  /**
   * Remove this job from the main internal data structures to another retired job datastructure.
   *
   * @param job the job to remove
   */
  public synchronized void retireJob(ReplicationJob job) {
    ReplicationJob removedJob = idToReplicationJob.remove(job.getId());
    if (removedJob == null) {
      throw new RuntimeException("Couldn't find id: " + job.getId() + " in the registry!");
    }
    if (removedJob != job) {
      // Bug fix: the message previously contained literal "%s" placeholders because
      // String.format() was never applied to it.
      throw new RuntimeException(String.format(
          "Replication jobs with the same ID are not equal: %s and %s", removedJob, job));
    }
    // Trim the size of the list so that adding this job does not exceed the limit.
    if (retiredJobs.size() + 1 > MAX_RETIRED_JOBS) {
      retiredJobs.removeFirst();
    }
    retiredJobs.add(removedJob);
  }

  public synchronized Collection<ReplicationJob> getRetiredJobs() {
    return new ArrayList<>(retiredJobs);
  }

  /**
   * Report stats on the age of replication jobs based on thresholds in seconds.
   * If the jobs have a delay of 1, 5, 10 seconds, and the thresholds are {2, 6}, we would report
   * {2: 2, 6: 1}
   */
  public synchronized void reportStats() {
    long now = System.currentTimeMillis();
    Map<Long, Integer> mapCount = new HashMap<>();
    for (Long value: thresholds) {
      mapCount.put(value, 0);
    }
    for (ReplicationJob job : idToReplicationJob.values()) {
      for (Long value: thresholds) {
        // Job age is derived from the persisted creation time, converted to seconds.
        if ((now - job.getPersistedJobInfo().getCreateTime()) / 1000 > value) {
          mapCount.put(value, mapCount.get(value) + 1);
        }
      }
    }
    for (Map.Entry<Long, Integer> val: mapCount.entrySet()) {
      statsDClient.gauge(String.format(REPLICATION_JOBS_AGE_COUNT, val.getKey()), val.getValue());
    }
  }
}
| 9,480 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationCounters.java | package com.airbnb.reair.incremental;
import static com.airbnb.reair.incremental.auditlog.MetricNames.REPLICATION_STATUS_COUNT;
import com.timgroup.statsd.StatsDClient;
import java.util.HashMap;
import java.util.Map;
/**
 * Counters used to track the progress of replication within the app.
 */
public class ReplicationCounters {
  public enum Type {
    // Tasks that have completed successfully
    SUCCESSFUL_TASKS,
    // Tasks that aren't completable (e.g. missing source table), but are
    // otherwise finished.
    NOT_COMPLETABLE_TASKS,
    // Tasks that were submitted to run.
    EXECUTION_SUBMITTED_TASKS,
    // Tasks that failed to execute. This shouldn't happen in normal
    // operation.
    FAILED_TASKS
  }

  // Counter values keyed by type; guarded by this instance's monitor.
  private Map<Type, Long> counters;
  private StatsDClient statsDClient;

  public ReplicationCounters(StatsDClient client) {
    counters = new HashMap<>();
    statsDClient = client;
  }

  /**
   * Increment the count for the given counter type and emit a StatsD counter metric.
   *
   * @param type the type of counter
   */
  public synchronized void incrementCounter(Type type) {
    // merge() handles the first increment without an explicit null check.
    counters.merge(type, 1L, Long::sum);
    statsDClient.incrementCounter(
        String.format(REPLICATION_STATUS_COUNT, type.toString().toLowerCase()));
  }

  /**
   * Get the value of the given counter.
   *
   * @param type the type of counter
   * @return the value of the counter, or 0 if it was never incremented
   */
  public synchronized long getCounter(Type type) {
    return counters.getOrDefault(type, 0L);
  }

  /**
   * Reset all counters to 0.
   */
  public synchronized void clear() {
    counters.clear();
  }
}
| 9,481 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationJob.java | package com.airbnb.reair.incremental;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.db.PersistedJobInfo;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.incremental.primitives.ReplicationTask;
import com.airbnb.reair.multiprocessing.Job;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.Set;
/**
* A job that performs a replication task and can be executed in parallel though the
* ParallelJobExecutor.
*/
public class ReplicationJob extends Job {
  private static final Log LOG = LogFactory.getLog(ReplicationJob.class);

  // Default number of times to retry a job if it fails.
  public static final int DEFAULT_JOB_RETRIES = 8;

  private final Configuration conf;
  private final ReplicationTask replicationTask;
  private final OnStateChangeHandler onStateChangeHandler;
  private final PersistedJobInfo persistedJobInfo;

  /**
   * Constructor for a replication job that can be run in the ParallelJobExecutor.
   *
   * @param conf configuration used to look up the retry count
   * @param replicationTask the task that this job should run
   * @param onStateChangeHandler The handler to run when the state of this job changes. E.g. start
   *        or finish.
   * @param persistedJobInfo the PersistedJobInfo that should be associated with this job
   */
  public ReplicationJob(
      Configuration conf,
      ReplicationTask replicationTask,
      OnStateChangeHandler onStateChangeHandler,
      PersistedJobInfo persistedJobInfo) {
    this.conf = conf;
    this.replicationTask = replicationTask;
    this.onStateChangeHandler = onStateChangeHandler;
    this.persistedJobInfo = persistedJobInfo;
  }

  public PersistedJobInfo getPersistedJobInfo() {
    return persistedJobInfo;
  }

  /**
   * Run the replication task, retrying with exponential backoff on exceptions that are likely
   * transient (metastore, I/O, or DistCp errors).
   *
   * @return 0 if the task finished (successfully or as not completable), -1 otherwise
   */
  @Override
  public int run() {
    // One initial attempt plus the configured number of retries.
    int maxAttempts =
        1 + Math.max(0, conf.getInt(ConfigurationKeys.JOB_RETRIES, DEFAULT_JOB_RETRIES));

    // Note: `attempt` must only be advanced by the loop header. An additional increment in
    // the body (as in earlier revisions) would skip every other retry and halve the
    // effective retry count.
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        onStateChangeHandler.onStart(this);
        RunInfo runInfo = replicationTask.runTask();
        LOG.info(String.format("Replication job id: %s finished " + "with status %s",
            persistedJobInfo.getId(), runInfo.getRunStatus()));
        onStateChangeHandler.onComplete(runInfo, this);

        switch (runInfo.getRunStatus()) {
          case SUCCESSFUL:
          case NOT_COMPLETABLE:
            return 0;
          case FAILED:
            return -1;
          default:
            throw new RuntimeException("State not handled: " + runInfo.getRunStatus());
        }
      } catch (HiveMetastoreException | IOException | DistCpException e) {
        // Likely a transient failure - log and fall through to retry.
        LOG.error("Got an exception!", e);
      } catch (StateUpdateException | ConfigurationException e) {
        // Indicates an error with the system - fail the job.
        LOG.error("Got an exception!", e);
        return -1;
      }

      if (attempt == maxAttempts - 1) {
        break;
      }

      LOG.error("Because job id: " + getId() + " was not successful, "
          + "it will be retried after sleeping.");
      try {
        ReplicationUtils.exponentialSleep(attempt);
      } catch (InterruptedException e) {
        LOG.warn("Got interrupted", e);
        // Preserve the interrupt status for callers higher up the stack.
        Thread.currentThread().interrupt();
        return -1;
      }
    }
    return -1;
  }

  @Override
  public LockSet getRequiredLocks() {
    return replicationTask.getRequiredLocks();
  }

  @Override
  public String toString() {
    return "ReplicationJob{" + "persistedJobInfo=" + persistedJobInfo + "}";
  }

  /**
   * Get the ID associated with this job.
   *
   * @return this job's ID
   */
  public long getId() {
    return persistedJobInfo.getId();
  }

  /**
   * Get the time when this job was created, taken from the originating audit log entry's
   * create time stored in the job's extras map.
   *
   * @return the create time in epoch milliseconds, or 0 if not recorded
   */
  public long getCreateTime() {
    Optional<String> createTime = Optional.ofNullable(
        getPersistedJobInfo().getExtras().get(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY));
    return createTime.map(Long::parseLong).orElse(0L);
  }

  /**
   * Get the ID's for the jobs that this job is waiting on.
   *
   * @return a list of job ID's that this job is waiting on
   */
  public Collection<Long> getParentJobIds() {
    Set<Job> parentJobs = getParentJobs();
    List<Long> parentJobIds = new ArrayList<>();
    for (Job parentJob : parentJobs) {
      // Generally good to avoid casting, but done here since the getId()
      // is not a part of the Job class.
      ReplicationJob replicationJob = (ReplicationJob) parentJob;
      parentJobIds.add(replicationJob.getId());
    }
    return parentJobIds;
  }
}
| 9,482 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/StateUpdateException.java | package com.airbnb.reair.incremental;
/**
* Exception thrown when there is an error updating the state of a job.
*/
public class StateUpdateException extends Exception {
  /** Constructs an exception with no detail message or cause. */
  public StateUpdateException() {
    super();
  }

  /**
   * @param message description of the state update failure
   */
  public StateUpdateException(String message) {
    super(message);
  }

  /**
   * @param message description of the state update failure
   * @param cause the underlying exception that triggered this one
   */
  public StateUpdateException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause the underlying exception that triggered this one
   */
  public StateUpdateException(Throwable cause) {
    super(cause);
  }
}
| 9,483 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/MetadataException.java | package com.airbnb.reair.incremental;
/**
* Exception related to Hive metadata.
*/
public class MetadataException extends Exception {
  /**
   * @param message description of the metadata problem
   */
  public MetadataException(String message) {
    super(message);
  }

  /**
   * @param message description of the metadata problem
   * @param cause the underlying exception that triggered this one
   */
  public MetadataException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause the underlying exception that triggered this one
   */
  public MetadataException(Throwable cause) {
    super(cause);
  }
}
| 9,484 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/StatsTracker.java | package com.airbnb.reair.incremental;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.Timer;
import java.util.TimerTask;
/**
* Periodically prints stats for the replication process to the log.
*/
public class StatsTracker {
  private static final Log LOG = LogFactory.getLog(StatsTracker.class);

  // By default print the stats every 10 seconds
  private static final long PRINT_TIME_INTERVAL = 10 * 1000;

  // Registry queried for the oldest (smallest-ID) in-flight replication job.
  private ReplicationJobRegistry jobRegistry;
  // Daemon timer so this background task does not keep the JVM alive.
  private Timer timer = new Timer(true);
  // Most recent lag computed by the timer task; volatile for cross-thread reads.
  private volatile long lastCalculatedLag = 0;

  /**
   * Constructor for a stats tracker.
   *
   * @param jobRegistry the job registry to query
   */
  public StatsTracker(ReplicationJobRegistry jobRegistry) {
    this.jobRegistry = jobRegistry;
  }

  /**
   * Start periodically logging the oldest job's ID and age, updating the lag each time.
   */
  public void start() {
    TimerTask statsTask = new TimerTask() {
      @Override
      public void run() {
        ReplicationJob oldestJob = jobRegistry.getJobWithSmallestId();
        if (oldestJob == null) {
          LOG.debug("Oldest ID: N/A Age: 0");
          lastCalculatedLag = 0;
          return;
        }

        long now = System.currentTimeMillis();
        long created = oldestJob.getCreateTime();
        String age = "N/A";
        if (created == 0) {
          // No recorded create time, so the lag cannot be computed.
          lastCalculatedLag = 0;
        } else {
          age = String.format("%.2f hrs", (now - created) / 3600.0 / 1000.0);
          lastCalculatedLag = now - created;
        }
        LOG.debug(String.format("Oldest ID: %s Age: %s", oldestJob.getId(), age));
      }
    };
    timer.scheduleAtFixedRate(statsTask, 0, PRINT_TIME_INTERVAL);
  }

  /**
   * Get the most recently calculated lag. The lag is defined as the time difference between now
   * and the oldest entry that is not done.
   *
   * @return the calculated lag in milliseconds
   */
  public long getLastCalculatedLag() {
    return lastCalculatedLag;
  }
}
| 9,485 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationJobFactory.java | package com.airbnb.reair.incremental;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveUtils;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.hive.hooks.HiveOperation;
import com.airbnb.reair.incremental.auditlog.AuditLogEntry;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.db.PersistedJobInfo;
import com.airbnb.reair.incremental.db.PersistedJobInfoStore;
import com.airbnb.reair.incremental.filter.ReplicationFilter;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.incremental.primitives.CopyPartitionedTableTask;
import com.airbnb.reair.incremental.primitives.CopyPartitionsTask;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import com.airbnb.reair.incremental.primitives.DropPartitionTask;
import com.airbnb.reair.incremental.primitives.DropTableTask;
import com.airbnb.reair.incremental.primitives.RenamePartitionTask;
import com.airbnb.reair.incremental.primitives.RenameTableTask;
import com.airbnb.reair.incremental.primitives.ReplicationTask;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
* Creates replication jobs and persists initial information into the DB.
*/
public class ReplicationJobFactory {
  private static final Log LOG = LogFactory.getLog(ReplicationJobFactory.class);

  // Configuration passed through to created tasks and jobs.
  private Configuration conf;
  // Cluster that changes are replicated from.
  private Cluster srcCluster;
  // Cluster that changes are replicated to.
  private Cluster destCluster;
  // Factory for creating objects for the destination cluster.
  private DestinationObjectFactory destinationObjectFactory;
  // Invoked when a created job starts or completes.
  private OnStateChangeHandler onStateChangeHandler;
  // Handles conflicting tables/partitions on the destination cluster.
  private ObjectConflictHandler objectConflictHandler;
  // Executor used by tasks that copy many partitions in parallel.
  private ParallelJobExecutor copyPartitionJobExecutor;
  // Copies directories between clusters using MR jobs.
  private DirectoryCopier directoryCopier;
  // Persists initial job information to the DB (see createReplicationJobs).
  private PersistedJobInfoStore persistedJobInfoStore;
/**
* Constructor.
*
* @param conf configuration
* @param srcCluster source cluster
* @param destCluster destination cluster
* @param persistedJobInfoStore PersistedJobInfoStore
* @param destinationObjectFactory factory for creating objects for the destination cluster
* @param onStateChangeHandler handler for when a job's state changes
* @param objectConflictHandler handler for addressing conflicting tables/partitions on the
* destination cluster
* @param copyPartitionJobExecutor executor for copying partitions
* @param directoryCopier copies directories using MR jobs
*/
public ReplicationJobFactory(
Configuration conf,
Cluster srcCluster,
Cluster destCluster,
PersistedJobInfoStore persistedJobInfoStore,
DestinationObjectFactory destinationObjectFactory,
OnStateChangeHandler onStateChangeHandler,
ObjectConflictHandler objectConflictHandler,
ParallelJobExecutor copyPartitionJobExecutor,
DirectoryCopier directoryCopier) {
this.conf = conf;
this.srcCluster = srcCluster;
this.destCluster = destCluster;
this.destinationObjectFactory = destinationObjectFactory;
this.onStateChangeHandler = onStateChangeHandler;
this.objectConflictHandler = objectConflictHandler;
this.copyPartitionJobExecutor = copyPartitionJobExecutor;
this.directoryCopier = directoryCopier;
this.persistedJobInfoStore = persistedJobInfoStore;
}
/**
* Create a replication job to copy a table.
*
* @param auditLogId ID of the audit log entry where this table was created
* @param auditLogEntryCreateTime when the audit log entry was created
* @param table the table to copy
* @return job to copy the table
*
* @throws StateUpdateException if there's an error writing to the DB
*/
public ReplicationJob createJobForCopyTable(
long auditLogId,
long auditLogEntryCreateTime,
Table table) throws StateUpdateException {
ReplicationOperation replicationOperation =
HiveUtils.isPartitioned(table) ? ReplicationOperation.COPY_PARTITIONED_TABLE
: ReplicationOperation.COPY_UNPARTITIONED_TABLE;
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
PersistedJobInfo persistedJobInfo = PersistedJobInfo.createDeferred(
replicationOperation,
ReplicationStatus.PENDING, ReplicationUtils.getLocation(table), srcCluster.getName(),
new HiveObjectSpec(table), Collections.emptyList(), ReplicationUtils.getTldt(table),
Optional.empty(), Optional.empty(), extras);
HiveObjectSpec spec = new HiveObjectSpec(table);
Optional<Path> tableLocation = ReplicationUtils.getLocation(table);
switch (replicationOperation) {
case COPY_UNPARTITIONED_TABLE:
return new ReplicationJob(
conf,
new CopyUnpartitionedTableTask(conf, destinationObjectFactory, objectConflictHandler,
srcCluster, destCluster, spec, tableLocation, directoryCopier, true),
onStateChangeHandler, persistedJobInfo);
case COPY_PARTITIONED_TABLE:
return new ReplicationJob(
conf,
new CopyPartitionedTableTask(conf, destinationObjectFactory, objectConflictHandler,
srcCluster, destCluster, spec, tableLocation),
onStateChangeHandler, persistedJobInfo);
default:
throw new RuntimeException("Unhandled operation " + replicationOperation);
}
}
/**
* Create a replication job to copy a partition.
*
* @param auditLogId ID of the audit log entry where this partition was created
* @param auditLogEntryCreateTime when the audit log entry was created
* @param spec specification for the partition
* @return the job to copy the partition
*
* @throws StateUpdateException if there's an error writing to the DB
*/
public ReplicationJob createJobForCopyPartition(
long auditLogId,
long auditLogEntryCreateTime,
HiveObjectSpec spec) throws StateUpdateException {
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
List<String> partitionNames = new ArrayList<>();
partitionNames.add(spec.getPartitionName());
ReplicationOperation replicationOperation = ReplicationOperation.COPY_PARTITION;
PersistedJobInfo persistedJobInfo =
PersistedJobInfo.createDeferred(
replicationOperation, ReplicationStatus.PENDING,
Optional.empty(), srcCluster.getName(), spec, partitionNames,
Optional.empty(), Optional.empty(), Optional.empty(), extras);
ReplicationTask replicationTask = new CopyPartitionTask(conf, destinationObjectFactory,
objectConflictHandler, srcCluster, destCluster, spec, Optional.<Path>empty(),
Optional.<Path>empty(), directoryCopier, true);
return new ReplicationJob(conf, replicationTask, onStateChangeHandler, persistedJobInfo);
}
/**
* Create a replication job to copy a partition.
*
* @param auditLogId ID of the audit log entry where this partition was created
* @param auditLogEntryCreateTime when the audit log entry was created
* @param namedPartition partition to copy
* @return the job to copy the partition
*
* @throws StateUpdateException if there's an error writing to the DB
*/
public ReplicationJob createJobForCopyPartition(
long auditLogId,
long auditLogEntryCreateTime,
NamedPartition namedPartition) throws StateUpdateException {
String partitionName = namedPartition.getName();
List<String> partitionNames = new ArrayList<>();
partitionNames.add(partitionName);
ReplicationOperation replicationOperation = ReplicationOperation.COPY_PARTITION;
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
Partition partition = namedPartition.getPartition();
HiveObjectSpec spec = new HiveObjectSpec(namedPartition);
PersistedJobInfo persistedJobInfo =
PersistedJobInfo.createDeferred(
replicationOperation, ReplicationStatus.PENDING,
ReplicationUtils.getLocation(partition), srcCluster.getName(), spec, partitionNames,
ReplicationUtils.getTldt(partition), Optional.empty(), Optional.empty(), extras);
ReplicationTask replicationTask = new CopyPartitionTask(
conf, destinationObjectFactory, objectConflictHandler, srcCluster, destCluster, spec,
ReplicationUtils.getLocation(partition), Optional.empty(), directoryCopier, true);
return new ReplicationJob(conf, replicationTask, onStateChangeHandler, persistedJobInfo);
}
/**
* Create a replication job to copy many partitions that were created by dynamic partitioning.
*
* @param auditLogId ID of the audit log entry where the partitions were created
* @param auditLogEntryCreateTime when the audit log entry was created
* @param namedPartitions partitions to copy
* @return the job to copy all of the specified partitions
*
* @throws StateUpdateException if there's an error writing to the DB
*/
public ReplicationJob createJobForCopyDynamicPartitions(
long auditLogId,
long auditLogEntryCreateTime,
List<NamedPartition> namedPartitions) throws StateUpdateException {
ReplicationOperation replicationOperation = ReplicationOperation.COPY_PARTITIONS;
List<Partition> partitions = NamedPartition.toPartitions(namedPartitions);
List<String> partitionNames = NamedPartition.toNames(namedPartitions);
// The common location is the common path that all the partitions share.
Optional<Path> commonLocation =
ReplicationUtils.getCommonDirectory(ReplicationUtils.getLocations(partitions));
Partition samplePartition = namedPartitions.get(0).getPartition();
HiveObjectSpec tableSpec =
new HiveObjectSpec(samplePartition.getDbName(), samplePartition.getTableName());
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
PersistedJobInfo persistedJobInfo =
PersistedJobInfo.createDeferred(
replicationOperation, ReplicationStatus.PENDING, commonLocation,
srcCluster.getName(), tableSpec, partitionNames,
Optional.empty(), Optional.empty(), Optional.empty(), extras);
ReplicationTask replicationTask = new CopyPartitionsTask(conf, destinationObjectFactory,
objectConflictHandler, srcCluster, destCluster, tableSpec, partitionNames, commonLocation,
copyPartitionJobExecutor, directoryCopier);
return new ReplicationJob(conf, replicationTask, onStateChangeHandler, persistedJobInfo);
}
/**
* Create a mapping from a Hive object specification to the Thrift Hive Table object.
*
* @param tables tables to include in the map
* @return a map from the Hive object specification to the Thrift Hive Table object
*/
private Map<HiveObjectSpec, Table> createTableLookupMap(List<Table> tables) {
// Create a map from the table spec to the table object. We'll need this
// for getting the table that a partition belongs to
Map<HiveObjectSpec, Table> specToTable = new HashMap<>();
for (Table table : tables) {
HiveObjectSpec spec = new HiveObjectSpec(table);
specToTable.put(spec, table);
}
return specToTable;
}
/**
* Create a replication job to drop a table.
*
* @param auditLogId ID of the audit log entry where this table was dropped
* @param auditLogEntryCreateTime when the audit log entry was created
* @param table the table to drop
* @return the job to drop the table
*
* @throws StateUpdateException if there's an error writing to the DB
*/
public ReplicationJob createJobForDropTable(
long auditLogId,
long auditLogEntryCreateTime,
Table table) throws StateUpdateException {
ReplicationOperation replicationOperation = ReplicationOperation.DROP_TABLE;
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
HiveObjectSpec tableSpec = new HiveObjectSpec(table);
PersistedJobInfo persistedJobInfo =
PersistedJobInfo.createDeferred(
replicationOperation, ReplicationStatus.PENDING,
ReplicationUtils.getLocation(table), srcCluster.getName(), tableSpec,
Collections.emptyList(), ReplicationUtils.getTldt(table), Optional.empty(),
Optional.empty(), extras);
return new ReplicationJob(
conf,
new DropTableTask(
srcCluster,
destCluster,
tableSpec,
ReplicationUtils.getTldt(table)),
onStateChangeHandler,
persistedJobInfo);
}
/**
* Create a job to drop a partition.
*
* @param auditLogId ID of the audit log entry where this partition was dropped
* @param auditLogEntryCreateTime when the audit log entry was created
* @param namedPartition the partition to drop
* @return the job to drop the partition
*
* @throws StateUpdateException if there is an error writing to the DB
*/
public ReplicationJob createJobForDropPartition(
long auditLogId,
long auditLogEntryCreateTime,
NamedPartition namedPartition) throws StateUpdateException {
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
ReplicationOperation replicationOperation = ReplicationOperation.DROP_PARTITION;
HiveObjectSpec partitionSpec = new HiveObjectSpec(namedPartition);
List<String> partitionNames = new ArrayList<>();
partitionNames.add(namedPartition.getName());
Optional<String> partitionTldt = ReplicationUtils.getTldt(namedPartition.getPartition());
PersistedJobInfo persistedJobInfo =
PersistedJobInfo.createDeferred(
replicationOperation, ReplicationStatus.PENDING,
ReplicationUtils.getLocation(namedPartition.getPartition()),
srcCluster.getName(), partitionSpec.getTableSpec(), partitionNames, partitionTldt,
Optional.empty(), Optional.empty(), extras);
return new ReplicationJob(
conf,
new DropPartitionTask(
srcCluster,
destCluster,
partitionSpec,
partitionTldt),
onStateChangeHandler,
persistedJobInfo);
}
/**
* Create a job to rename a table.
*
* @param auditLogId ID of the audit log entry where this partition was dropped
* @param auditLogEntryCreateTime when the audit log entry was created
* @param renameFromTable the table to rename from
* @param renameToTable the table to rename to
* @return the job to rename the specified table
*
* @throws StateUpdateException if there's an error writing to the DB
*/
public ReplicationJob createJobForRenameTable(
long auditLogId,
long auditLogEntryCreateTime,
Table renameFromTable,
Table renameToTable) throws StateUpdateException {
ReplicationOperation replicationOperation = ReplicationOperation.RENAME_TABLE;
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
HiveObjectSpec renameFromTableSpec = new HiveObjectSpec(renameFromTable);
HiveObjectSpec renameToTableSpec = new HiveObjectSpec(renameToTable);
Optional<Path> renameFromPath = ReplicationUtils.getLocation(renameFromTable);
Optional<Path> renameToPath = ReplicationUtils.getLocation(renameToTable);
PersistedJobInfo persistedJobInfo =
PersistedJobInfo.createDeferred(
replicationOperation, ReplicationStatus.PENDING, renameFromPath,
srcCluster.getName(), renameFromTableSpec,
new ArrayList<>(), ReplicationUtils.getTldt(renameFromTable),
Optional.of(renameToTableSpec), renameToPath, extras);
return new ReplicationJob(
conf,
new RenameTableTask(conf,
srcCluster,
destCluster,
destinationObjectFactory,
objectConflictHandler,
renameFromTableSpec,
renameToTableSpec,
renameFromPath,
renameToPath,
ReplicationUtils.getTldt(renameFromTable),
copyPartitionJobExecutor,
directoryCopier),
onStateChangeHandler,
persistedJobInfo);
}
/**
* Create a job to rename a partition.
*
* @param auditLogId ID of the audit log entry where this partition was dropped
* @param auditLogEntryCreateTime when the audit log entry was created
* @param renameFromPartition partition to rename from
* @param renameToPartition partition to rename to
* @return a job to rename the partition
*
* @throws StateUpdateException if there's an error writing to the DB
*/
public ReplicationJob createJobForRenamePartition(
long auditLogId,
long auditLogEntryCreateTime,
NamedPartition renameFromPartition,
NamedPartition renameToPartition) throws StateUpdateException {
ReplicationOperation replicationOperation = ReplicationOperation.RENAME_PARTITION;
Map<String, String> extras = new HashMap<>();
extras.put(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY, Long.toString(auditLogId));
extras.put(PersistedJobInfo.AUDIT_LOG_ENTRY_CREATE_TIME_KEY,
Long.toString(auditLogEntryCreateTime));
HiveObjectSpec renameFromPartitionSpec = new HiveObjectSpec(renameFromPartition);
HiveObjectSpec renameToPartitionSpec = new HiveObjectSpec(renameToPartition);
Optional renameFromPath = ReplicationUtils.getLocation(renameFromPartition.getPartition());
Optional renameToPath = ReplicationUtils.getLocation(renameToPartition.getPartition());
PersistedJobInfo persistedJobInfo =
PersistedJobInfo.createDeferred(
replicationOperation, ReplicationStatus.PENDING, renameFromPath,
srcCluster.getName(), renameFromPartitionSpec,
new ArrayList<>(), ReplicationUtils.getTldt(renameFromPartition.getPartition()),
Optional.of(renameToPartitionSpec), renameToPath, extras);
return new ReplicationJob(
conf,
new RenamePartitionTask(
conf,
destinationObjectFactory,
objectConflictHandler,
srcCluster,
destCluster,
renameFromPartitionSpec,
renameToPartitionSpec,
renameFromPath,
renameToPath,
ReplicationUtils.getTldt(renameFromPartition.getPartition()), directoryCopier),
onStateChangeHandler,
persistedJobInfo);
}
  /** Broad category of replication action derived from an audit log entry's command type. */
  private enum OperationType {
    COPY, DROP, RENAME
  }
/**
* Creates ReplicationJobs for a list of AuditLogEntries. The lists of lists
* return correspond naturally (ie first AuditLogEntry corresponds to first List).
* The filters are used in each request
* @param auditLogEntries A list of N AuditLogEntries
* @param replicationFilters A list of M replication filter to be used in each AuditLogEntry
* @return A List of List of Replication Jobs, with each List of Replication Job corresponding
* to the i^th audit log entry
* @throws StateUpdateException if there is a MySQL issue
*/
public List<List<ReplicationJob>> createReplicationJobs(
List<AuditLogEntry> auditLogEntries,
List<ReplicationFilter> replicationFilters) throws StateUpdateException {
List<List<ReplicationJob>> replicationJobs = new ArrayList<>();
List<PersistedJobInfo> toPersist = new ArrayList<>();
for (AuditLogEntry auditLogEntry : auditLogEntries) {
List<ReplicationJob> replicationJobs1 =
createReplicationJobsSingle(auditLogEntry, replicationFilters);
replicationJobs.add(replicationJobs1);
for (ReplicationJob replicationJob: replicationJobs1) {
toPersist.add(replicationJob.getPersistedJobInfo());
}
}
persistedJobInfoStore.createMany(toPersist);
return replicationJobs;
}
/**
* Converts the audit log entry into a set of replication jobs that have the persisted elements
* properly set.
*
* @param auditLogEntry the audit log entry associated with the actions that need to be replicated
* @throws StateUpdateException if there's an error writing to the DB
*/
private List<ReplicationJob> createReplicationJobsSingle(
AuditLogEntry auditLogEntry,
List<ReplicationFilter> replicationFilters) throws StateUpdateException {
List<ReplicationJob> replicationJobs = new ArrayList<>();
for (ReplicationFilter replicationFilter : replicationFilters) {
if (!replicationFilter.accept(auditLogEntry)) {
LOG.debug(String.format("Audit log entry id: %s filtered out by %s", auditLogEntry,
replicationFilter.getClass().getSimpleName()));
return replicationJobs;
}
}
// TODO: Rewrite once HIVE-12865 is resolved.
// The inputs and outputs for exchange partitions in the audit log is
// broken as per HIVE-12865. This workaround is to parse the exchange
// partition command to figure out what the input and output partitions
// are. auditLogEntry.getCommandType() may be null for the unit test only.
if (auditLogEntry.getOutputTables().size() == 0
&& (auditLogEntry.getCommandType() == null
|| auditLogEntry.getCommandType() == HiveOperation.ALTERTABLE_EXCHANGEPARTITION)) {
// This is probably an exchange partition command
ExchangePartitionParser parser = new ExchangePartitionParser();
boolean parsed = parser.parse(auditLogEntry.getCommand());
if (parsed) {
LOG.debug(
String.format("Parsed audit log id: %s " + "query: %s as an exchange partition query",
auditLogEntry.getId(), auditLogEntry.getCommand()));
// Since we're missing the modified time for the source
// partition, just copy for now
HiveObjectSpec exchangeToSpec = new HiveObjectSpec(parser.getExchangeToSpec().getDbName(),
parser.getExchangeToSpec().getTableName(), parser.getPartitionName());
Table exchangeToTable = new Table();
exchangeToTable.setDbName(exchangeToSpec.getDbName());
exchangeToTable.setTableName(exchangeToSpec.getTableName());
Partition exchangeToPartition = new Partition();
exchangeToPartition.setDbName(exchangeToSpec.getDbName());
exchangeToPartition.setTableName(exchangeToSpec.getTableName());
exchangeToPartition.setValues(parser.getPartitionValues());
for (ReplicationFilter replicationFilter : replicationFilters) {
if (!replicationFilter.accept(exchangeToTable,
new NamedPartition(exchangeToSpec.getPartitionName(), exchangeToPartition))) {
LOG.debug(
String.format("Exchange partition from audit log" + " id: %s filtered out by %s",
auditLogEntry.getId(), replicationFilter.getClass().getSimpleName()));
return replicationJobs;
}
}
ReplicationJob job = createJobForCopyPartition(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), exchangeToSpec);
replicationJobs.add(job);
return replicationJobs;
} else {
LOG.warn("Error parsing query " + auditLogEntry.getCommand());
}
}
// End exchange partitions workaround
// Filter out CLI commands that don't have any outputs. This logic will need to be revisited
// when the definition of inputs / outputs is revised for drop operations.
if (!HiveOperation.isThriftOperation(auditLogEntry.getCommandType())
&& auditLogEntry.getOutputTables().size() == 0
&& auditLogEntry.getOutputPartitions().size() == 0) {
LOG.debug(String.format(
"Audit log entry id: %s filtered out " + "since it has no output tables or partitions",
auditLogEntry.getId()));
return replicationJobs;
}
OperationType operationType = null;
switch (auditLogEntry.getCommandType()) {
case DROPTABLE:
case THRIFT_DROP_TABLE:
case DROPVIEW:
case ALTERTABLE_DROPPARTS:
case THRIFT_DROP_PARTITION:
operationType = OperationType.DROP;
break;
case ALTERTABLE_RENAME:
case ALTERVIEW_RENAME:
case ALTERTABLE_RENAMEPART:
operationType = OperationType.RENAME;
break;
case THRIFT_ALTER_TABLE:
String inputTableName = auditLogEntry.getInputTable().getTableName();
if (auditLogEntry.getOutputTables().size() == 1
&& !auditLogEntry.getOutputTables().get(0).getTableName().equals(inputTableName)) {
operationType = OperationType.RENAME;
} else {
operationType = OperationType.COPY;
}
break;
case THRIFT_ALTER_PARTITION:
NamedPartition inputPartition = auditLogEntry.getInputPartition();
List<NamedPartition> outputPartitions = auditLogEntry.getOutputPartitions();
if (inputPartition != null && outputPartitions.size() == 1
&& !inputPartition.getName().equals(outputPartitions.get(0).getName())) {
operationType = OperationType.RENAME;
} else {
operationType = OperationType.COPY;
}
break;
default:
operationType = OperationType.COPY;
}
List<Table> outputTables = new ArrayList<>(auditLogEntry.getOutputTables());
List<NamedPartition> outputPartitions = new ArrayList<>(auditLogEntry.getOutputPartitions());
List<Table> referenceTables = auditLogEntry.getReferenceTables();
// Look at inputs as Thrift drop operations have that in the inputs
List<Table> inputTables = new ArrayList<>();
if (auditLogEntry.getInputTable() != null) {
inputTables.add(auditLogEntry.getInputTable());
}
List<NamedPartition> inputPartitions = new ArrayList<>();
if (auditLogEntry.getInputPartition() != null) {
inputPartitions.add(auditLogEntry.getInputPartition());
}
// Filter out tables and partitions that we may not want to replicate
filterObjects(replicationFilters, outputTables, outputPartitions,
createTableLookupMap(referenceTables));
filterObjects(replicationFilters, inputTables, inputPartitions,
createTableLookupMap(referenceTables));
switch (operationType) {
case COPY:
// Handle the tables. The table is present in add partition
// calls, so skip in those cases. Also, for load commands, the table is mentioned as well
// in case of a partition load, so that can be omitted.
boolean shouldNotAddTables =
auditLogEntry.getCommandType() == HiveOperation.ALTERTABLE_ADDPARTS
|| (auditLogEntry.getCommandType() == HiveOperation.LOAD
&& auditLogEntry.getOutputPartitions().size() > 0);
if (!shouldNotAddTables) {
for (Table t : outputTables) {
replicationJobs.add(createJobForCopyTable(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), t));
}
}
// Handle the partitions
// See if this is a dynamic partition insert
if (auditLogEntry.getOutputPartitions().size() > 1
&& ReplicationUtils.fromSameTable(NamedPartition.toPartitions(outputPartitions))) {
replicationJobs.add(createJobForCopyDynamicPartitions(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), auditLogEntry.getOutputPartitions()));
} else {
// Otherwise create separate insert partition jobs for each
// partition
for (NamedPartition p : outputPartitions) {
replicationJobs.add(createJobForCopyPartition(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), p));
}
}
break;
case DROP:
for (Table t : outputTables) {
replicationJobs.add(createJobForDropTable(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), t));
}
for (NamedPartition p : outputPartitions) {
replicationJobs.add(createJobForDropPartition(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), p));
}
// Thrift operations have the dropped object in the inputs
if (HiveOperation.isThriftOperation(auditLogEntry.getCommandType())) {
for (Table t : inputTables) {
replicationJobs.add(createJobForDropTable(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), t));
}
for (NamedPartition p : inputPartitions) {
replicationJobs.add(createJobForDropPartition(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), p));
}
}
break;
case RENAME:
// There's an edge case to consider - let's say table A is
// renamed to table B, however, table A is excluded by the
// user specified filter. In this case, we still do the rename.
if (outputTables.size() == 0 && outputPartitions.size() == 0) {
// This means that the table was filtered out
} else if (auditLogEntry.getInputTable() != null) {
// Handle a rename table
replicationJobs.add(createJobForRenameTable(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), auditLogEntry.getInputTable(),
auditLogEntry.getOutputTables().get(0)));
} else if (auditLogEntry.getInputPartition() != null) {
// Handle a rename partition
replicationJobs.add(createJobForRenamePartition(auditLogEntry.getId(),
auditLogEntry.getCreateTime().getTime(), auditLogEntry.getInputPartition(),
auditLogEntry.getOutputPartitions().get(0)));
} else {
throw new RuntimeException("Shouldn't happen!");
}
break;
default:
throw new RuntimeException("Operation not handled: " + operationType);
}
LOG.debug("Converted audit log entry " + auditLogEntry + " to " + replicationJobs);
return replicationJobs;
}
/**
 * Based on the supplied filters, remove tables and partitions that should not be replicated.
 *
 * <p>Entries are removed in place, so both {@code tables} and {@code partitions} must be
 * mutable lists.
 *
 * @param filters the filters to remove undesired objects
 * @param tables the tables to filter (modified in place)
 * @param partitions the partitions to filter (modified in place)
 * @param tableLookupMap map from a table's spec to the table object, used to look up the
 *        parent table of each partition when evaluating partition filters
 */
private void filterObjects(
    List<ReplicationFilter> filters,
    List<Table> tables,
    List<NamedPartition> partitions,
    Map<HiveObjectSpec, Table> tableLookupMap) {
  // Note: an earlier revision built a set of the partitions' parent table specs here, but the
  // set was never read, so that dead computation has been removed.

  // Remove all the partitions that are rejected by any filter. The parent table of each
  // partition is looked up so that filters can examine table-level attributes.
  Iterator<NamedPartition> partitionIterator = partitions.iterator();
  while (partitionIterator.hasNext()) {
    NamedPartition pwn = partitionIterator.next();
    Partition partition = pwn.getPartition();
    HiveObjectSpec partitionSpec = new HiveObjectSpec(pwn);
    // NOTE(review): the lookup may return null if the reference tables did not include the
    // partition's parent table; filters are assumed to tolerate a null table - confirm.
    Table table =
        tableLookupMap.get(new HiveObjectSpec(partition.getDbName(), partition.getTableName()));
    for (ReplicationFilter filter : filters) {
      if (!filter.accept(table, pwn)) {
        LOG.debug(
            String.format("%s filtering out: %s", filter.getClass().getName(), partitionSpec));
        partitionIterator.remove();
        // One rejecting filter is enough; skip the remaining filters.
        break;
      }
    }
  }

  // Remove all tables that are rejected by any filter.
  Iterator<Table> tableIterator = tables.iterator();
  while (tableIterator.hasNext()) {
    Table table = tableIterator.next();
    HiveObjectSpec tableSpec = new HiveObjectSpec(table);
    for (ReplicationFilter filter : filters) {
      if (!filter.accept(table)) {
        LOG.debug(String.format("%s filtering out: %s", filter.getClass().getName(), tableSpec));
        tableIterator.remove();
        break;
      }
    }
  }
}
}
| 9,486 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ThriftObjectUtils.java | package com.airbnb.reair.incremental;
import com.airbnb.reair.incremental.db.PersistedJobInfo;
import com.airbnb.reair.incremental.thrift.TReplicationJob;
import com.airbnb.reair.incremental.thrift.TReplicationOperation;
import com.airbnb.reair.incremental.thrift.TReplicationStatus;
import org.apache.hadoop.fs.Path;
import java.util.ArrayList;
import java.util.List;
/**
 * Utility methods for converting replication objects into their Thrift-generated equivalents
 * for use with the Thrift service interface.
 */
public class ThriftObjectUtils {

  // Utility class with static methods only - not meant to be instantiated.
  private ThriftObjectUtils() {}

  /**
   * Convert a replication operation to its Thrift equivalent.
   *
   * @param op the operation to convert
   * @return the corresponding Thrift operation
   * @throws UnsupportedOperationException if the operation has no Thrift mapping
   */
  private static TReplicationOperation convert(ReplicationOperation op) {
    switch (op) {
      case COPY_UNPARTITIONED_TABLE:
        return TReplicationOperation.COPY_UNPARTITIONED_TABLE;
      case COPY_PARTITIONED_TABLE:
        return TReplicationOperation.COPY_PARTITIONED_TABLE;
      case COPY_PARTITION:
        return TReplicationOperation.COPY_PARTITION;
      case COPY_PARTITIONS:
        return TReplicationOperation.COPY_PARTITIONS;
      case DROP_TABLE:
        return TReplicationOperation.DROP_TABLE;
      case DROP_PARTITION:
        return TReplicationOperation.DROP_PARTITION;
      case RENAME_TABLE:
        return TReplicationOperation.RENAME_TABLE;
      case RENAME_PARTITION:
        return TReplicationOperation.RENAME_PARTITION;
      default:
        throw new UnsupportedOperationException("Unhandled operation: " + op);
    }
  }

  /**
   * Convert a replication status to its Thrift equivalent.
   *
   * @param status the status to convert
   * @return the corresponding Thrift status
   * @throws UnsupportedOperationException if the status has no Thrift mapping
   */
  private static TReplicationStatus convert(ReplicationStatus status) {
    switch (status) {
      case PENDING:
        return TReplicationStatus.PENDING;
      case RUNNING:
        return TReplicationStatus.RUNNING;
      case SUCCESSFUL:
        return TReplicationStatus.SUCCESSFUL;
      case FAILED:
        return TReplicationStatus.FAILED;
      case NOT_COMPLETABLE:
        return TReplicationStatus.NOT_COMPLETABLE;
      default:
        // Consistency fix: was a bare RuntimeException while the sibling converter threw
        // UnsupportedOperationException for the same kind of failure.
        throw new UnsupportedOperationException("Unhandled status: " + status);
    }
  }

  /**
   * Convert a ReplicationJob to the Thrift equivalent.
   *
   * @param job the job to convert
   * @return the corresponding Thrift replication job
   */
  public static TReplicationJob convert(ReplicationJob job) {
    PersistedJobInfo jobInfo = job.getPersistedJobInfo();
    List<Long> parentJobIds = new ArrayList<>(job.getParentJobIds());
    // Optional-valued fields are unwrapped to null since the Thrift struct uses nullable
    // fields rather than Optionals.
    return new TReplicationJob(job.getId(), job.getCreateTime(),
        // TODO: Maybe fetch the update time from the DB?
        0,
        convert(jobInfo.getOperation()), convert(jobInfo.getStatus()),
        jobInfo.getSrcPath() == null ? null : jobInfo.getSrcPath().toString(),
        jobInfo.getSrcClusterName(), jobInfo.getSrcDbName(), jobInfo.getSrcTableName(),
        jobInfo.getSrcPartitionNames() == null ? new ArrayList<>() : jobInfo.getSrcPartitionNames(),
        jobInfo.getSrcObjectTldt().orElse(null), jobInfo.getRenameToDb().orElse(null),
        jobInfo.getRenameToTable().orElse(null),
        jobInfo.getRenameToPath().map(Path::toString).orElse(null), jobInfo.getExtras(),
        parentJobIds);
  }
}
| 9,487 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/DirectoryCopier.java | package com.airbnb.reair.incremental;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.DistCpWrapper;
import com.airbnb.reair.common.DistCpWrapperOptions;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.PathBuilder;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.Random;
/**
 * Copies directories on Hadoop filesystems.
 */
public class DirectoryCopier {

  private static final Log LOG = LogFactory.getLog(DirectoryCopier.class);

  private Configuration conf;
  // Root of the temporary directory used for distcp staging and log directories.
  private Path tmpDir;
  // Whether to verify that file modification times match after a copy.
  private boolean checkFileModificationTimes;

  /**
   * Constructor for the directory copier.
   *
   * @param conf configuration object
   * @param tmpDir the temporary directory to copy data to before moving to the final destination
   * @param checkFileModificationTimes Whether to check that the modified times of the files match
   *                                   after the copy. Some filesystems do not support preservation
   *                                   of modified file time after a copy, so this check may need
   *                                   to be disabled.
   */
  public DirectoryCopier(Configuration conf, Path tmpDir, boolean checkFileModificationTimes) {
    this.conf = conf;
    this.tmpDir = tmpDir;
    this.checkFileModificationTimes = checkFileModificationTimes;
  }

  /**
   * Copy the source directory to the destination directory.
   *
   * @param srcDir source directory
   * @param destDir destination directory
   * @param copyAttributes a list of attributes to use when creating the tmp directory. Doesn't
   *        really matter, but it can make it easier to manually inspect the tmp directory.
   * @return the number of bytes copied
   * @throws IOException if there was an error copying the directory
   * @throws ConfigurationException if configuration options are improper
   */
  public long copy(Path srcDir, Path destDir, List<String> copyAttributes)
      throws ConfigurationException, IOException {
    Random random = new Random();
    // Long.toHexString() interprets the value as unsigned, so even the rare
    // Math.abs(Long.MIN_VALUE) case (which stays negative) still yields a sign-free name.
    long randomLong = Math.abs(random.nextLong());

    // Use the same random suffix for both directories so they are easy to correlate.
    Path distCpTmpDir = buildScratchPath("distcp_tmp", copyAttributes, randomLong);
    Path distCpLogDir = buildScratchPath("distcp_logs", copyAttributes, randomLong);

    try {
      // Copy directory
      DistCpWrapperOptions options =
          new DistCpWrapperOptions(srcDir, destDir, distCpTmpDir, distCpLogDir)
              .setAtomic(true)
              .setSyncModificationTimes(checkFileModificationTimes);
      configureTimeouts(options);

      DistCpWrapper distCpWrapper = new DistCpWrapper(conf);
      return distCpWrapper.copy(options);
    } catch (DistCpException e) {
      throw new IOException(e);
    }
  }

  /**
   * Builds a scratch path of the form tmpDir/prefix/attr1/.../attrN/&lt;random hex&gt;.
   */
  private Path buildScratchPath(String prefix, List<String> copyAttributes, long randomLong) {
    PathBuilder builder = new PathBuilder(tmpDir).add(prefix);
    for (String attribute : copyAttributes) {
      builder.add(attribute);
    }
    return builder.add(Long.toHexString(randomLong)).toPath();
  }

  /**
   * Applies the static and dynamic distcp job timeout settings from the configuration.
   *
   * @param options the distcp options to mutate
   * @throws ConfigurationException if both the static and dynamic timeout options are set
   */
  private void configureTimeouts(DistCpWrapperOptions options) throws ConfigurationException {
    long copyJobTimeoutSeconds = conf.getLong(
        ConfigurationKeys.COPY_JOB_TIMEOUT_SECONDS,
        -1);
    if (copyJobTimeoutSeconds > 0) {
      options.setDistCpJobTimeout(copyJobTimeoutSeconds * 1_000L);
    }
    boolean dynamicTimeoutEnabled = conf.getBoolean(
        ConfigurationKeys.COPY_JOB_DYNAMIC_TIMEOUT_ENABLED,
        false);
    options.setDistcpDynamicJobTimeoutEnabled(dynamicTimeoutEnabled);
    if (dynamicTimeoutEnabled && copyJobTimeoutSeconds > 0) {
      // Bug fix: this message previously used SLF4J-style "{}" placeholders with
      // String.format(), which never substitutes them; String.format() requires %s.
      throw new ConfigurationException(String.format(
          "The config options %s and %s are both set, but only one can be used",
          ConfigurationKeys.COPY_JOB_DYNAMIC_TIMEOUT_ENABLED,
          ConfigurationKeys.COPY_JOB_TIMEOUT_SECONDS));
    }
    long dynamicTimeoutMsPerGbPerMapper = conf.getLong(
        ConfigurationKeys.COPY_JOB_DYNAMIC_TIMEOUT_MS_PER_GB_PER_MAPPER,
        -1);
    if (dynamicTimeoutMsPerGbPerMapper > 0) {
      options.setDistcpDynamicJobTimeoutMsPerGbPerMapper(dynamicTimeoutMsPerGbPerMapper);
    }
    long dynamicTimeoutMin = conf.getLong(
        ConfigurationKeys.COPY_JOB_DYNAMIC_TIMEOUT_BASE,
        -1);
    if (dynamicTimeoutMin > 0) {
      options.setDistcpDynamicJobTimeoutBase(dynamicTimeoutMin);
    }
    long dynamicTimeoutMax = conf.getLong(
        ConfigurationKeys.COPY_JOB_DYNAMIC_TIMEOUT_MAX,
        -1);
    if (dynamicTimeoutMax > 0) {
      options.setDistcpDynamicJobTimeoutMax(dynamicTimeoutMax);
    }
  }

  /**
   * Checks to see if two directories contain the same files. Same is defined as having the same
   * set of non-empty files with matching file sizes (and matching modified times if set in the
   * constructor).
   *
   * @param srcDir source directory
   * @param destDir destination directory
   * @return whether directories are equal
   *
   * @throws IOException if there's an error reading the filesystem
   */
  public boolean equalDirs(Path srcDir, Path destDir) throws IOException {
    return FsUtils.equalDirs(conf, srcDir, destDir, Optional.empty(), checkFileModificationTimes);
  }
}
| 9,488 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/RunInfo.java | package com.airbnb.reair.incremental;
/**
 * Class to encapsulate how the run of a replication task / job went.
 */
public class RunInfo {

  /**
   * Outcome of a replication run. See the similar definitions in
   * {@code com.airbnb.reair.incremental.ReplicationStatus}.
   */
  public enum RunStatus {
    SUCCESSFUL,
    NOT_COMPLETABLE,
    FAILED,
    DEST_IS_NEWER,
  }

  // Outcome of the run.
  private RunStatus runStatus;
  // Number of bytes that the run copied.
  private long bytesCopied;

  /**
   * Creates a summary of a completed run.
   *
   * @param runStatus the outcome of the run
   * @param bytesCopied how many bytes were copied during the run
   */
  public RunInfo(RunStatus runStatus, long bytesCopied) {
    this.runStatus = runStatus;
    this.bytesCopied = bytesCopied;
  }

  /** @return the outcome of the run */
  public RunStatus getRunStatus() {
    return runStatus;
  }

  /** @param runStatus the outcome to record for the run */
  public void setRunStatus(RunStatus runStatus) {
    this.runStatus = runStatus;
  }

  /** @return the number of bytes copied during the run */
  public long getBytesCopied() {
    return bytesCopied;
  }

  /** @param bytesCopied the number of bytes copied during the run */
  public void setBytesCopied(long bytesCopied) {
    this.bytesCopied = bytesCopied;
  }
}
| 9,489 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationServer.java | package com.airbnb.reair.incremental;
import static com.airbnb.reair.incremental.auditlog.MetricNames.AUDIT_LOG_ENTRIES_COUNT;
import static com.airbnb.reair.incremental.auditlog.MetricNames.JOBS_IN_MEMORY_GAUGE;
import static com.airbnb.reair.incremental.auditlog.MetricNames.PERSISTED_JOBS_COUNT;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.db.DbKeyValueStore;
import com.airbnb.reair.incremental.auditlog.AuditLogEntry;
import com.airbnb.reair.incremental.auditlog.AuditLogEntryException;
import com.airbnb.reair.incremental.auditlog.AuditLogReader;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.db.PersistedJobInfo;
import com.airbnb.reair.incremental.db.PersistedJobInfoStore;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.incremental.filter.ReplicationFilter;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.incremental.primitives.CopyPartitionedTableTask;
import com.airbnb.reair.incremental.primitives.CopyPartitionsTask;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import com.airbnb.reair.incremental.primitives.DropPartitionTask;
import com.airbnb.reair.incremental.primitives.DropTableTask;
import com.airbnb.reair.incremental.primitives.RenamePartitionTask;
import com.airbnb.reair.incremental.primitives.RenameTableTask;
import com.airbnb.reair.incremental.primitives.ReplicationTask;
import com.airbnb.reair.incremental.thrift.TReplicationJob;
import com.airbnb.reair.incremental.thrift.TReplicationService;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import com.timgroup.statsd.StatsDClient;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.thrift.TException;
import java.io.IOException;
import java.sql.SQLException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.TimeZone;
/**
* Replication server that reads entries from the audit log and replicates objects / operations
* from the source warehouse to the destination warehouse.
*/
public class ReplicationServer implements TReplicationService.Iface {
private static final Log LOG = LogFactory.getLog(ReplicationServer.class);
// Default wait between polls of the audit log when there is nothing to do.
private static final long POLL_WAIT_TIME_MS = 10 * 1000;
// how many audit log entries to process at once
private final int auditLogBatchSize;
// If there is a need to wait to poll, wait this many ms
private long pollWaitTimeMs = POLL_WAIT_TIME_MS;
// Key used for storing the last persisted audit log ID in the key value
// store
public static final String LAST_PERSISTED_AUDIT_LOG_ID_KEY = "last_persisted_id";
private Configuration conf;
// Source and destination warehouses for replication.
private Cluster srcCluster;
private Cluster destCluster;
// Persists job state transitions (see JobStateChangeHandler below).
private OnStateChangeHandler onStateChangeHandler;
// Handles conflicts when the destination already has a differing object.
private ObjectConflictHandler objectConflictHandler;
// Creates the Hive objects to be written on the destination cluster.
private DestinationObjectFactory destinationObjectFactory;
// Reads entries to be replicated from the audit log DB.
private AuditLogReader auditLogReader;
// Key/value store used to persist the audit log read position across restarts.
private DbKeyValueStore keyValueStore;
// Store for persisting replication job details to the DB.
private final PersistedJobInfoStore jobInfoStore;
// jobExecutor runs whole replication jobs; copyPartitionJobExecutor is dedicated to
// per-partition copy sub-tasks.
private ParallelJobExecutor jobExecutor;
private ParallelJobExecutor copyPartitionJobExecutor;
// Filters that decide which tables/partitions should be replicated.
private List<ReplicationFilter> replicationFilters;
// Collect stats with counters
private ReplicationCounters counters;
// Registry of jobs currently tracked in memory.
private ReplicationJobRegistry jobRegistry;
// Creates replication jobs from audit log entries.
private ReplicationJobFactory jobFactory;
// Client for emitting StatsD metrics.
private StatsDClient statsDClient;
// If the number of jobs that we track in memory exceed this amount, then
// pause until more jobs finish.
private int maxJobsInMemory;
// Set via the Thrift pause()/resume() calls; checked by the main run() loop.
private volatile boolean pauseRequested = false;
// Periodically logs stats about the tracked jobs.
private StatsTracker statsTracker;
// Copies filesystem directories for table/partition data.
private DirectoryCopier directoryCopier;
// If set, start reading the audit log after this ID instead of the persisted position.
private Optional<Long> startAfterAuditLogId = Optional.empty();
// Interval (in ms) between job-registry stats reports.
private long replicationJobRegistryReportInterval;
// Responsible for persisting changes to the state of the replication job
// once it finishes
private class JobStateChangeHandler implements OnStateChangeHandler {
@Override
public void onStart(ReplicationJob replicationJob) throws StateUpdateException {
LOG.debug("Job id: " + replicationJob.getId() + " started");
jobInfoStore.changeStatusAndPersist(ReplicationStatus.RUNNING,
replicationJob.getPersistedJobInfo());
}
@Override
public void onComplete(RunInfo runInfo, ReplicationJob replicationJob)
throws StateUpdateException {
LOG.debug("Job id: " + replicationJob.getId() + " finished " + "with state "
+ runInfo.getRunStatus() + " and " + runInfo.getBytesCopied() + " bytes copied");
replicationJob.getPersistedJobInfo().getExtras().put(PersistedJobInfo.BYTES_COPIED_KEY,
Long.toString(runInfo.getBytesCopied()));
LOG.debug("Persisting job id: " + replicationJob.getPersistedJobInfo().getId());
switch (runInfo.getRunStatus()) {
case SUCCESSFUL:
jobInfoStore.changeStatusAndPersist(ReplicationStatus.SUCCESSFUL,
replicationJob.getPersistedJobInfo());
counters.incrementCounter(ReplicationCounters.Type.SUCCESSFUL_TASKS);
break;
case NOT_COMPLETABLE:
jobInfoStore.changeStatusAndPersist(ReplicationStatus.NOT_COMPLETABLE,
replicationJob.getPersistedJobInfo());
counters.incrementCounter(ReplicationCounters.Type.NOT_COMPLETABLE_TASKS);
break;
case FAILED:
jobInfoStore.changeStatusAndPersist(ReplicationStatus.FAILED,
replicationJob.getPersistedJobInfo());
counters.incrementCounter(ReplicationCounters.Type.FAILED_TASKS);
break;
default:
throw new RuntimeException("Unhandled status: " + runInfo.getRunStatus());
}
LOG.debug("Persisted job: " + replicationJob);
jobRegistry.retireJob(replicationJob);
}
}
/**
* Constructor.
*
* @param conf configuration
* @param srcCluster source cluster
* @param destCluster destination cluster
* @param auditLogReader audit log reader
* @param keyValueStore key/value store for persisting the read position of the audit log
* @param jobInfoStore store for persisting replication job information
* @param replicationFilters the filters for replication entries
* @param directoryCopier directory copier
* @param numWorkers number of worker threads to launch for processing replication jobs
* @param maxJobsInMemory maximum number of jobs to store in memory
* @param startAfterAuditLogId start reading and replicating entries after this audit log ID
*/
public ReplicationServer(
Configuration conf,
Cluster srcCluster,
Cluster destCluster,
AuditLogReader auditLogReader,
DbKeyValueStore keyValueStore,
final PersistedJobInfoStore jobInfoStore,
List<ReplicationFilter> replicationFilters,
DirectoryCopier directoryCopier,
StatsDClient statsDClient,
int numWorkers,
int maxJobsInMemory,
Optional<Long> startAfterAuditLogId) {
this.conf = conf;
this.srcCluster = srcCluster;
this.destCluster = destCluster;
this.auditLogReader = auditLogReader;
this.keyValueStore = keyValueStore;
this.jobInfoStore = jobInfoStore;
this.statsDClient = statsDClient;
this.onStateChangeHandler = new JobStateChangeHandler();
this.objectConflictHandler = new ObjectConflictHandler();
this.objectConflictHandler.setConf(conf);
this.destinationObjectFactory = new DestinationObjectFactory();
this.destinationObjectFactory.setConf(conf);
this.replicationFilters = replicationFilters;
this.maxJobsInMemory = maxJobsInMemory;
this.counters = new ReplicationCounters(statsDClient);
this.jobExecutor = new ParallelJobExecutor("TaskWorker", numWorkers);
this.copyPartitionJobExecutor = new ParallelJobExecutor("CopyPartitionWorker", numWorkers);
this.auditLogBatchSize = conf.getInt(
ConfigurationKeys.AUDIT_LOG_PROCESSING_BATCH_SIZE, 32);
this.jobRegistry = new ReplicationJobRegistry(conf, statsDClient);
this.statsTracker = new StatsTracker(jobRegistry);
this.directoryCopier = directoryCopier;
this.jobFactory = new ReplicationJobFactory(
conf,
srcCluster,
destCluster,
jobInfoStore,
destinationObjectFactory,
onStateChangeHandler,
objectConflictHandler,
copyPartitionJobExecutor,
directoryCopier);
this.replicationJobRegistryReportInterval = 1000
* conf.getLong(ConfigurationKeys.REPLICATION_JOB_REGISTRY_REPORT_INTERVAL_SEC,
60);
this.startAfterAuditLogId = startAfterAuditLogId;
jobExecutor.start();
copyPartitionJobExecutor.start();
}
/**
* Creates a replication job from the parameters that were persisted to the DB.
*
* @param persistedJobInfo information about the job persisted on the DB
* @return a ReplicationJob made from the persisted information
*/
private ReplicationJob restoreReplicationJob(PersistedJobInfo persistedJobInfo) {
ReplicationTask replicationTask = null;
HiveObjectSpec tableSpec =
new HiveObjectSpec(persistedJobInfo.getSrcDbName(), persistedJobInfo.getSrcTableName());
HiveObjectSpec partitionSpec = null;
if (persistedJobInfo.getSrcPartitionNames().size() > 0) {
partitionSpec = new HiveObjectSpec(persistedJobInfo.getSrcDbName(),
persistedJobInfo.getSrcTableName(), persistedJobInfo.getSrcPartitionNames().get(0));
}
switch (persistedJobInfo.getOperation()) {
case COPY_UNPARTITIONED_TABLE:
replicationTask = new CopyUnpartitionedTableTask(conf, destinationObjectFactory,
objectConflictHandler, srcCluster, destCluster, tableSpec,
persistedJobInfo.getSrcPath(), directoryCopier, true);
break;
case COPY_PARTITIONED_TABLE:
replicationTask =
new CopyPartitionedTableTask(conf, destinationObjectFactory, objectConflictHandler,
srcCluster, destCluster, tableSpec, persistedJobInfo.getSrcPath());
break;
case COPY_PARTITION:
replicationTask = new CopyPartitionTask(conf, destinationObjectFactory,
objectConflictHandler, srcCluster, destCluster, partitionSpec,
persistedJobInfo.getSrcPath(), Optional.empty(), directoryCopier, true);
break;
case COPY_PARTITIONS:
List<String> partitionNames = persistedJobInfo.getSrcPartitionNames();
replicationTask = new CopyPartitionsTask(conf, destinationObjectFactory,
objectConflictHandler, srcCluster, destCluster, tableSpec, partitionNames,
persistedJobInfo.getSrcPath(), copyPartitionJobExecutor, directoryCopier);
break;
case DROP_TABLE:
replicationTask = new DropTableTask(srcCluster, destCluster, tableSpec,
persistedJobInfo.getSrcObjectTldt());
break;
case DROP_PARTITION:
replicationTask = new DropPartitionTask(srcCluster, destCluster, partitionSpec,
persistedJobInfo.getSrcObjectTldt());
break;
case RENAME_TABLE:
if (!persistedJobInfo.getRenameToDb().isPresent()
|| !persistedJobInfo.getRenameToTable().isPresent()) {
throw new RuntimeException(String.format("Rename to table is invalid: %s.%s",
persistedJobInfo.getRenameToDb(), persistedJobInfo.getRenameToTable()));
}
HiveObjectSpec renameToTableSpec = new HiveObjectSpec(
persistedJobInfo.getRenameToDb().get(), persistedJobInfo.getRenameToTable().get());
replicationTask = new RenameTableTask(conf, srcCluster, destCluster,
destinationObjectFactory, objectConflictHandler, tableSpec, renameToTableSpec,
persistedJobInfo.getSrcPath(), persistedJobInfo.getRenameToPath(),
persistedJobInfo.getSrcObjectTldt(), copyPartitionJobExecutor, directoryCopier);
break;
case RENAME_PARTITION:
if (!persistedJobInfo.getRenameToDb().isPresent()
|| !persistedJobInfo.getRenameToTable().isPresent()
|| !persistedJobInfo.getRenameToPartition().isPresent()) {
throw new RuntimeException(String.format("Rename to partition is invalid: %s.%s/%s",
persistedJobInfo.getRenameToDb(),
persistedJobInfo.getRenameToTable(),
persistedJobInfo.getRenameToPartition()));
}
HiveObjectSpec renameToSpec = new HiveObjectSpec(
persistedJobInfo.getRenameToDb().get(),
persistedJobInfo.getRenameToTable().get(),
persistedJobInfo.getRenameToPartition().get());
replicationTask = new RenamePartitionTask(conf,
destinationObjectFactory,
objectConflictHandler,
srcCluster,
destCluster,
partitionSpec,
renameToSpec,
Optional.empty(),
persistedJobInfo.getRenameToPath(),
persistedJobInfo.getSrcObjectTldt(),
directoryCopier);
break;
default:
throw new UnsupportedOperationException(
"Unhandled operation:" + persistedJobInfo.getOperation());
}
return new ReplicationJob(conf, replicationTask, onStateChangeHandler, persistedJobInfo);
}
/**
* Queue the specified job to be run.
*
* @param job the job to add to the queue.
*/
public void queueJobForExecution(ReplicationJob job) {
jobExecutor.add(job);
counters.incrementCounter(ReplicationCounters.Type.EXECUTION_SUBMITTED_TASKS);
}
/**
* Start reading the audit log and replicate entries.
*
* @param jobsToComplete the number of jobs to complete before returning. Useful for testing.
*
* @throws IOException if there's an error reading or writing to the filesystem
* @throws SQLException if there's an error querying the DB
*/
public void run(long jobsToComplete)
throws AuditLogEntryException, IOException, StateUpdateException, SQLException {
// Clear the counters so that we can accurate stats for this run
clearCounters();
// Configure the audit log reader based what's specified, or what was
// last persisted.
long lastPersistedAuditLogId = 0;
if (startAfterAuditLogId.isPresent()) {
// The starting ID was specified
lastPersistedAuditLogId = startAfterAuditLogId.get();
} else {
// Otherwise, start from the previous stop point
LOG.debug("Fetching last persisted audit log ID");
Optional<String> lastPersistedIdString = keyValueStore.get(LAST_PERSISTED_AUDIT_LOG_ID_KEY);
if (!lastPersistedIdString.isPresent()) {
Optional<Long> maxId = auditLogReader.getMaxId();
lastPersistedAuditLogId = maxId.orElse(Long.valueOf(0));
LOG.warn(
String.format(
"Since the last persisted ID was not "
+ "previously set, using max ID in the audit log: %s",
lastPersistedAuditLogId));
} else {
lastPersistedAuditLogId = Long.parseLong(lastPersistedIdString.get());
}
}
LOG.info("Using last persisted ID of " + lastPersistedAuditLogId);
auditLogReader.setReadAfterId(lastPersistedAuditLogId);
// Resume jobs that were persisted, but were not run.
for (PersistedJobInfo jobInfo : jobInfoStore.getRunnableFromDb()) {
LOG.debug(String.format("Restoring %s to (re)run", jobInfo));
ReplicationJob job = restoreReplicationJob(jobInfo);
prettyLogStart(job);
jobRegistry.registerJob(job);
queueJobForExecution(job);
}
TimeZone tz = TimeZone.getTimeZone("UTC");
DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'");
df.setTimeZone(tz);
statsTracker.start();
// This is the time that the last persisted id was updated in the store.
// It's tracked to rate limit the number of updates that are done.
long updateTimeForLastPersistedId = 0;
long lastReportedMetricsTimeReplicationJob = 0;
while (true) {
if (pauseRequested) {
LOG.debug("Pause requested. Sleeping...");
ReplicationUtils.sleep(pollWaitTimeMs);
continue;
}
if (System.currentTimeMillis() - lastReportedMetricsTimeReplicationJob
> replicationJobRegistryReportInterval) {
jobRegistry.reportStats();
lastReportedMetricsTimeReplicationJob = System.currentTimeMillis();
}
// Stop if we've had enough successful jobs - for testing purposes
// only
long completedJobs = counters.getCounter(ReplicationCounters.Type.SUCCESSFUL_TASKS)
+ counters.getCounter(ReplicationCounters.Type.NOT_COMPLETABLE_TASKS);
if (jobsToComplete > 0 && completedJobs >= jobsToComplete) {
LOG.debug(
String.format("Hit the limit for the number of " + "successful jobs (%d) - returning.",
jobsToComplete));
return;
}
statsDClient.gauge(JOBS_IN_MEMORY_GAUGE, jobExecutor.getNotDoneJobCount());
// Wait if there are too many jobs
if (jobExecutor.getNotDoneJobCount() >= maxJobsInMemory) {
LOG.debug(String.format(
"There are too many jobs in memory. " + "Waiting until more complete. (limit: %d)",
maxJobsInMemory));
ReplicationUtils.sleep(pollWaitTimeMs);
continue;
}
long batchSize = auditLogBatchSize;
// make sure not to exceed maxJobsInMemory
batchSize = Math.min(batchSize, maxJobsInMemory - jobExecutor.getNotDoneJobCount());
// Get a few entries from the audit log
LOG.debug("Fetching the next entry from the audit log");
List<AuditLogEntry> auditLogEntries = auditLogReader.resilientNext((int) batchSize);
LOG.debug(String.format("Got %d audit log entries", auditLogEntries.size()));
statsDClient.count(AUDIT_LOG_ENTRIES_COUNT, auditLogEntries.size());
for (AuditLogEntry entry : auditLogEntries) {
LOG.debug("Got audit log entry: " + entry);
}
// If there's nothing from the audit log, then wait for a little bit
// and then try again.
if (auditLogEntries.isEmpty()) {
LOG.debug(String.format("No more entries from the audit log. " + "Sleeping for %s ms",
pollWaitTimeMs));
ReplicationUtils.sleep(pollWaitTimeMs);
continue;
}
// Convert the audit log entry into a replication job, which has
// elements persisted to the DB
List<List<ReplicationJob>> replicationJobsJobs =
jobFactory.createReplicationJobs(auditLogEntries, replicationFilters);
int replicationJobsJobsSize = 0;
for (List<ReplicationJob> rj : replicationJobsJobs) {
replicationJobsJobsSize += rj.size();
}
LOG.debug(String.format("Persisted %d replication jobs", replicationJobsJobsSize));
statsDClient.count(PERSISTED_JOBS_COUNT, replicationJobsJobsSize);
// Since the replication job was created and persisted, we can
// advance the last persisted ID. Update every 10s to reduce db
for (int i = 0; i < auditLogEntries.size(); i++) {
List<ReplicationJob> replicationJobs = replicationJobsJobs.get(i);
AuditLogEntry auditLogEntry = auditLogEntries.get(i);
// Add these jobs to the registry
for (ReplicationJob job : replicationJobs) {
jobRegistry.registerJob(job);
}
LOG.debug(String.format(
"Audit log entry id: %s converted to %s", auditLogEntry.getId(), replicationJobs));
for (ReplicationJob replicationJob : replicationJobs) {
LOG.debug("Scheduling: " + replicationJob);
prettyLogStart(replicationJob);
long tasksSubmittedForExecution =
counters.getCounter(ReplicationCounters.Type.EXECUTION_SUBMITTED_TASKS);
if (tasksSubmittedForExecution >= jobsToComplete) {
LOG.warn(String.format("Not submitting %s for execution "
+ " due to the limit for the number of " + "jobs to execute", replicationJob));
continue;
} else {
queueJobForExecution(replicationJob);
}
}
}
if (System.currentTimeMillis() - updateTimeForLastPersistedId > 10000) {
keyValueStore.resilientSet(
LAST_PERSISTED_AUDIT_LOG_ID_KEY,
Long.toString(auditLogEntries.get(auditLogEntries.size() - 1).getId()));
updateTimeForLastPersistedId = System.currentTimeMillis();
}
}
}
  /**
   * Resets all replication counters back to zero.
   *
   * <p>Intended for testing purposes only - production code should not need to clear counters.
   */
  public void clearCounters() {
    counters.clear();
  }
@Override
public List<TReplicationJob> getActiveJobs(long afterId, int maxJobs) throws TException {
int count = 0;
List<TReplicationJob> jobsToReturn = new ArrayList<>();
for (ReplicationJob job : jobRegistry.getActiveJobs()) {
if (count == maxJobs) {
break;
}
if (job.getId() > afterId) {
count++;
TReplicationJob jobThrift = ThriftObjectUtils.convert(job);
jobsToReturn.add(jobThrift);
}
}
return jobsToReturn;
}
@Override
public synchronized void pause() throws TException {
LOG.debug("Paused requested");
if (pauseRequested) {
LOG.warn("Server is already paused!");
} else {
pauseRequested = true;
try {
copyPartitionJobExecutor.stop();
jobExecutor.stop();
} catch (InterruptedException e) {
LOG.error("Unexpected interruption", e);
}
}
}
@Override
public synchronized void resume() throws TException {
LOG.debug("Resume requested");
if (!pauseRequested) {
LOG.warn("Server is already resumed!");
} else {
pauseRequested = false;
copyPartitionJobExecutor.start();
jobExecutor.start();
}
}
  /**
   * Returns the most recently computed replication lag, as tracked by the stats tracker.
   * Note that this is the last <em>calculated</em> value, not a live measurement.
   */
  @Override
  public long getLag() throws TException {
    return statsTracker.getLastCalculatedLag();
  }
@Override
public Map<Long, TReplicationJob> getJobs(List<Long> ids) {
throw new RuntimeException("Not yet implemented!");
}
@Override
public List<TReplicationJob> getRetiredJobs(long afterId, int maxJobs) throws TException {
int count = 0;
List<TReplicationJob> jobsToReturn = new ArrayList<>();
for (ReplicationJob job : jobRegistry.getRetiredJobs()) {
if (count == maxJobs) {
break;
}
if (job.getId() > afterId) {
count++;
TReplicationJob jobThrift = ThriftObjectUtils.convert(job);
jobsToReturn.add(jobThrift);
}
}
return jobsToReturn;
}
  /**
   * Logs a human-readable summary line for a replication job that is about to be scheduled.
   *
   * <p>The summary includes the originating audit log ID, the job ID, the operation type, the
   * source Hive objects (table or partitions), and - for rename operations only - the rename
   * target.
   *
   * @param job the replication job to describe
   */
  private void prettyLogStart(ReplicationJob job) {
    // Build the list of source object specs: one per partition if the job covers partitions,
    // otherwise a single table-level spec.
    List<HiveObjectSpec> srcSpecs = new ArrayList<>();
    if (job.getPersistedJobInfo().getSrcPartitionNames().size() > 0) {
      for (String partitionName : job.getPersistedJobInfo().getSrcPartitionNames()) {
        HiveObjectSpec spec = new HiveObjectSpec(job.getPersistedJobInfo().getSrcDbName(),
            job.getPersistedJobInfo().getSrcTableName(), partitionName);
        srcSpecs.add(spec);
      }
    } else {
      HiveObjectSpec spec = new HiveObjectSpec(job.getPersistedJobInfo().getSrcDbName(),
          job.getPersistedJobInfo().getSrcTableName());
      srcSpecs.add(spec);
    }
    // Rename target is only present when both the target DB and table are set; the partition
    // component is optional (table rename vs. partition rename).
    Optional<HiveObjectSpec> renameToSpec = Optional.empty();
    PersistedJobInfo jobInfo = job.getPersistedJobInfo();
    if (jobInfo.getRenameToDb().isPresent() && jobInfo.getRenameToTable().isPresent()) {
      if (!jobInfo.getRenameToPartition().isPresent()) {
        renameToSpec = Optional.of(
            new HiveObjectSpec(jobInfo.getRenameToDb().get(), jobInfo.getRenameToTable().get()));
      } else {
        renameToSpec = Optional.of(new HiveObjectSpec(jobInfo.getRenameToDb().get(),
            jobInfo.getRenameToTable().get(), jobInfo.getRenameToPartition().get()));
      }
    }
    ReplicationOperation operation = job.getPersistedJobInfo().getOperation();
    boolean renameOperation = operation == ReplicationOperation.RENAME_TABLE
        || operation == ReplicationOperation.RENAME_PARTITION;
    // Rename operations get an extra "rename to" field in the log line.
    if (renameOperation) {
      LOG.info(String.format(
          "Processing audit log id: %s, job id: %s, " + "operation: %s, source objects: %s "
              + "rename to: %s",
          job.getPersistedJobInfo().getExtras().get(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY),
          job.getId(), job.getPersistedJobInfo().getOperation(), srcSpecs, renameToSpec));
    } else {
      LOG.info(String.format(
          "Processing audit log id: %s, job id: %s, " + "operation: %s, source objects: %s",
          job.getPersistedJobInfo().getExtras().get(PersistedJobInfo.AUDIT_LOG_ID_EXTRAS_KEY),
          job.getId(), job.getPersistedJobInfo().getOperation(), srcSpecs));
    }
  }
  /**
   * Start processing audit log entries after this ID. This should only be called before run() is
   * called - the value is not re-read once the main loop has started.
   *
   * @param auditLogId audit log ID after which processing should begin
   */
  public void setStartAfterAuditLogId(long auditLogId) {
    this.startAfterAuditLogId = Optional.of(auditLogId);
  }
  /**
   * For polling operations that need to sleep, sleep for this many milliseconds.
   *
   * @param pollWaitTimeMs sleep duration in milliseconds between polls
   */
  public void setPollWaitTimeMs(long pollWaitTimeMs) {
    this.pollWaitTimeMs = pollWaitTimeMs;
  }
}
| 9,490 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationUtils.java | package com.airbnb.reair.incremental;
import com.google.common.base.Joiner;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveParameterKeys;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TBase;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TJSONProtocol;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectWriter;
import org.codehaus.jackson.type.TypeReference;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
 * Static utility methods shared by the replication process: Thrift object comparison and
 * normalization, JSON (de)serialization helpers, path/location helpers, and sleep/backoff
 * helpers.
 */
public class ReplicationUtils {
  private static final Log LOG = LogFactory.getLog(ReplicationUtils.class);

  // For doing exponential backoff, the number of seconds to use as the base
  private static final int DEFAULT_WAIT_TIME_BASE = 2;

  // For doing exponential backoff, the maximum number of seconds to use
  private static final int DEFAULT_MAX_WAIT_TIME = 3600;

  /**
   * Remove (or set to 0) fields in the table object that should not be compared.
   *
   * @param table the table to remove non-comparable fields
   * @return a copy of the table with non-comparable fields removed
   */
  public static Table stripNonComparables(Table table) {
    Table newTable = new Table(table);
    newTable.setCreateTime(0);
    newTable.setLastAccessTime(0);
    return newTable;
  }

  /**
   * Remove (or set to 0) fields in the partition object that should not be compared.
   *
   * @param partition the partition to remove non-comparable fields
   * @return a copy of the partition with non-comparable fields removed
   */
  public static Partition stripNonComparables(Partition partition) {
    Partition newPartition = new Partition(partition);
    newPartition.setCreateTime(0);
    newPartition.setLastAccessTime(0);
    return newPartition;
  }

  /**
   * Deserialize a Thrift object from JSON.
   *
   * @param serializedObject the JSON string representation of the object
   * @param obj the Thrift object to populate fields
   *
   * @throws MetadataException if there is an error deserializing
   */
  public static <T extends TBase> void deserializeObject(String serializedObject, T obj)
      throws MetadataException {
    TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
    try {
      deserializer.deserialize(obj, serializedObject, "UTF-8");
    } catch (TException e) {
      throw new MetadataException(e);
    }
  }

  /**
   * Create the database on the destination if it exists on the source but it does not exist on the
   * destination metastore.
   *
   * @param srcMs source Hive metastore
   * @param destMs destination Hive metastore
   * @param dbName DB to create
   *
   * @throws HiveMetastoreException if there's an error creating the DB.
   */
  public static void createDbIfNecessary(HiveMetastoreClient srcMs, HiveMetastoreClient destMs,
      String dbName) throws HiveMetastoreException {
    if (destMs.existsDb(dbName)) {
      LOG.debug("DB " + dbName + " already exists on destination.");
      return;
    } else {
      Database srcDb = srcMs.getDatabase(dbName);
      if (srcDb == null) {
        LOG.warn(String.format("DB %s doesn't exist on the source!", dbName));
        return;
      }
      // Only the name and description are carried over; location and parameters are left null
      // so the destination metastore picks its own defaults.
      Database dbToCreate = new Database(srcDb.getName(), srcDb.getDescription(), null, null);
      LOG.debug("Creating DB: " + dbToCreate);
      destMs.createDatabase(dbToCreate);
    }
  }

  /**
   * Check if the specified Hive object exists.
   *
   * @param ms Hive metastore
   * @param spec specification for the Hive object
   * @return whether or not the object exists
   *
   * @throws HiveMetastoreException if there is an error querying the metastore
   */
  public static boolean exists(HiveMetastoreClient ms, HiveObjectSpec spec)
      throws HiveMetastoreException {
    if (spec.isPartition()) {
      return ms.existsPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());
    } else {
      return ms.existsTable(spec.getDbName(), spec.getTableName());
    }
  }

  /**
   * Check if the schema between two tables match (columns and partition keys).
   *
   * @param srcTable source table
   * @param destTable destination table
   * @return whether or not the schemas of the tables match
   */
  public static boolean schemasMatch(Table srcTable, Table destTable) {
    return srcTable.getSd().getCols().equals(destTable.getSd().getCols())
        && srcTable.getPartitionKeys().equals(destTable.getPartitionKeys());
  }

  /**
   * Check if the table has the expected modified time.
   *
   * @param expectedTldt expected modified time
   * @param table table to check
   * @return whether or not table has the expected modified time
   */
  public static boolean transientLastDdlTimesMatch(String expectedTldt, Table table) {
    return StringUtils.equals(expectedTldt, table.getParameters().get(HiveParameterKeys.TLDT));
  }

  /**
   * Check if the partition has the expected modified time.
   *
   * @param expectedTldt expected modified time.
   * @param partition partition to check
   * @return whether or not partition has the expected modified time
   */
  public static boolean transientLastDdlTimesMatch(String expectedTldt, Partition partition) {
    return StringUtils.equals(expectedTldt, partition.getParameters().get(HiveParameterKeys.TLDT));
  }

  /**
   * Check if two tables have matching modified times.
   *
   * @param table1 reference table to compare
   * @param table2 other table to compare
   * @return whether or not the two tables have matching modified times; false if either is null
   */
  public static boolean transientLastDdlTimesMatch(Table table1, Table table2) {
    if (table1 == null || table2 == null) {
      return false;
    }
    return StringUtils.equals(table1.getParameters().get(HiveParameterKeys.TLDT),
        table2.getParameters().get(HiveParameterKeys.TLDT));
  }

  /**
   * Check if two partitions have matching modified times.
   *
   * @param partition1 reference partition to compare
   * @param partition2 other partition to compare
   * @return whether or not the two partitions have matching modified times; false if either is
   *     null
   */
  public static boolean transientLastDdlTimesMatch(Partition partition1, Partition partition2) {
    if (partition1 == null || partition2 == null) {
      return false;
    }
    return StringUtils.equals(partition1.getParameters().get(HiveParameterKeys.TLDT),
        partition2.getParameters().get(HiveParameterKeys.TLDT));
  }

  /**
   * Convert JSON representation of a list into a Java string list.
   *
   * @param json JSON representation of a list
   * @return Java string list, or null if the JSON could not be parsed (callers rely on the
   *     null return to detect malformed input)
   */
  public static List<String> convertToList(String json) {
    try {
      ObjectMapper om = new ObjectMapper();
      return om.readValue(json, new TypeReference<List<String>>() {});
    } catch (IOException e) {
      return null;
    }
  }

  /**
   * Convert JSON representation of a map into a Java string map.
   *
   * @param json JSON representation of a map
   * @return Java string map, or null if the JSON could not be parsed (callers rely on the
   *     null return to detect malformed input)
   */
  public static Map<String, String> convertToMap(String json) {
    try {
      ObjectMapper om = new ObjectMapper();
      return om.readValue(json, new TypeReference<Map<String, String>>() {});
    } catch (IOException e) {
      return null;
    }
  }

  /**
   * Convert a Java list to a JSON list.
   *
   * @param list list to convert
   * @return JSON representation of the list
   *
   * @throws IOException if there is an error converting the list
   */
  public static String convertToJson(List<String> list) throws IOException {
    // writerWithDefaultPrettyPrinter() bundled in with CDH is not present,
    // so using this deprecated method.
    @SuppressWarnings("deprecation")
    ObjectWriter ow = new ObjectMapper().defaultPrettyPrintingWriter();
    return ow.writeValueAsString(list);
  }

  /**
   * Convert a Java map to a JSON map.
   *
   * @param map map to convert
   * @return the JSON representation of the map
   *
   * @throws IOException if there's an error converting the map
   */
  public static String convertToJson(Map<String, String> map) throws IOException {
    // writerWithDefaultPrettyPrinter() bundled in with CDH is not present,
    // so using this deprecated method.
    @SuppressWarnings("deprecation")
    ObjectWriter ow = new ObjectMapper().defaultPrettyPrintingWriter();
    return ow.writeValueAsString(map);
  }

  /**
   * Get the data location of a Hive table.
   *
   * @param table Thrift Hive table
   * @return the data location of the given table, if present
   */
  public static Optional<Path> getLocation(Table table) {
    if (table == null || table.getSd() == null || table.getSd().getLocation() == null) {
      return Optional.empty();
    } else {
      return Optional.ofNullable(new Path(table.getSd().getLocation()));
    }
  }

  /**
   * Get the data location of a Hive partition.
   *
   * @param partition Thrift Hive partition
   * @return the data location of the given partition, if present
   */
  public static Optional<Path> getLocation(Partition partition) {
    if (partition == null || partition.getSd() == null || partition.getSd().getLocation() == null) {
      return Optional.empty();
    }
    return Optional.ofNullable(new Path(partition.getSd().getLocation()));
  }

  /**
   * Get the last modified time of a Hive table.
   *
   * @param table Thrift Hive table
   * @return the last modified time as a string
   */
  public static Optional<String> getTldt(Table table) {
    if (table == null || table.getParameters() == null) {
      return Optional.empty();
    }
    return Optional.ofNullable(table.getParameters().get(HiveParameterKeys.TLDT));
  }

  /**
   * Get the last modified time of a Hive partition.
   *
   * @param partition Thrift Hive partition
   * @return the last modified time as a string
   */
  public static Optional<String> getTldt(Partition partition) {
    if (partition == null || partition.getParameters() == null) {
      return Optional.empty();
    }
    return Optional.ofNullable(partition.getParameters().get(HiveParameterKeys.TLDT));
  }

  /**
   * Sleep the current thread.
   *
   * @param sleepTime number of milliseconds to sleep for
   */
  public static void sleep(long sleepTime) {
    try {
      Thread.sleep(sleepTime);
    } catch (InterruptedException e) {
      LOG.error("Unexpectedly interrupted!");
      // Restore the interrupt flag so callers further up the stack can still observe the
      // interruption instead of it being silently swallowed here.
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Check to see that all of the partitions are from the same table.
   *
   * @param partitions partitions to check
   * @return whether or not all the partitions are from the same table; false for an empty
   *     collection
   */
  public static boolean fromSameTable(Collection<Partition> partitions) {
    if (partitions.size() == 0) {
      return false;
    }
    String dbName = null;
    String tableName = null;

    for (Partition p : partitions) {
      if (dbName == null) {
        dbName = p.getDbName();
      } else if (!dbName.equals(p.getDbName())) {
        return false;
      }

      if (tableName == null) {
        tableName = p.getTableName();
      } else if (!tableName.equals(p.getTableName())) {
        return false;
      }
    }
    return true;
  }

  /**
   * Return a common parent directory of the given directories.
   *
   * @param dirs directories to check
   * @return the common parent directory, or empty if the directories do not share a scheme,
   *     authority, or any path prefix
   */
  public static Optional<Path> getCommonDirectory(Set<Path> dirs) {
    if (dirs.size() == 0) {
      return Optional.empty();
    }
    // First verify that all the schemes and authorities are the same.
    // Both scheme and authority can be null (e.g. a relative path, or file:///abc/), so use a
    // null-safe comparison against the first directory's values. The previous implementation
    // NPE'd via scheme.equals(...) when the first directory had a null scheme.
    boolean first = true;
    String scheme = null;
    String authority = null;
    for (Path dir : dirs) {
      if (first) {
        scheme = dir.toUri().getScheme();
        authority = dir.toUri().getAuthority();
        first = false;
        continue;
      }
      if (!StringUtils.equals(scheme, dir.toUri().getScheme())) {
        return Optional.empty();
      }
      if (!StringUtils.equals(authority, dir.toUri().getAuthority())) {
        return Optional.empty();
      }
    }

    String commonDir = null;
    for (Path dir : dirs) {
      String dirPathString = dir.toUri().getPath();
      if (commonDir == null) {
        commonDir = dirPathString;
      } else {
        commonDir = commonDir(commonDir, dirPathString);
      }
    }

    // No common directory
    if (commonDir.length() == 0) {
      return Optional.empty();
    }

    return Optional.of(new Path(scheme, authority, commonDir));
  }

  /**
   * Get the most specific common directory.
   *
   * @param dir1 first directory
   * @param dir2 second directory
   * @return the most specific directory that contains both dir1 and dir2. e.g /a/b/c, /a/d/e => /a
   */
  public static String commonDir(String dir1, String dir2) {
    String[] path1Elements = dir1.split("/");
    String[] path2Elements = dir2.split("/");
    List<String> commonPath = new ArrayList<>();

    int pathIndex = 0;
    while (pathIndex < path1Elements.length && pathIndex < path2Elements.length) {
      if (path1Elements[pathIndex].equals(path2Elements[pathIndex])) {
        commonPath.add(path1Elements[pathIndex]);
      } else {
        break;
      }
      pathIndex++;
    }
    return org.apache.commons.lang.StringUtils.join(commonPath, "/");
  }

  /**
   * Get the locations of the specified partitions.
   *
   * @param partitions partitions to get the locations of
   * @return a set of locations corresponding to the partitions (partitions without a location
   *     are skipped)
   */
  public static Set<Path> getLocations(Collection<Partition> partitions) {
    Set<Path> paths = new HashSet<>();
    for (Partition p : partitions) {
      String location = p.getSd().getLocation();
      if (location != null) {
        paths.add(new Path(location));
      }
    }
    return paths;
  }

  public static void exponentialSleep(int attempt) throws InterruptedException {
    exponentialSleep(attempt, DEFAULT_WAIT_TIME_BASE, DEFAULT_MAX_WAIT_TIME);
  }

  /**
   * Sleep for a period of time that relates exponentially to the attempt number.
   *
   * @param attempt attempt number
   * @param base the base (in seconds) for the exponential wait time - the sleep is
   *     {@code min(max, base^attempt)} seconds
   * @param max the maximum wait time in seconds
   *
   * @throws InterruptedException if the waiting thread gets interrupted
   */
  public static void exponentialSleep(int attempt, int base, int max) throws InterruptedException {
    long sleepSeconds = (long) Math.min(max, Math.pow(base, attempt));
    LOG.debug(String.format("Attempt %d: sleeping for %d seconds", attempt, sleepSeconds));
    Thread.sleep(1000 * sleepSeconds);
  }

  /**
   * Returns a string row-like representation of the input columns.
   *
   * @param columns The columns to convert into the row.
   * @return a string row-like representation of the input columns (tab-separated, nulls
   *     rendered as "NULL").
   */
  public static String genValue(String... columns) {
    return Joiner.on("\t").useForNull("NULL").join(columns);
  }

  /**
   * Normalizes the DB name and table name in a table object by lowercasing them, mutating the
   * argument in place.
   *
   * @param table the table object whose names are to be normalized
   * @return the same table object with normalized names
   */
  public static Table normalizeNames(Table table) {
    table.setDbName(table.getDbName().toLowerCase());
    table.setTableName(table.getTableName().toLowerCase());
    return table;
  }

  /**
   * Normalizes the DB name and table name in a partition object by lowercasing them, mutating
   * the argument in place.
   *
   * @param partition the partition object whose names are to be normalized
   * @return the same partition object with normalized names
   */
  public static Partition normalizeNames(Partition partition) {
    partition.setDbName(partition.getDbName().toLowerCase());
    partition.setTableName(partition.getTableName().toLowerCase());
    return partition;
  }

  /**
   * Parses the value for the given key as a Long, returning null (with a warning) if the key is
   * absent or the value is not a valid long.
   */
  private static Long getLongValue(Map<String, String> parameters, String key) {
    if (parameters == null) {
      return null;
    }
    String value = parameters.get(key);
    if (value == null) {
      return null;
    }
    try {
      return Long.valueOf(value);
    } catch (NumberFormatException e) {
      LOG.warn("NumberFormatException: value for key " + key + ": " + value
          + " is not a valid Long.");
      return null;
    }
  }

  /**
   * Returns the later of the last-modified time and the transient last DDL time, or whichever
   * one is present if the other is missing.
   */
  private static Long getLastModifiedTime(Map<String, String> parameters) {
    Long lastModifiedTime = getLongValue(parameters, HiveParameterKeys.TLMT);
    Long transientLastDdlTime = getLongValue(parameters, HiveParameterKeys.TLDT);
    if (lastModifiedTime == null) {
      return transientLastDdlTime;
    }
    if (transientLastDdlTime == null) {
      return lastModifiedTime;
    }
    return Long.max(lastModifiedTime, transientLastDdlTime);
  }

  /**
   * Return the last modified time of a table, or null if unavailable.
   */
  public static Long getLastModifiedTime(Table table) {
    if (table == null) {
      return null;
    }
    Map<String, String> parameters = table.getParameters();
    return getLastModifiedTime(parameters);
  }

  /**
   * Return the last modified time of a partition, or null if unavailable.
   */
  public static Long getLastModifiedTime(Partition partition) {
    if (partition == null) {
      return null;
    }
    Map<String, String> parameters = partition.getParameters();
    return getLastModifiedTime(parameters);
  }

  /**
   * Return whether the src table is older than the dest table. False if either modified time is
   * unavailable.
   */
  public static boolean isSrcOlder(Table src, Table dest) {
    Long srcModifiedTime = ReplicationUtils.getLastModifiedTime(src);
    Long destModifiedTime = ReplicationUtils.getLastModifiedTime(dest);
    return (srcModifiedTime != null && destModifiedTime != null
        && srcModifiedTime < destModifiedTime);
  }

  /**
   * Return whether the src partition is older than the dest partition. False if either modified
   * time is unavailable.
   */
  public static boolean isSrcOlder(Partition src, Partition dest) {
    Long srcModifiedTime = ReplicationUtils.getLastModifiedTime(src);
    Long destModifiedTime = ReplicationUtils.getLastModifiedTime(dest);
    return (srcModifiedTime != null && destModifiedTime != null
        && srcModifiedTime < destModifiedTime);
  }
}
| 9,491 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/ReplicationOperation.java | package com.airbnb.reair.incremental;
/**
 * Different types of replication operations - these correspond to the associated
 * ReplicationTask.
 */
public enum ReplicationOperation {
  // Copy a table that has no partitions.
  COPY_UNPARTITIONED_TABLE,
  // Copy the metadata of a partitioned table.
  COPY_PARTITIONED_TABLE,
  // Copy a single partition.
  COPY_PARTITION,
  // Copy multiple partitions as one batched operation.
  COPY_PARTITIONS,
  // Propagate a table drop.
  DROP_TABLE,
  // Propagate a partition drop.
  DROP_PARTITION,
  // Propagate a table rename.
  RENAME_TABLE,
  // Propagate a partition rename.
  RENAME_PARTITION
}
| 9,492 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/configuration/DestinationObjectFactory.java | package com.airbnb.reair.incremental.configuration;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.HiveParameterKeys;
import com.airbnb.reair.incremental.ReplicationUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
/**
 * Creates the Hive thrift object that should be created on the destination cluster. Note that only
 * the Thrift object is generated - it's not actually created in the metastore.
 */
public class DestinationObjectFactory implements Configurable {
  private static final Log LOG = LogFactory.getLog(DestinationObjectFactory.class);

  // Initialized to empty so that getConf() is safe to call even when setConf() was never
  // invoked (previously this field started out null and getConf() would NPE).
  private Optional<Configuration> conf = Optional.empty();

  public DestinationObjectFactory() {}

  public void setConf(Configuration conf) {
    this.conf = Optional.ofNullable(conf);
  }

  public Configuration getConf() {
    return conf.orElse(null);
  }

  /**
   * For objects with a location, transform the location through this method.
   *
   * @param srcCluster source cluster object
   * @param destCluster destination cluster object
   * @param srcLocation the location on the source
   * @return the location on the destination that the source location should map to
   */
  public String modifyLocation(Cluster srcCluster, Cluster destCluster, String srcLocation) {
    Path srcPath = new Path(srcLocation);
    String scheme = srcPath.toUri().getScheme();
    if (scheme != null) {
      // Objects with an s3 location should be copied as is
      if (scheme.equals("s3n") || scheme.equals("s3a")) {
        return srcLocation;
      }
    }

    // The destination path should have the same relative path under the
    // destination FS's root.
    // If the source path is within the FS root of the source cluster,
    // it should have the same relative path on the destination
    Path destPath;
    String srcFsRootWithSlash = FsUtils.getPathWithSlash(srcCluster.getFsRoot().toString());
    if (srcPath.toString().startsWith(srcFsRootWithSlash)) {
      String relativePath = FsUtils.getRelativePath(srcCluster.getFsRoot(), srcPath);
      destPath = new Path(destCluster.getFsRoot(), relativePath);
    } else {
      LOG.warn("srcPath " + srcPath.toString() + " doesn't start with "
          + srcFsRootWithSlash);
      destPath = new Path(destCluster.getFsRoot(), srcPath.toUri().getPath());
    }
    return destPath.toString();
  }

  /**
   * Given the Thrift table object from the source, create the Thrift table object that should be
   * added to the destination. Note that this does not actually create the object on the destination
   * metastore - it only makes a local Thrift object.
   *
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param srcTable table object from the source
   * @param existingDestTable table object from the destination, if one already exists
   * @return the table to create or overwrite with on the destination.
   */
  public Table createDestTable(
      Cluster srcCluster,
      Cluster destCluster,
      Table srcTable,
      Table existingDestTable) {
    Table destTable = new Table(srcTable);

    // If applicable, update the location for the table. Note: the "s3" prefix check covers
    // s3n://, s3a://, and s3:// locations, which are preserved as-is.
    Optional<Path> srcLocation = ReplicationUtils.getLocation(srcTable);

    if (srcLocation.isPresent() && !srcLocation.get().toString().startsWith("s3")) {
      String destLocation = modifyLocation(srcCluster, destCluster, srcLocation.get().toString());
      destTable.getSd().setLocation(destLocation);
    }
    destTable.putToParameters(HiveParameterKeys.SRC_CLUSTER, srcCluster.getName());

    // Merge the parameters for the table, with the parameter values from
    // the source taking precedence
    if (existingDestTable != null) {
      Map<String, String> newParameters = new HashMap<>();
      newParameters.putAll(existingDestTable.getParameters());
      newParameters.putAll(destTable.getParameters());
      destTable.setParameters(newParameters);
    }

    return destTable;
  }

  /**
   * Given the Thrift partition object from the source, create the Thrift partition object that
   * should be added to the destination. Note that this does not actually create the object on the
   * destination metastore - it only makes a local Thrift object.
   *
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param srcPartition partition object from the source
   * @param existingDestPartition partition object on the destination, if one already exists
   * @return partition object to create or overwrite with on the destination
   */
  public Partition createDestPartition(
      Cluster srcCluster,
      Cluster destCluster,
      Partition srcPartition,
      Partition existingDestPartition) {
    Partition destPartition = new Partition(srcPartition);
    Optional<Path> srcLocation = ReplicationUtils.getLocation(srcPartition);

    // If applicable, update the location for the partition
    if (srcLocation.isPresent()) {
      String destLocation = modifyLocation(srcCluster, destCluster, srcLocation.get().toString());
      destPartition.getSd().setLocation(destLocation);
    }
    destPartition.putToParameters(HiveParameterKeys.SRC_CLUSTER, srcCluster.getName());

    // Merge the parameters for the partition, with the parameter values
    // from the source taking precedence
    if (existingDestPartition != null) {
      Map<String, String> newParameters = new HashMap<>();
      newParameters.putAll(existingDestPartition.getParameters());
      newParameters.putAll(destPartition.getParameters());
      // Bug fix: the merged map was previously built but never applied to the partition,
      // so the existing destination parameters were silently dropped. Apply it, mirroring
      // createDestTable().
      destPartition.setParameters(newParameters);
    }

    return destPartition;
  }

  /**
   * @param srcLocation location of the source object to examine
   * @return whether or not location should be copied to the destination.
   */
  public boolean shouldCopyData(String srcLocation) {
    if (srcLocation.startsWith("s3n://") || srcLocation.startsWith("s3a://")) {
      return false;
    } else {
      return true;
    }
  }
}
| 9,493 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/configuration/ConfigurationException.java | package com.airbnb.reair.incremental.configuration;
/**
 * Checked exception thrown when the replication process is configured incorrectly - e.g. a
 * required setting is missing or has an invalid value.
 */
public class ConfigurationException extends Exception {
  public ConfigurationException(String message) {
    super(message);
  }

  public ConfigurationException(String message, Throwable cause) {
    super(message, cause);
  }

  public ConfigurationException(Throwable cause) {
    super(cause);
  }
}
| 9,494 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/configuration/HardCodedCluster.java | package com.airbnb.reair.incremental.configuration;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.ThriftHiveMetastoreClient;
import org.apache.hadoop.fs.Path;
/**
 * A cluster defined with hard coded values, typically derived from the configuration.
 */
public class HardCodedCluster implements Cluster {
  private final String name;
  private final String metastoreHost;
  private final int metastorePort;
  private final String jobtrackerHost;
  private final String jobtrackerPort;
  private final Path hdfsRoot;
  private final Path tmpDir;
  private final ThreadLocal<ThriftHiveMetastoreClient> metastoreClient;

  /**
   * Constructor with specific values.
   *
   * @param name string to use for identifying this cluster
   * @param metastoreHost hostname of the metastore Thrift server
   * @param metastorePort port of the metastore Thrift server
   * @param jobtrackerHost hostname of the job tracker
   * @param jobtrackerPort port of the job tracker
   * @param hdfsRoot the path for the root HDFS directory
   * @param tmpDir the path for the temporary HDFS directory (should be under root)
   */
  public HardCodedCluster(
      String name,
      String metastoreHost,
      int metastorePort,
      String jobtrackerHost,
      String jobtrackerPort,
      Path hdfsRoot,
      Path tmpDir) {
    this.name = name;
    this.metastoreHost = metastoreHost;
    this.metastorePort = metastorePort;
    this.jobtrackerHost = jobtrackerHost;
    this.jobtrackerPort = jobtrackerPort;
    this.hdfsRoot = hdfsRoot;
    this.tmpDir = tmpDir;
    this.metastoreClient = new ThreadLocal<ThriftHiveMetastoreClient>();
  }

  public String getMetastoreHost() {
    return metastoreHost;
  }

  public int getMetastorePort() {
    return metastorePort;
  }

  /**
   * Returns a metastore Thrift client for the calling thread, lazily creating and caching one
   * per thread on first use.
   */
  public ThriftHiveMetastoreClient getMetastoreClient() throws HiveMetastoreException {
    ThriftHiveMetastoreClient client = metastoreClient.get();
    if (client != null) {
      return client;
    }
    client = new ThriftHiveMetastoreClient(getMetastoreHost(), getMetastorePort());
    metastoreClient.set(client);
    return client;
  }

  public Path getFsRoot() {
    return hdfsRoot;
  }

  public Path getTmpDir() {
    return tmpDir;
  }

  public String getName() {
    return name;
  }
}
| 9,495 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/configuration/ClusterFactory.java | package com.airbnb.reair.incremental.configuration;
import com.airbnb.reair.incremental.DirectoryCopier;
import org.apache.hadoop.conf.Configuration;
/**
 * Factory that builds the source and destination {@link Cluster} objects and the
 * {@link DirectoryCopier} used to move data between them, based on a Hadoop configuration.
 */
public interface ClusterFactory {
  // Supplies the Hadoop configuration that the factory reads cluster settings from.
  void setConf(Configuration conf);

  // Returns the cluster to replicate from.
  Cluster getSrcCluster() throws ConfigurationException;

  // Returns the cluster to replicate to.
  Cluster getDestCluster() throws ConfigurationException;

  // Returns the copier used to move directory data between the clusters.
  DirectoryCopier getDirectoryCopier() throws ConfigurationException;
}
| 9,496 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/configuration/Cluster.java | package com.airbnb.reair.incremental.configuration;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import org.apache.hadoop.fs.Path;
/**
 * Encapsulates information about a cluster - generally a HDFS, MR, and a Hive metastore that are
 * considered as a unit.
 */
public interface Cluster {
  // Returns a client for this cluster's Hive metastore.
  HiveMetastoreClient getMetastoreClient() throws HiveMetastoreException;

  // Returns the root path of this cluster's filesystem.
  Path getFsRoot();

  // Returns the temporary directory on this cluster's filesystem.
  Path getTmpDir();

  // Returns the identifying name of this cluster.
  String getName();
}
| 9,497 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/configuration/ObjectConflictHandler.java | package com.airbnb.reair.incremental.configuration;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveParameterKeys;
import com.airbnb.reair.incremental.ReplicationUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.Optional;
/**
* Handles cases when there is an existing table or partition on the destination cluster. This class
* can later be configured to be user-defined.
*/
public class ObjectConflictHandler implements Configurable {
private static final Log LOG = LogFactory.getLog(ObjectConflictHandler.class);
private Optional<Configuration> conf;
public void setConf(Configuration conf) {
this.conf = Optional.ofNullable(conf);
}
public Configuration getConf() {
return conf.orElse(null);
}
/**
* Handle a conflict on the destination cluster when a table with the same DB and name already
* exists. If the conflict was successfully handled so that the caller can go ahead with
* copying/overwriting the table, this will return true.
*
* @param srcCluster source cluster
* @param destCluster destination cluster
* @param srcTable source table
* @param existingDestTable the Hive Thift table object corresponding to the conflicting object on
* the destination cluster
* @return whether or not the conflict was resolved and the table can be copied
*
* @throws HiveMetastoreException if there an error making a metastore call
*/
public boolean handleCopyConflict(
Cluster srcCluster,
Cluster destCluster,
Table srcTable,
Table existingDestTable) throws HiveMetastoreException {
HiveObjectSpec spec =
new HiveObjectSpec(existingDestTable.getDbName(), existingDestTable.getTableName());
if (existingDestTable.getParameters().get(HiveParameterKeys.SRC_CLUSTER) != null
&& !existingDestTable.getParameters().get(HiveParameterKeys.SRC_CLUSTER)
.equals(srcCluster.getName())) {
LOG.warn("Table " + spec + " exists on destination, and it's "
+ "missing tags that indicate that it was replicated.");
// This might indicate that someone created a table with the same
// name on the destination cluster. Instead of dropping and
// overwriting, a rename can be done here to save the table with a
// *_conflict name.
}
// If the partitioning keys don't match, then it will have to be
// dropped.
if (!srcTable.getPartitionKeys().equals(existingDestTable.getPartitionKeys())) {
// Table exists on destination, but it's partitioned. It'll have to
// be dropped since Hive doesn't support changing of partition
// columns. Instead of dropping the table, the table on the dest
// cluster could be renamed to something else for further
// inspection.
LOG.warn(String.format(
"For %s, there is a mismatch in the " + "partitioning keys. src: %s dest: %s", spec,
srcTable.getPartitionKeys(), existingDestTable.getPartitionKeys()));
boolean dropData = !locationOnS3(existingDestTable.getSd());
LOG.warn("Not dropping data at location " + ReplicationUtils.getLocation(existingDestTable));
HiveMetastoreClient destMs = destCluster.getMetastoreClient();
LOG.debug(String.format("Dropping %s on destination (delete " + "data: %s)", spec, dropData));
destMs.dropTable(spec.getDbName(), spec.getTableName(), dropData);
LOG.debug("Dropped " + spec);
}
return true;
}
/**
* Handle a conflict on the destination cluster when a table with the same DB and name exists
* already. If the conflict was successfully handled so that the caller can go ahead with
* copying the table, this will return true.
*
* @param srcCluster source cluster
* @param destCluster destination cluster
* @param srcPartition source partition
* @param existingDestPartition the Hive Thift table partition corresponding to the conflicting
* object on the destination cluster
* @return whether or not conflict was resolved and the partition can be copied
*
* @throws HiveMetastoreException if there an error making a metastore call
*/
public boolean handleCopyConflict(
Cluster srcCluster,
Cluster destCluster,
Partition srcPartition,
Partition existingDestPartition) {
// Partitions can be usually overwritten without issues
return true;
}
private boolean locationOnS3(StorageDescriptor sd) {
String location = sd.getLocation();
return location != null && (location.startsWith("s3n") || location.startsWith("s3a"));
}
}
| 9,498 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/configuration/ConfiguredClusterFactory.java | package com.airbnb.reair.incremental.configuration;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Optional;
/**
 * A {@link ClusterFactory} that builds the source and destination {@link Cluster} objects (and
 * the {@link DirectoryCopier}) entirely from keys in a Hadoop {@link Configuration}.
 */
public class ConfiguredClusterFactory implements ClusterFactory {

  // Empty until setConf() is called; all factory methods fail fast with a
  // ConfigurationException if the configuration was never set.
  private Optional<Configuration> optionalConf = Optional.empty();

  @Override
  public void setConf(Configuration conf) {
    // ofNullable: a null Configuration leaves the factory unconfigured instead of throwing
    // an NPE here (Optional.of(null) previously threw at set time).
    this.optionalConf = Optional.ofNullable(conf);
  }

  /**
   * Parses and validates a metastore Thrift URI of the form thrift://host:port.
   *
   * @param thriftUri the URI string to parse
   * @return the parsed URI
   * @throws ConfigurationException if the URI is malformed, lacks a port, or is not a thrift URI
   */
  private static URI makeUri(String thriftUri) throws ConfigurationException {
    try {
      URI uri = new URI(thriftUri);

      // URI.getPort() returns -1 when no port is present in the URI.
      if (uri.getPort() <= 0) {
        throw new ConfigurationException("No port specified in "
            + thriftUri);
      }

      if (!"thrift".equals(uri.getScheme())) {
        throw new ConfigurationException("Not a thrift URI; "
            + thriftUri);
      }

      return uri;
    } catch (URISyntaxException e) {
      throw new ConfigurationException(e);
    }
  }

  /**
   * @return the configuration previously supplied via setConf
   * @throws ConfigurationException if no configuration has been set
   */
  private Configuration requireConf() throws ConfigurationException {
    return optionalConf.orElseThrow(
        () -> new ConfigurationException("Configuration not set!"));
  }

  /**
   * Builds a cluster object from the given configuration keys. Shared by getSrcCluster() and
   * getDestCluster(), which previously duplicated this logic.
   */
  private static Cluster makeCluster(
      Configuration conf,
      String nameKey,
      String metastoreUrlKey,
      String hdfsRootKey,
      String hdfsTmpKey) throws ConfigurationException {
    URI metastoreUrl = makeUri(conf.get(metastoreUrlKey));
    return new HardCodedCluster(
        conf.get(nameKey),
        metastoreUrl.getHost(),
        metastoreUrl.getPort(),
        null,
        null,
        new Path(conf.get(hdfsRootKey)),
        new Path(conf.get(hdfsTmpKey)));
  }

  @Override
  public Cluster getDestCluster() throws ConfigurationException {
    return makeCluster(
        requireConf(),
        ConfigurationKeys.DEST_CLUSTER_NAME,
        ConfigurationKeys.DEST_CLUSTER_METASTORE_URL,
        ConfigurationKeys.DEST_HDFS_ROOT,
        ConfigurationKeys.DEST_HDFS_TMP);
  }

  @Override
  public Cluster getSrcCluster() throws ConfigurationException {
    return makeCluster(
        requireConf(),
        ConfigurationKeys.SRC_CLUSTER_NAME,
        ConfigurationKeys.SRC_CLUSTER_METASTORE_URL,
        ConfigurationKeys.SRC_HDFS_ROOT,
        ConfigurationKeys.SRC_HDFS_TMP);
  }

  @Override
  public DirectoryCopier getDirectoryCopier() throws ConfigurationException {
    Configuration conf = requireConf();
    return new DirectoryCopier(
        conf,
        new Path(conf.get(ConfigurationKeys.DEST_HDFS_TMP)),
        conf.getBoolean(ConfigurationKeys.SYNC_MODIFIED_TIMES_FOR_FILE_COPY, true));
  }
}
| 9,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.