index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/deploy/ConfigurationKeys.java | package com.airbnb.reair.incremental.deploy;
/**
 * Keys used in the configuration for deploying the replication server.
 *
 * <p>This class is a namespace for configuration key constants only and is not meant to be
 * instantiated or subclassed.
 */
public final class ConfigurationKeys {

  /** Utility class - prevent instantiation. */
  private ConfigurationKeys() {
  }

  // JDBC URL to the DB containing the audit log table
  public static final String AUDIT_LOG_JDBC_URL = "airbnb.reair.audit_log.db.jdbc_url";
  // User for the audit log DB
  public static final String AUDIT_LOG_DB_USER = "airbnb.reair.audit_log.db.username";
  // Password for the audit log DB
  public static final String AUDIT_LOG_DB_PASSWORD = "airbnb.reair.audit_log.db.password";
  // Name of the audit log table
  public static final String AUDIT_LOG_DB_TABLE = "airbnb.reair.audit_log.db.table_name";
  // Name of the table containing serialized thrift objects from the audit log
  public static final String AUDIT_LOG_OBJECTS_DB_TABLE =
      "airbnb.reair.audit_log.objects.db.table_name";
  // Name of the table containing mapred job stats
  public static final String AUDIT_LOG_MAPRED_STATS_DB_TABLE =
      "airbnb.reair.audit_log.mapred_stats.db.table_name";
  // Affects how many AuditLogEntries are read and processed at once, default 128
  public static final String AUDIT_LOG_PROCESSING_BATCH_SIZE =
      "airbnb.reair.audit_log.batch_size";
  // JDBC URL to the DB containing the replication state tables
  public static final String STATE_JDBC_URL = "airbnb.reair.state.db.jdbc_url";
  // User for the state DB
  public static final String STATE_DB_USER = "airbnb.reair.state.db.username";
  // Password for the state DB
  public static final String STATE_DB_PASSWORD = "airbnb.reair.state.db.password";
  // Name of the table containing replication job state
  public static final String STATE_DB_TABLE = "airbnb.reair.state.db.table_name";
  // Name of the table containing key/value pairs
  public static final String STATE_KV_DB_TABLE = "airbnb.reair.state.kv.db.table_name";
  // When running queries to the DB, the number of times to retry if there's an error
  public static final String DB_QUERY_RETRIES =
      "airbnb.reair.db.query.retries";
  // Whether monitoring via StatsD is enabled
  public static final String STATSD_ENABLED = "airbnb.reair.statsd.enabled";
  // Host to send StatsD metrics to (default: localhost)
  public static final String STATSD_HOST = "airbnb.reair.statsd.host";
  // Port to send StatsD metrics to (default: 8125)
  public static final String STATSD_PORT = "airbnb.reair.statsd.port";
  // Prefix for StatsD metric names (default: reair)
  public static final String STATSD_PREFIX = "airbnb.reair.statsd.prefix";
  // ReplicationJob delay reported thresholds (seconds) (comma-separated)
  public static final String REPLICATION_JOB_METRIC_THRESHOLDS =
      "airbnb.reair.replication_job.threshold_seconds";
  // Frequency at which to report stats in the ReplicationJobRegistry
  public static final String REPLICATION_JOB_REGISTRY_REPORT_INTERVAL_SEC =
      "airbnb.reair.replication.report.threshold_seconds";
  // Name to use for the source cluster
  public static final String SRC_CLUSTER_NAME = "airbnb.reair.clusters.src.name";
  // URL to the source cluster's metastore Thrift server.
  public static final String SRC_CLUSTER_METASTORE_URL = "airbnb.reair.clusters.src.metastore.url";
  // The root of the HDFS directory for the source cluster
  public static final String SRC_HDFS_ROOT = "airbnb.reair.clusters.src.hdfs.root";
  // The root of the temporary directory for storing temporary files on the source cluster
  public static final String SRC_HDFS_TMP = "airbnb.reair.clusters.src.hdfs.tmp";
  // Name to use for the destination cluster
  public static final String DEST_CLUSTER_NAME = "airbnb.reair.clusters.dest.name";
  // URL to the destination cluster's metastore Thrift server.
  public static final String DEST_CLUSTER_METASTORE_URL =
      "airbnb.reair.clusters.dest.metastore.url";
  // The root of the HDFS directory for the destination cluster
  public static final String DEST_HDFS_ROOT = "airbnb.reair.clusters.dest.hdfs.root";
  // The root of the temporary directory for storing temporary files on the destination cluster
  public static final String DEST_HDFS_TMP = "airbnb.reair.clusters.dest.hdfs.tmp";
  // Class to use for filtering out entries from the audit log
  public static final String OBJECT_FILTER_CLASS = "airbnb.reair.object.filter";
  // Number of threads to use for copying objects in the incremental replication server
  public static final String WORKER_THREADS = "airbnb.reair.worker.threads";
  // Maximum number of jobs to keep in memory in the incremental replication server
  public static final String MAX_JOBS_IN_MEMORY = "airbnb.reair.jobs.in_memory_count";
  // The port for the Thrift server to listen on
  public static final String THRIFT_SERVER_PORT = "airbnb.reair.thrift.port";
  // When copying tables or partitions using an MR job, fail the job and retry if the job takes
  // longer than this many seconds.
  public static final String COPY_JOB_TIMEOUT_SECONDS = "airbnb.reair.copy.timeout.seconds";
  // Whether to use a size based timeout for copy jobs
  public static final String COPY_JOB_DYNAMIC_TIMEOUT_ENABLED =
      "airbnb.reair.copy.timeout.dynamic.enabled";
  // Per-GB-per-mapper component of the dynamic copy job timeout (ms)
  public static final String COPY_JOB_DYNAMIC_TIMEOUT_MS_PER_GB_PER_MAPPER =
      "airbnb.reair.copy.timeout.dynamic.ms_per_gb_per_mapper";
  // Base component of the dynamic copy job timeout (ms)
  public static final String COPY_JOB_DYNAMIC_TIMEOUT_BASE =
      "airbnb.reair.copy.timeout.dynamic.base.ms";
  // Upper bound on the dynamic copy job timeout (ms)
  public static final String COPY_JOB_DYNAMIC_TIMEOUT_MAX =
      "airbnb.reair.copy.timeout.dynamic.max.ms";
  // If a replication job fails, the number of times to retry the job.
  public static final String JOB_RETRIES = "airbnb.reair.job.retries";
  // After a copy, whether to set / check that modified times for the copied files match between
  // the source and the destination. Set to false for file systems that don't support changes
  // to the modified time.
  public static final String SYNC_MODIFIED_TIMES_FOR_FILE_COPY =
      "airbnb.reair.copy.sync_modified_times";

  // Following are settings pertinent to batch replication only.

  // Location to store the output of batch replication jobs
  public static final String BATCH_JOB_OUTPUT_DIR = "airbnb.reair.clusters.batch.output.dir";
  // Location to store the input for replication jobs
  public static final String BATCH_JOB_INPUT_LIST = "airbnb.reair.clusters.batch.input";
  // A list of regex'es to use to exclude tables in batch replication
  public static final String BATCH_JOB_METASTORE_BLACKLIST =
      "airbnb.reair.clusters.batch.metastore.blacklist";
  // Name of the class for creating the cluster object in batch replication. Mainly for testing.
  public static final String BATCH_JOB_CLUSTER_FACTORY_CLASS =
      "airbnb.reair.clusters.batch.cluster.factory.class";
  // Whether to overwrite newer tables/partitions on the destination. Default is true.
  public static final String BATCH_JOB_OVERWRITE_NEWER =
      "airbnb.reair.batch.overwrite.newer";
  // The number of reducers to use for jobs where reducers perform metastore operations
  public static final String BATCH_JOB_METASTORE_PARALLELISM =
      "airbnb.reair.batch.metastore.parallelism";
  // The number of reducers to use for jobs where reducers perform file copies
  public static final String BATCH_JOB_COPY_PARALLELISM =
      "airbnb.reair.batch.copy.parallelism";
  // Whether to try to compare checksums to validate file copies when possible
  public static final String BATCH_JOB_VERIFY_COPY_CHECKSUM =
      "airbnb.reair.batch.copy.checksum.verify";
}
| 9,500 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/deploy/ReplicationLauncher.java | package com.airbnb.reair.incremental.deploy;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.db.DbConnectionWatchdog;
import com.airbnb.reair.db.DbKeyValueStore;
import com.airbnb.reair.db.StaticDbConnectionFactory;
import com.airbnb.reair.incremental.ReplicationServer;
import com.airbnb.reair.incremental.StateUpdateException;
import com.airbnb.reair.incremental.auditlog.AuditLogEntryException;
import com.airbnb.reair.incremental.auditlog.AuditLogReader;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.ConfiguredClusterFactory;
import com.airbnb.reair.incremental.db.PersistedJobInfoStore;
import com.airbnb.reair.incremental.filter.ReplicationFilter;
import com.airbnb.reair.incremental.thrift.TReplicationService;
import com.timgroup.statsd.NoOpStatsDClient;
import com.timgroup.statsd.NonBlockingStatsDClient;
import com.timgroup.statsd.StatsDClient;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TSimpleServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TServerTransport;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
 * Launcher for the incremental replication server. Wires together the audit log reader, the
 * replication state store, the source/destination clusters, the replication filters, and a Thrift
 * control server, then runs the replication loop until the process exits.
 */
public class ReplicationLauncher {
  private static final Log LOG = LogFactory.getLog(
      ReplicationLauncher.class);

  /**
   * Launches the replication server process using the passed in configuration.
   *
   * @param conf configuration object
   * @param startAfterAuditLogId instruct the server to start replicating entries after this ID
   * @param resetState if there were jobs that were in progress last time the process exited, do not
   *                   resume them and instead mark them as aborted
   *
   * @throws AuditLogEntryException if there is an error reading entries from the audit log
   * @throws SQLException if there is an error accessing the DB
   * @throws ConfigurationException if there is an error with the supplied configuration
   * @throws IOException if there is an error communicating with services
   * @throws StateUpdateException if there is an error updating the replication state store
   */
  public static void launch(Configuration conf,
                            Optional<Long> startAfterAuditLogId,
                            boolean resetState)
      throws AuditLogEntryException, ConfigurationException, IOException, StateUpdateException,
      SQLException {

    // Create the StatsD metrics client, or a no-op client if monitoring is disabled.
    boolean statsDEnabled = conf.getBoolean(ConfigurationKeys.STATSD_ENABLED, false);
    StatsDClient statsDClient;
    if (statsDEnabled) {
      statsDClient = new NonBlockingStatsDClient(
          conf.get(ConfigurationKeys.STATSD_PREFIX, "reair"),
          conf.get(ConfigurationKeys.STATSD_HOST, "localhost"),
          conf.getInt(ConfigurationKeys.STATSD_PORT, 8125));
    } else {
      statsDClient = new NoOpStatsDClient();
    }

    // Create the audit log reader
    String auditLogJdbcUrl = conf.get(
        ConfigurationKeys.AUDIT_LOG_JDBC_URL);
    String auditLogDbUser = conf.get(
        ConfigurationKeys.AUDIT_LOG_DB_USER);
    String auditLogDbPassword = conf.get(
        ConfigurationKeys.AUDIT_LOG_DB_PASSWORD);
    DbConnectionFactory auditLogConnectionFactory =
        new StaticDbConnectionFactory(
            auditLogJdbcUrl,
            auditLogDbUser,
            auditLogDbPassword);
    String auditLogTableName = conf.get(
        ConfigurationKeys.AUDIT_LOG_DB_TABLE);
    String auditLogObjectsTableName = conf.get(
        ConfigurationKeys.AUDIT_LOG_OBJECTS_DB_TABLE);
    String auditLogMapRedStatsTableName = conf.get(
        ConfigurationKeys.AUDIT_LOG_MAPRED_STATS_DB_TABLE);

    // Start reading from ID 0; the server seeks to the correct position based on
    // startAfterAuditLogId / persisted state.
    final AuditLogReader auditLogReader = new AuditLogReader(
        conf,
        auditLogConnectionFactory,
        auditLogTableName,
        auditLogObjectsTableName,
        auditLogMapRedStatsTableName,
        0);

    // Create the connection to the key value store in the DB
    String stateJdbcUrl = conf.get(
        ConfigurationKeys.STATE_JDBC_URL);
    String stateDbUser = conf.get(
        ConfigurationKeys.STATE_DB_USER);
    String stateDbPassword = conf.get(
        ConfigurationKeys.STATE_DB_PASSWORD);
    String keyValueTableName = conf.get(
        ConfigurationKeys.STATE_KV_DB_TABLE);
    DbConnectionFactory stateConnectionFactory =
        new StaticDbConnectionFactory(
            stateJdbcUrl,
            stateDbUser,
            stateDbPassword);
    final DbKeyValueStore dbKeyValueStore = new DbKeyValueStore(
        stateConnectionFactory,
        keyValueTableName);

    String stateTableName = conf.get(
        ConfigurationKeys.STATE_DB_TABLE);

    // Create the store for replication job info
    PersistedJobInfoStore persistedJobInfoStore =
        new PersistedJobInfoStore(
            conf,
            stateConnectionFactory,
            stateTableName);

    if (resetState) {
      LOG.info("Resetting state by aborting non-completed jobs");
      persistedJobInfoStore.abortRunnableFromDb();
    }

    ClusterFactory clusterFactory = new ConfiguredClusterFactory();
    clusterFactory.setConf(conf);

    final Cluster srcCluster = clusterFactory.getSrcCluster();
    final Cluster destCluster = clusterFactory.getDestCluster();

    // Instantiate the configured replication filters. Fail fast with a descriptive error if the
    // filter class setting is missing - conf.get() returns null for unset keys and splitting null
    // would otherwise throw an uninformative NullPointerException.
    String objectFilterClassNames = conf.get(
        ConfigurationKeys.OBJECT_FILTER_CLASS);
    if (objectFilterClassNames == null) {
      throw new ConfigurationException(
          "Missing configuration value for " + ConfigurationKeys.OBJECT_FILTER_CLASS);
    }
    final List<ReplicationFilter> replicationFilters = new ArrayList<>();
    String[] classNames = objectFilterClassNames.split(",");
    for (String objectFilterClassName : classNames) {
      // Tolerate whitespace and stray newlines around the class names.
      objectFilterClassName = objectFilterClassName.trim().replaceAll("\\r|\\n", "");
      // Instantiate the class via its no-arg constructor.
      Object obj;
      try {
        Class<?> clazz = Class.forName(objectFilterClassName);
        obj = clazz.getDeclaredConstructor().newInstance();
        if (!(obj instanceof ReplicationFilter)) {
          throw new ConfigurationException(String.format(
              "%s is not of type %s",
              obj.getClass().getName(),
              ReplicationFilter.class.getName()));
        }
      } catch (ReflectiveOperationException e) {
        throw new ConfigurationException(e);
      }
      ReplicationFilter filter = (ReplicationFilter) obj;
      filter.setConf(conf);
      replicationFilters.add(filter);
    }

    int numWorkers = conf.getInt(
        ConfigurationKeys.WORKER_THREADS,
        1);
    int maxJobsInMemory = conf.getInt(
        ConfigurationKeys.MAX_JOBS_IN_MEMORY,
        100);
    final int thriftServerPort = conf.getInt(
        ConfigurationKeys.THRIFT_SERVER_PORT,
        9996);

    LOG.debug("Running replication server");
    ReplicationServer replicationServer = new ReplicationServer(
        conf,
        srcCluster,
        destCluster,
        auditLogReader,
        dbKeyValueStore,
        persistedJobInfoStore,
        replicationFilters,
        clusterFactory.getDirectoryCopier(),
        statsDClient,
        numWorkers,
        maxJobsInMemory,
        startAfterAuditLogId);

    // Start the Thrift control server on a daemon-style background thread.
    final TReplicationService.Processor processor =
        new TReplicationService.Processor<TReplicationService.Iface>(
            replicationServer);
    Runnable serverRunnable = new Runnable() {
      public void run() {
        try {
          TServerTransport serverTransport = new TServerSocket(
              thriftServerPort);
          TServer server = new TSimpleServer(
              new TServer.Args(
                  serverTransport).processor(processor));
          LOG.debug("Starting the thrift server on port " + thriftServerPort);
          server.serve();
        } catch (Exception e) {
          LOG.error("Thrift server died!", e);
        }
      }
    };
    Thread serverThread = new Thread(serverRunnable);
    serverThread.start();

    // Start DB connection watchdog - kills the server if a DB connection
    // can't be made.
    DbConnectionWatchdog dbConnectionWatchdog = new DbConnectionWatchdog(
        stateConnectionFactory);
    dbConnectionWatchdog.start();

    // Start replicating entries; runs effectively forever.
    try {
      replicationServer.run(Long.MAX_VALUE);
    } finally {
      LOG.debug("Replication server stopped running");
    }
  }

  /**
   * Launcher entry point. Parses command line options, loads the configuration files, and starts
   * the replication server.
   *
   * @param argv array of string arguments
   *
   * @throws ParseException if the command line arguments cannot be parsed
   */
  @SuppressWarnings("static-access")
  public static void main(String[] argv)
      throws AuditLogEntryException, ConfigurationException, IOException, ParseException,
      StateUpdateException, SQLException {
    Options options = new Options();

    options.addOption(OptionBuilder.withLongOpt("config-files")
        .withDescription("Comma separated list of paths to "
            + "configuration files")
        .hasArg()
        .withArgName("PATH")
        .create());

    options.addOption(OptionBuilder.withLongOpt("start-after-id")
        .withDescription("Start processing entries from the audit "
            + "log after this ID")
        .hasArg()
        .withArgName("ID")
        .create());

    CommandLineParser parser = new BasicParser();
    CommandLine cl = parser.parse(options, argv);

    String configPaths = null;
    Optional<Long> startAfterId = Optional.empty();
    boolean resetState = false;

    if (cl.hasOption("config-files")) {
      configPaths = cl.getOptionValue("config-files");
      LOG.info("configPaths=" + configPaths);
    }

    if (cl.hasOption("start-after-id")) {
      startAfterId = Optional.of(
          Long.parseLong(cl.getOptionValue("start-after-id")));
      LOG.info("startAfterId=" + startAfterId);
      // Jumping to an explicit audit log position invalidates any in-progress job state.
      resetState = true;
    }

    // Threads shouldn't exit with an exception - terminate to facilitate debugging.
    Thread.setDefaultUncaughtExceptionHandler((Thread thread, Throwable throwable) -> {
      LOG.error(String.format("Exiting due to uncaught exception from thread %s!", thread),
          throwable);
      System.exit(-1);
    });

    Configuration conf = new Configuration();
    if (configPaths != null) {
      for (String configPath : configPaths.split(",")) {
        conf.addResource(new Path(configPath));
      }
    }

    try {
      launch(conf, startAfterId, resetState);
    } catch (Exception e) {
      LOG.fatal("Got an exception!", e);
      throw e;
    }
  }
}
| 9,501 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/deploy/HiveCopy.java | package com.airbnb.reair.incremental.deploy;
import com.airbnb.reair.common.ArgumentException;
import com.airbnb.reair.common.CliUtils;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.ThriftHiveMetastoreClient;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.HardCodedCluster;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.incremental.primitives.CopyPartitionedTableTask;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.Optional;
/**
 * Command line utility for copying a single Hive table or partition between two clusters whose
 * connection details are given in an XML configuration file.
 */
public class HiveCopy {
  private static final Log LOG = LogFactory.getLog(HiveCopy.class);

  /**
   * Main entry for copy utility. Warning suppression needed for the OptionBuilder API
   *
   * @param args array of command line arguments
   * @return 0 if copy was successful, non-zero otherwise
   *
   * @throws ArgumentException if a required command line argument is missing
   * @throws Exception if there is an error with the copy
   */
  @SuppressWarnings("static-access")
  public static int main(String[] args) throws Exception {
    Options options = new Options();

    options.addOption(OptionBuilder.withLongOpt("config-file")
        .withDescription("Path to the XML configuration file").hasArg().withArgName("PATH")
        .create());

    options.addOption(OptionBuilder.withLongOpt("op")
        .withDescription("name of operation to perform").hasArg().withArgName("OP").create());

    options.addOption(OptionBuilder.withLongOpt("db")
        .withDescription(
            "Hive DB where the table or partition that " + "you want to replicate resides")
        .hasArg().withArgName("DB").create());

    options.addOption(OptionBuilder.withLongOpt("table")
        .withDescription("Hive table to replicate or if a partition "
            + "is specified, the source table for the partition")
        .hasArg().withArgName("TABLE").create());

    // Note: the original code registered this option twice; a single registration suffices.
    options.addOption(OptionBuilder.withLongOpt("partition")
        .withDescription("Hive partition to replicate").hasArg().withArgName("PARTITION").create());

    options.addOption(OptionBuilder.withLongOpt("help").withDescription("Help message").create());

    CommandLineParser parser = new BasicParser();
    CommandLine cl = parser.parse(options, args);

    String configPath = null;
    String dbName = null;
    String tableName = null;
    String partitionName = null;
    String op = null;

    if (cl.hasOption("help")) {
      CliUtils.printHelp("<command>", options);
    }

    if (cl.hasOption("config-file")) {
      configPath = cl.getOptionValue("config-file");
      LOG.info("config-file=" + configPath);
    }

    if (cl.hasOption("db")) {
      dbName = cl.getOptionValue("db");
      LOG.info("db=" + dbName);
    }

    if (cl.hasOption("table")) {
      tableName = cl.getOptionValue("table");
      LOG.info("table=" + tableName);
    }

    if (cl.hasOption("partition")) {
      partitionName = cl.getOptionValue("partition");
      LOG.info("partition=" + partitionName);
    }

    if (cl.hasOption("op")) {
      op = cl.getOptionValue("op");
      LOG.info("op=" + op);
    }

    if (configPath == null) {
      throw new ArgumentException("config path not specified");
    }

    if (dbName == null) {
      throw new ArgumentException("db was not specified!");
    }

    if (tableName == null) {
      throw new ArgumentException("table was not specified!");
    }

    // Validate up front so a missing --op fails with a clear message rather than an NPE
    // when comparing operation names below.
    if (op == null) {
      throw new ArgumentException("op was not specified!");
    }

    Configuration conf = new Configuration();
    conf.addResource(new Path(configPath));

    String srcName = conf.get("airbnb.hive.cluster.src.name");
    String srcMetastoreHost = conf.get("airbnb.hive.cluster.src.metastore.host");
    int srcMetastorePort = Integer.parseInt(conf.get("airbnb.hive.cluster.src.metastore.port"));
    Path srcHdfsRoot = new Path(conf.get("airbnb.hive.cluster.src.hdfs.root"));
    Path srcTmpDir = new Path(conf.get("airbnb.hive.cluster.src.hdfs.tmp.dir"));

    String destName = conf.get("airbnb.hive.cluster.dest.name");
    String destMetastoreHost = conf.get("airbnb.hive.cluster.dest.metastore.host");
    int destMetastorePort = Integer.parseInt(conf.get("airbnb.hive.cluster.dest.metastore.port"));
    Path destHdfsRoot = new Path(conf.get("airbnb.hive.cluster.dest.hdfs.root"));
    Path destTmpDir = new Path(conf.get("airbnb.hive.cluster.dest.hdfs.tmp.dir"));

    HiveObjectSpec spec = new HiveObjectSpec(dbName, tableName, partitionName);

    LOG.info("srcName=" + srcName);
    LOG.info("srcMetastoreHost=" + srcMetastoreHost);
    LOG.info("srcMetastorePort=" + srcMetastorePort);
    LOG.info("srcHdfsRoot=" + srcHdfsRoot);
    LOG.info("srcTmpDir=" + srcTmpDir);

    LOG.info("destName=" + destName);
    LOG.info("destMetastoreHost=" + destMetastoreHost);
    LOG.info("destMetastorePort=" + destMetastorePort);
    LOG.info("destHdfsRoot=" + destHdfsRoot);
    LOG.info("destTmpDir=" + destTmpDir);

    LOG.info("pool.name=" + conf.get("pool.name"));
    LOG.info("spec=" + spec);

    HardCodedCluster srcCluster = new HardCodedCluster(srcName, srcMetastoreHost, srcMetastorePort,
        null, null, srcHdfsRoot, srcTmpDir);
    HardCodedCluster destCluster = new HardCodedCluster(destName, destMetastoreHost,
        destMetastorePort, null, null, destHdfsRoot, destTmpDir);
    DirectoryCopier directoryCopier = new DirectoryCopier(conf, destTmpDir, true);

    ObjectConflictHandler conflictHandler = new ObjectConflictHandler();
    conflictHandler.setConf(conf);
    DestinationObjectFactory destinationObjectFactory = new DestinationObjectFactory();
    destinationObjectFactory.setConf(conf);

    // Dispatch on the requested operation. Null-safe Yoda-style comparisons are used
    // consistently for all three operations.
    if ("copy-unpartitioned-table".equals(op)) {
      LOG.info("Copying an unpartitioned table");
      ThriftHiveMetastoreClient ms = srcCluster.getMetastoreClient();
      Table srcTable = ms.getTable(spec.getDbName(), spec.getTableName());
      CopyUnpartitionedTableTask job = new CopyUnpartitionedTableTask(conf,
          destinationObjectFactory, conflictHandler, srcCluster, destCluster, spec,
          ReplicationUtils.getLocation(srcTable), directoryCopier, true);
      if (job.runTask().getRunStatus() == RunInfo.RunStatus.SUCCESSFUL) {
        return 0;
      } else {
        return -1;
      }
    } else if ("copy-partitioned-table".equals(op)) {
      LOG.info("Copying a partitioned table");
      ThriftHiveMetastoreClient ms = srcCluster.getMetastoreClient();
      Table srcTable = ms.getTable(spec.getDbName(), spec.getTableName());
      CopyPartitionedTableTask job = new CopyPartitionedTableTask(conf, destinationObjectFactory,
          conflictHandler, srcCluster, destCluster, spec, ReplicationUtils.getLocation(srcTable));
      if (job.runTask().getRunStatus() == RunInfo.RunStatus.SUCCESSFUL) {
        return 0;
      } else {
        return -1;
      }
    } else if ("copy-partition".equals(op)) {
      LOG.info("Copying a partition");
      ThriftHiveMetastoreClient ms = srcCluster.getMetastoreClient();
      Partition srcPartition =
          ms.getPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());
      CopyPartitionTask job = new CopyPartitionTask(conf, destinationObjectFactory, conflictHandler,
          srcCluster, destCluster, spec, ReplicationUtils.getLocation(srcPartition),
          Optional.<Path>empty(), new DirectoryCopier(conf, srcCluster.getTmpDir(), true), true);
      if (job.runTask().getRunStatus() == RunInfo.RunStatus.SUCCESSFUL) {
        return 0;
      } else {
        return -1;
      }
    } else {
      throw new RuntimeException("Unhandled op " + op);
    }
  }
}
| 9,502 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/MetadataAction.java | package com.airbnb.reair.incremental.primitives;
/**
* Enumeration to help simplify logic for running metadata actions in the replication tasks.
*/
public enum MetadataAction {
  /** Take no metadata action. */
  NOOP,

  /** Create a new metadata object. */
  CREATE,

  /** Alter an existing metadata object. */
  ALTER
}
| 9,503 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/DropPartitionTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveParameterKeys;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.metastore.api.Partition;
import java.util.Optional;
/**
* Task that drops a partition. The expected modified time for the partition is used like a hash to
* ensure that the right partition is dropped.
*/
public class DropPartitionTask implements ReplicationTask {
  private static final Log LOG = LogFactory.getLog(DropPartitionTask.class);

  private final Cluster srcCluster;
  private final Cluster destCluster;
  private final HiveObjectSpec spec;
  private final Optional<String> srcTldt;

  /**
   * Constructor for a task that drops a partition.
   *
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param spec specification for the Hive partition to drop
   * @param srcTldt The expected modified time for the partition to drop - the
   *                transient_lastDdlTime value in the parameters field of the Thrift object.
   *                Acts like a hash: if it doesn't match the destination, nothing is dropped.
   */
  public DropPartitionTask(
      Cluster srcCluster,
      Cluster destCluster,
      HiveObjectSpec spec,
      Optional<String> srcTldt) {
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.srcTldt = srcTldt;
    this.spec = spec;
  }

  @Override
  public RunInfo runTask() throws HiveMetastoreException {
    LOG.debug("Looking to drop: " + spec);
    LOG.debug("Source object TLDT is: " + srcTldt);

    // Guard: without the source TLDT we can't verify we'd drop the right partition.
    if (!srcTldt.isPresent()) {
      LOG.error("For safety, failing drop job since source object " + " TLDT is missing!");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    String sourceTldt = srcTldt.get();

    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    Partition partitionOnDest =
        destMs.getPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());

    // Guard: nothing to drop if the partition isn't on the destination.
    if (partitionOnDest == null) {
      LOG.warn("Missing " + spec + " on destination, so can't drop!");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    LOG.debug("Destination object is: " + partitionOnDest);

    // Guard: only drop when the destination's TLDT matches the expected source value.
    String destTldt = partitionOnDest.getParameters().get(HiveParameterKeys.TLDT);
    if (!sourceTldt.equals(destTldt)) {
      LOG.debug(
          String.format("Not dropping %s as source(%s) and " + "destination(%s) TLDT's don't match",
              spec.toString(), srcTldt, destTldt));
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }

    LOG.debug(String.format("Destination partition %s matches expected" + " TLDT (%s)", spec,
        destTldt));
    LOG.debug("Dropping " + spec);
    destMs.dropPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName(), true);
    LOG.debug("Dropped " + spec);
    return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, 0);
  }

  @Override
  public LockSet getRequiredLocks() {
    // Shared lock on the parent table, exclusive lock on the partition being dropped.
    LockSet locks = new LockSet();
    locks.add(new Lock(Lock.Type.SHARED, spec.getTableSpec().toString()));
    locks.add(new Lock(Lock.Type.EXCLUSIVE, spec.toString()));
    return locks;
  }
}
| 9,504 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/CopyCompleteTableTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveUtils;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* Task that copies the entire table, include all the partitions if it's a partitioned table. To
* reduce the number of distcp jobs necessary, this task tries to copy a common parent directory.
* However, a better solution would be to use a copy tool that can copy multiple source and
* destination directories simultaneously.
*/
public class CopyCompleteTableTask implements ReplicationTask {
private static final Log LOG = LogFactory.getLog(CopyCompleteTableTask.class);
private Configuration conf;
private DestinationObjectFactory objectModifier;
private ObjectConflictHandler objectConflictHandler;
private Cluster srcCluster;
private Cluster destCluster;
private HiveObjectSpec spec;
private Optional<Path> tableLocation;
private ParallelJobExecutor copyPartitionsExecutor;
private DirectoryCopier directoryCopier;
/**
 * Constructs a task for copying an entire table.
 *
 * <p>The constructor only stores the supplied collaborators; no metastore or filesystem calls
 * are made until {@code runTask()} is invoked.
 *
 * @param conf configuration object
 * @param objectFactory factory for creating objects for the destination cluster
 * @param objectConflictHandler handler for addressing conflicting tables/partitions on the
 *                              destination cluster
 * @param srcCluster source cluster
 * @param destCluster destination cluster
 * @param spec the Hive table specification
 * @param tableLocation the location of the table, if known
 * @param copyPartitionsExecutor an executor for copying the partitions of a table
 * @param directoryCopier runs directory copies through MR jobs
 */
public CopyCompleteTableTask(
Configuration conf,
DestinationObjectFactory objectFactory,
ObjectConflictHandler objectConflictHandler,
Cluster srcCluster,
Cluster destCluster,
HiveObjectSpec spec,
Optional<Path> tableLocation,
ParallelJobExecutor copyPartitionsExecutor,
DirectoryCopier directoryCopier) {
this.conf = conf;
this.objectModifier = objectFactory;
this.objectConflictHandler = objectConflictHandler;
this.srcCluster = srcCluster;
this.destCluster = destCluster;
this.spec = spec;
this.tableLocation = tableLocation;
this.copyPartitionsExecutor = copyPartitionsExecutor;
this.directoryCopier = directoryCopier;
}
@Override
public RunInfo runTask()
throws ConfigurationException, DistCpException, HiveMetastoreException, IOException {
LOG.debug("Copying " + spec);
HiveMetastoreClient destMs = destCluster.getMetastoreClient();
HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();
// Get a fresh copy of the metadata from the source Hive metastore
Table freshSrcTable = srcMs.getTable(spec.getDbName(), spec.getTableName());
if (freshSrcTable == null) {
LOG.warn("Source table " + spec + " doesn't exist, so not " + "copying");
return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
}
if (HiveUtils.isPartitioned(freshSrcTable)) {
LOG.debug("Source table " + spec + " is a partitioned table");
// Create a collection containing all the partitions that should
// be copied.
List<String> partitionNames = srcMs.getPartitionNames(spec.getDbName(), spec.getTableName());
Map<HiveObjectSpec, Partition> specToPartition = new HashMap<>();
for (String partitionName : partitionNames) {
Partition partition =
srcMs.getPartition(spec.getDbName(), spec.getTableName(), partitionName);
if (partition == null) {
throw new HiveMetastoreException(String.format("Partition %s does not exist!",
spec));
}
HiveObjectSpec partitionSpec =
new HiveObjectSpec(spec.getDbName(), spec.getTableName(), partitionName);
specToPartition.put(partitionSpec, partition);
}
Optional<Path> commonDirectory = Optional.empty();
if (specToPartition.size() > 0) {
// If there are partitions to copy, see if there a common
// parent directory to optimistically copy.
Optional<Path> foundCommonDir =
CopyPartitionsTask.findCommonDirectory(spec, specToPartition);
if (foundCommonDir.isPresent()
&& objectModifier.shouldCopyData(foundCommonDir.get().toString())) {
commonDirectory = foundCommonDir;
} else {
LOG.warn("Not copying common directory " + foundCommonDir);
}
}
CopyPartitionsTask job = new CopyPartitionsTask(conf, objectModifier, objectConflictHandler,
srcCluster, destCluster, spec, partitionNames, commonDirectory, copyPartitionsExecutor,
directoryCopier);
RunInfo copyPartitionsRunInfo = job.runTask();
if (copyPartitionsRunInfo.getRunStatus() != RunInfo.RunStatus.SUCCESSFUL
|| copyPartitionsRunInfo.getRunStatus() != RunInfo.RunStatus.NOT_COMPLETABLE) {
return copyPartitionsRunInfo;
}
CopyPartitionedTableTask copyTableTask = new CopyPartitionedTableTask(conf, objectModifier,
objectConflictHandler, srcCluster, destCluster, spec, commonDirectory);
RunInfo copyTableRunInfo = copyTableTask.runTask();
return new RunInfo(copyTableRunInfo.getRunStatus(),
copyPartitionsRunInfo.getBytesCopied() + copyTableRunInfo.getBytesCopied());
} else {
LOG.debug("Source table " + spec + " is an unpartitioned table");
CopyUnpartitionedTableTask copyJob =
new CopyUnpartitionedTableTask(conf, objectModifier, objectConflictHandler, srcCluster,
destCluster, spec, tableLocation, directoryCopier, true);
return copyJob.runTask();
}
}
@Override
public LockSet getRequiredLocks() {
LockSet lockSet = new LockSet();
lockSet.add(new Lock(Lock.Type.EXCLUSIVE, spec.toString()));
return lockSet;
}
}
| 9,505 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/CopyPartitionJob.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.multiprocessing.Job;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
/**
 * Job wrapper that runs a {@link CopyPartitionTask}, retrying transient failures with
 * exponential backoff and recording progress in a shared {@link CopyPartitionsCounter}.
 */
public class CopyPartitionJob extends Job {
  private static final Log LOG = LogFactory.getLog(CopyPartitionJob.class);

  private final CopyPartitionTask copyPartitionTask;
  private final CopyPartitionsCounter copyPartitionsCounter;

  /**
   * Constructs a job that runs a partition copy task.
   *
   * @param copyPartitionTask the task that copies a single partition
   * @param copyPartitionsCounter shared counter tracking completed copies and bytes copied
   */
  public CopyPartitionJob(
      CopyPartitionTask copyPartitionTask,
      CopyPartitionsCounter copyPartitionsCounter) {
    this.copyPartitionTask = copyPartitionTask;
    this.copyPartitionsCounter = copyPartitionsCounter;
  }

  /**
   * Runs the copy task until it reaches a terminal status, sleeping between attempts
   * after transient exceptions.
   *
   * @return 0 if the task finished (successful or not completable), -1 on failure or an
   *         unrecoverable configuration error
   */
  @Override
  public int run() {
    int attempt = 0;
    while (true) {
      try {
        RunInfo runInfo = copyPartitionTask.runTask();
        LOG.debug(String.format("Copy partition task %s finished " + "with status %s",
            copyPartitionTask.getSpec(), runInfo.getRunStatus()));
        switch (runInfo.getRunStatus()) {
          case SUCCESSFUL:
          case NOT_COMPLETABLE:
            copyPartitionsCounter.incrementBytesCopied(runInfo.getBytesCopied());
            copyPartitionsCounter.incrementCompletionCount();
            return 0;
          case FAILED:
            return -1;
          default:
            throw new RuntimeException("State not handled: " + runInfo.getRunStatus());
        }
      } catch (HiveMetastoreException | DistCpException | IOException e) {
        // These are treated as transient (metastore hiccup, failed copy) and retried below.
        LOG.error("Got an exception - will retry", e);
      } catch (ConfigurationException e) {
        LOG.error("Got unrecoverable exception", e);
        return -1;
      }
      LOG.error("Because " + copyPartitionTask.getSpec() + " was not successful, "
          + "it will be retried after sleeping.");
      try {
        ReplicationUtils.exponentialSleep(attempt);
      } catch (InterruptedException e) {
        LOG.warn("Got interrupted", e);
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        return 0;
      }
      attempt++;
    }
  }

  @Override
  public LockSet getRequiredLocks() {
    return copyPartitionTask.getRequiredLocks();
  }

  @Override
  public String toString() {
    return "CopyPartitionJob{" + "spec=" + copyPartitionTask.getSpec() + '}';
  }
}
| 9,506 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/CopyPartitionTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.io.IOException;
import java.util.Arrays;
import java.util.Optional;
/**
* Task that copies a single Hive partition - both data and metadata.
*
* <p>Known issue: if multiple copy partition jobs are kicked off, and the partitioned table doesn't
* exist on the destination, then it's possible that multiple copy partition jobs try to create the
* same partitioned table.
*
* <p>This can result in a failure, but should be corrected on a retry.
*/
public class CopyPartitionTask implements ReplicationTask {
  private static final Log LOG = LogFactory.getLog(CopyPartitionTask.class);

  private final Configuration conf;
  private final DestinationObjectFactory destObjectFactory;
  private final ObjectConflictHandler objectConflictHandler;
  private final Cluster srcCluster;
  private final Cluster destCluster;
  private final HiveObjectSpec spec;
  private final Optional<Path> partitionLocation;
  private final Optional<Path> optimisticCopyRoot;
  private final DirectoryCopier directoryCopier;
  private final boolean allowDataCopy;

  /**
   * Constructor for a task that copies a single Hive partition.
   *
   * @param conf configuration object
   * @param destObjectFactory factory for creating objects for the destination cluster
   * @param objectConflictHandler handler for addressing conflicting tables/partitions on the
   *                              destination cluster
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param spec specification for the Hive partition to copy
   * @param partitionLocation the location for the partition, if applicable
   * @param optimisticCopyRoot if data for this partitioned was copied in advance, the root
   *                           directory where the data was copied to. For example, if the partition
   *                           data was located in /a/b/c and /a was copied to /tmp/copy/a, then the
   *                           copy root directory is /tmp/copy
   * @param directoryCopier runs directory copies through MR jobs
   * @param allowDataCopy Whether to copy data for this partition. If set to false, the task will
   *                      check to see if the data exists already and if not, it will fail the task.
   */
  public CopyPartitionTask(
      Configuration conf,
      DestinationObjectFactory destObjectFactory,
      ObjectConflictHandler objectConflictHandler,
      Cluster srcCluster,
      Cluster destCluster,
      HiveObjectSpec spec,
      Optional<Path> partitionLocation,
      Optional<Path> optimisticCopyRoot,
      DirectoryCopier directoryCopier,
      boolean allowDataCopy) {
    this.conf = conf;
    this.destObjectFactory = destObjectFactory;
    this.objectConflictHandler = objectConflictHandler;
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.spec = spec;
    this.partitionLocation = partitionLocation;
    this.optimisticCopyRoot = optimisticCopyRoot;
    this.directoryCopier = directoryCopier;
    this.allowDataCopy = allowDataCopy;
  }

  /**
   * Copies the partition's metadata and, when required, its data files to the destination
   * cluster. The enclosing table is re-copied first if its schema differs on the destination.
   *
   * @return RunInfo with the run status and the number of bytes copied
   * @throws ConfigurationException if the config is improper
   * @throws HiveMetastoreException if there is an error making a metastore call
   * @throws DistCpException if there is an error running DistCp
   * @throws IOException if there is an error with reading or writing files
   */
  @Override
  public RunInfo runTask()
      throws ConfigurationException, HiveMetastoreException, DistCpException, IOException {
    LOG.debug("Copying " + spec);

    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();

    Partition freshSrcPartition =
        srcMs.getPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());

    if (freshSrcPartition == null) {
      LOG.warn("Source partition " + spec + " does not exist, so not " + "copying");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }

    // Unless overwriting newer destinations is allowed, skip the copy when the destination
    // partition was modified more recently than the source.
    if (!conf.getBoolean(ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER, true)) {
      Partition freshDestPartition =
          destMs.getPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());
      if (ReplicationUtils.isSrcOlder(freshSrcPartition, freshDestPartition)) {
        LOG.warn(String.format(
            "Source %s (%s) is older than destination (%s), so not copying",
            spec,
            ReplicationUtils.getLastModifiedTime(freshSrcPartition),
            ReplicationUtils.getLastModifiedTime(freshDestPartition)));
        return new RunInfo(RunInfo.RunStatus.DEST_IS_NEWER, 0);
      }
    }

    // Before copying a partition, first make sure that table is up to date
    Table srcTable = srcMs.getTable(spec.getDbName(), spec.getTableName());
    Table destTable = destMs.getTable(spec.getDbName(), spec.getTableName());

    if (srcTable == null) {
      LOG.warn("Source table " + spec + " doesn't exist, so not " + "copying");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }

    if (destTable == null || !ReplicationUtils.schemasMatch(srcTable, destTable)) {
      LOG.warn("Copying source table over to the destination since "
          + "schemas do not match. (source: " + srcTable + " destination: " + destTable + ")");
      CopyPartitionedTableTask copyTableJob =
          new CopyPartitionedTableTask(conf, destObjectFactory, objectConflictHandler, srcCluster,
              destCluster, spec.getTableSpec(), ReplicationUtils.getLocation(srcTable));
      RunInfo status = copyTableJob.runTask();
      if (status.getRunStatus() != RunInfo.RunStatus.SUCCESSFUL) {
        LOG.error("Failed to copy " + spec.getTableSpec());
        return new RunInfo(RunInfo.RunStatus.FAILED, 0);
      }
    }

    Partition existingPartition =
        destMs.getPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());

    Partition destPartition = destObjectFactory.createDestPartition(srcCluster, destCluster,
        freshSrcPartition, existingPartition);

    if (existingPartition != null) {
      LOG.debug("Partition " + spec + " already exists!");
      objectConflictHandler.handleCopyConflict(srcCluster, destCluster, freshSrcPartition,
          existingPartition);
    }

    // Copy HDFS data
    long bytesCopied = 0;

    Optional<Path> srcPath = ReplicationUtils.getLocation(freshSrcPartition);
    Optional<Path> destPath = ReplicationUtils.getLocation(destPartition);

    // Try to copy data only if the location is defined and the location
    // for the destination object is different. Usually, the location will
    // be different as it will be situated on a different HDFS, but for
    // S3 backed tables, the location may not change.
    boolean needToCopy = false;

    // TODO: An optimization can be made here to check for directories that
    // already match and no longer need to be copied.
    if (srcPath.isPresent() && !srcPath.equals(destPath)) {
      // If a directory was copied optimistically, check if the data is
      // there. If the data is there and it matches up with what is
      // expected, then the directory can be moved into place.
      if (optimisticCopyRoot.isPresent()) {
        Path srcLocation = new Path(freshSrcPartition.getSd().getLocation());
        // Assume that on the source, a table is stored at /a, and the
        // partitions are stored at /a/ds=1 and /a/ds=1.
        //
        // For this optimization, the source directory (/u/a) was
        // copied to a temporary location (/tmp/u/a). The optimistic
        // copy root would be /tmp. To figure out the directory
        // containing a partition's data, start with the optimistic
        // copy root and add the relative path from / - e.g.
        // /tmp + u/a/ds=1 = /tmp/u/a/ds=1
        Path copiedPartitionDataLocation = new Path(optimisticCopyRoot.get(),
            StringUtils.stripStart(srcLocation.toUri().getPath(), "/"));

        if (directoryCopier.equalDirs(srcLocation, copiedPartitionDataLocation)) {
          // In this case, the data is there and we can move the
          // directory to the expected location.
          Path destinationPath = new Path(destPartition.getSd().getLocation());
          FsUtils.replaceDirectory(conf, copiedPartitionDataLocation, destinationPath);
        } else {
          needToCopy = !directoryCopier.equalDirs(srcPath.get(), destPath.get());
        }
      } else {
        needToCopy = !directoryCopier.equalDirs(srcPath.get(), destPath.get());
      }
    }

    if (srcPath.isPresent() && destPath.isPresent() && needToCopy) {
      if (!allowDataCopy) {
        LOG.debug(String.format("Need to copy %s to %s, but data " + "copy is not allowed", srcPath,
            destPath));
        return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
      }

      if (!FsUtils.dirExists(conf, srcPath.get())) {
        LOG.error("Source path " + srcPath + " does not exist!");
        return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
      }

      bytesCopied = directoryCopier.copy(srcPath.get(), destPath.get(),
          Arrays.asList(srcCluster.getName(), spec.getDbName(), spec.getTableName()));
    }

    // Figure out what to do with the table
    MetadataAction action = MetadataAction.NOOP;
    if (existingPartition == null) {
      // If the partition doesn't exist on the destination, we need to
      // create it.
      action = MetadataAction.CREATE;
    } else if (!ReplicationUtils.stripNonComparables(existingPartition)
        .equals(ReplicationUtils.stripNonComparables(destPartition))) {
      // The partition exists on the destination, but some of the metadata
      // attributes are not as expected. This can be fixed with an alter
      // call.
      action = MetadataAction.ALTER;
    }

    // Take necessary action
    switch (action) {
      case CREATE:
        ReplicationUtils.createDbIfNecessary(srcMs, destMs, destPartition.getDbName());
        LOG.debug("Creating " + spec + " since it does not exist on " + "the destination");
        destMs.addPartition(destPartition);
        LOG.debug("Successfully created " + spec);
        break;

      case ALTER:
        LOG.debug("Altering partition " + spec + " on destination");
        destMs.alterPartition(destPartition.getDbName(), destPartition.getTableName(),
            destPartition);
        break;

      case NOOP:
        LOG.debug("Not doing anything for " + spec);
        break;

      default:
        throw new RuntimeException("Unhandled case!");
    }

    return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, bytesCopied);
  }

  /** Returns the specification of the partition that this task copies. */
  public HiveObjectSpec getSpec() {
    return this.spec;
  }

  @Override
  public LockSet getRequiredLocks() {
    LockSet lockSet = new LockSet();
    // Shared lock on the table so concurrent partition copies don't block each other,
    // exclusive lock on the specific partition being copied.
    lockSet.add(new Lock(Lock.Type.SHARED, spec.getTableSpec().toString()));
    lockSet.add(new Lock(Lock.Type.EXCLUSIVE, spec.toString()));
    return lockSet;
  }
}
| 9,507 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/CopyPartitionsCounter.java | package com.airbnb.reair.incremental.primitives;
/**
* Counts the number of partitions copied by a replication task.
*/
/**
 * Thread-safe counter for the number of partition copies completed and the total
 * number of bytes copied by a replication task.
 */
public class CopyPartitionsCounter {
  // Counters are guarded by this object's intrinsic lock; keep them private so all
  // access goes through the synchronized accessors below.
  private long completionCount = 0;
  private long bytesCopiedCount = 0;

  /** Records the completion of one partition copy. */
  synchronized void incrementCompletionCount() {
    completionCount++;
  }

  /** Returns the number of partition copies completed so far. */
  synchronized long getCompletionCount() {
    return completionCount;
  }

  /**
   * Adds to the running total of bytes copied.
   *
   * @param bytesCopied number of bytes copied by one partition copy
   */
  synchronized void incrementBytesCopied(long bytesCopied) {
    bytesCopiedCount += bytesCopied;
  }

  /** Returns the total number of bytes copied so far. */
  synchronized long getBytesCopied() {
    return bytesCopiedCount;
  }
}
| 9,508 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/ReplicationTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.multiprocessing.LockSet;
import java.io.IOException;
/**
* Interface for a replication task. A replication task is one of many primitives that can be used
* to replicate data and actions from the source warehouse to the destination warehouse. Generally,
* a ReplicationTask is executed by a ReplicationJob.
*/
public interface ReplicationTask {
  /**
   * Runs the replication task once, without retries. Callers (e.g. replication jobs) are
   * responsible for any retry policy around transient failures.
   *
   * @return RunInfo containing how the task execution went
   * @throws HiveMetastoreException if there is an error making a metastore call
   * @throws IOException if there is an error with writing to files
   * @throws DistCpException if there is an error running DistCp
   * @throws ConfigurationException if the config is improper
   */
  RunInfo runTask()
      throws ConfigurationException, HiveMetastoreException, IOException, DistCpException;

  /**
   * To handle concurrency issues, replication tasks should specify a set of locks so that two
   * conflicting replication tasks do not run at the same time. The locks must be acquired
   * before {@link #runTask()} is invoked.
   *
   * @return a set of locks that this task should acquire before running
   */
  LockSet getRequiredLocks();
}
| 9,509 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/RenameTableTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.util.StringUtils;
import java.io.IOException;
import java.util.Optional;
/**
 * Task that renames a table on the destination cluster to mirror a rename on the source.
 * For safety, the rename is only performed when the destination's renamed-from table has the
 * expected transient_lastDdlTime (TLDT); otherwise the renamed-to table is copied instead.
 */
public class RenameTableTask implements ReplicationTask {
  private static final Log LOG = LogFactory.getLog(RenameTableTask.class);

  private final Configuration conf;
  private final DestinationObjectFactory destObjectFactory;
  private final ObjectConflictHandler objectConflictHandler;
  private final Cluster srcCluster;
  private final Cluster destCluster;
  private final HiveObjectSpec renameFromSpec;
  private final HiveObjectSpec renameToSpec;
  private final Optional<Path> renameFromPath;
  private final Optional<Path> renameToPath;
  // Renamed from "renameFromTableTdlt" - the acronym is TLDT (transient_lastDdlTime).
  private final Optional<String> renameFromTableTldt;
  private final ParallelJobExecutor copyPartitionsExecutor;
  private final DirectoryCopier directoryCopier;

  /**
   * Constructor for a task that renames a table.
   *
   * @param conf configuration object
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param destObjectFactory factory for creating objects for the destination cluster
   * @param objectConflictHandler handler for addressing conflicting tables/partitions on the
   *                              destination cluster
   * @param renameFromSpec specification for the Hive table to rename from
   * @param renameToSpec specification for the Hive table to rename to
   * @param renameFromPath the path for the table to rename from
   * @param renameToPath the path for the table to rename to
   * @param renameFromTableTldt the expected transient_lastDdlTime of the renamed-from table on
   *                            the destination. If the time does not match, the task copies
   *                            the renamed-to table instead of renaming.
   * @param copyPartitionsExecutor an executor for copying the partitions of a table
   * @param directoryCopier runs directory copies through MR jobs
   */
  public RenameTableTask(
      Configuration conf,
      Cluster srcCluster,
      Cluster destCluster,
      DestinationObjectFactory destObjectFactory,
      ObjectConflictHandler objectConflictHandler,
      HiveObjectSpec renameFromSpec,
      HiveObjectSpec renameToSpec,
      Optional<Path> renameFromPath,
      Optional<Path> renameToPath,
      Optional<String> renameFromTableTldt,
      ParallelJobExecutor copyPartitionsExecutor,
      DirectoryCopier directoryCopier) {
    this.conf = conf;
    this.destObjectFactory = destObjectFactory;
    this.objectConflictHandler = objectConflictHandler;
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.renameFromSpec = renameFromSpec;
    this.renameToSpec = renameToSpec;
    this.renameFromPath = renameFromPath;
    this.renameToPath = renameToPath;
    this.renameFromTableTldt = renameFromTableTldt;
    this.copyPartitionsExecutor = copyPartitionsExecutor;
    this.directoryCopier = directoryCopier;
  }

  /** The action to take after comparing source and destination metadata. */
  enum HandleRenameAction {
    RENAME_TABLE, COPY_TABLE, NO_OP
  }

  /**
   * Renames the table on the destination if it is safe to do so, or falls back to copying the
   * renamed-to table from the source.
   *
   * @return RunInfo with the run status and the number of bytes copied
   * @throws ConfigurationException if the config is improper
   * @throws DistCpException if there is an error running DistCp
   * @throws HiveMetastoreException if there is an error making a metastore call
   * @throws IOException if there is an error with reading or writing files
   */
  @Override
  public RunInfo runTask()
      throws ConfigurationException, DistCpException, HiveMetastoreException, IOException {
    LOG.debug("Renaming " + renameFromSpec + " to " + renameToSpec);

    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();

    Table freshSrcRenameToTable =
        srcMs.getTable(renameToSpec.getDbName(), renameToSpec.getTableName());
    Table freshDestRenameToTable =
        destMs.getTable(renameToSpec.getDbName(), renameToSpec.getTableName());

    // Get a fresh copy of the metadata from the dest Hive metastore
    Table freshDestTable =
        destMs.getTable(renameFromSpec.getDbName(), renameFromSpec.getTableName());

    if (!renameFromTableTldt.isPresent()) {
      LOG.error(
          "For safety, not completing rename task since source " + " object TLDT is missing!");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    String expectedTldt = renameFromTableTldt.get();

    // Decide whether to rename, copy, or do nothing based on which tables exist on the
    // destination and whether their TLDTs match expectations.
    HandleRenameAction renameAction = null;
    if (ReplicationUtils.transientLastDdlTimesMatch(freshSrcRenameToTable,
        freshDestRenameToTable)) {
      LOG.debug(
          "Rename to table exists on destination and has a " + "matching TLDT. Not doing anything");
      renameAction = HandleRenameAction.NO_OP;
    } else if (freshDestRenameToTable != null) {
      LOG.debug("Rename to table already exists on destination, but "
          + "doesn't have a matching TLDT. Copying instead...");
      renameAction = HandleRenameAction.COPY_TABLE;
    } else if (freshDestTable == null) {
      LOG.warn(StringUtils.format("Destination rename from table %s " + "doesn't exist. "
          + "Copying %s to destination instead.", renameFromSpec, renameToSpec));
      renameAction = HandleRenameAction.COPY_TABLE;
    } else if (!ReplicationUtils.transientLastDdlTimesMatch(expectedTldt, freshDestTable)) {
      LOG.warn(StringUtils.format(
          "Destination table %s doesn't have " + "the expected modified time", renameFromSpec));
      LOG.debug("Renamed from source table with a TLDT: " + expectedTldt);
      LOG.debug("Table on destination: " + freshDestTable);
      LOG.debug(String.format("Copying %s to destination instead", renameToSpec));
      renameAction = HandleRenameAction.COPY_TABLE;
    } else {
      LOG.debug(String.format("Destination table (%s) matches " + "expected TLDT(%s) - will rename",
          renameFromSpec, ReplicationUtils.getTldt(freshDestTable)));
      renameAction = HandleRenameAction.RENAME_TABLE;
    }

    switch (renameAction) {
      case NO_OP:
        return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, 0);

      case RENAME_TABLE:
        LOG.debug(StringUtils.format("Renaming %s to %s", renameFromSpec, renameToSpec));
        Table newTableOnDestination = new Table(freshDestTable);
        newTableOnDestination.setDbName(renameToSpec.getDbName());
        newTableOnDestination.setTableName(renameToSpec.getTableName());
        destMs.alterTable(renameFromSpec.getDbName(), renameFromSpec.getTableName(),
            newTableOnDestination);
        // After a rename, the table should be re-copied to get the
        // correct modified time changes. With a proper rename, this
        // should be a mostly no-op.
        LOG.debug(StringUtils.format("Renamed %s to %s", renameFromSpec, renameToSpec));
        // fallthrough to run the table copy as per above comment.

      case COPY_TABLE:
        CopyCompleteTableTask task =
            new CopyCompleteTableTask(conf, destObjectFactory, objectConflictHandler, srcCluster,
                destCluster, renameToSpec, renameToPath, copyPartitionsExecutor, directoryCopier);
        return task.runTask();

      default:
        throw new RuntimeException("Unhandled case: " + renameAction);
    }
  }

  @Override
  public LockSet getRequiredLocks() {
    LockSet lockSet = new LockSet();
    // Both the renamed-from and renamed-to specs are modified, so lock both exclusively.
    lockSet.add(new Lock(Lock.Type.EXCLUSIVE, renameFromSpec.toString()));
    lockSet.add(new Lock(Lock.Type.EXCLUSIVE, renameToSpec.toString()));
    return lockSet;
  }
}
| 9,510 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/TaskEstimate.java | package com.airbnb.reair.incremental.primitives;
import com.google.common.base.MoreObjects;
import org.apache.hadoop.fs.Path;
import java.util.Optional;
/**
* Stores estimates about what's required for a task to replicate a Hive object.
*/
public class TaskEstimate {
  /** The kind of replication work that is estimated to be required. */
  public enum TaskType {
    COPY_UNPARTITIONED_TABLE,
    COPY_PARTITIONED_TABLE,
    COPY_PARTITION,
    DROP_TABLE,
    DROP_PARTITION,
    CHECK_PARTITION,
    NO_OP,
  }

  // Immutable value object: all fields are set once in the constructor.
  private final TaskType taskType;
  private final boolean updateMetadata;
  private final boolean updateData;
  private final Optional<Path> srcPath;
  private final Optional<Path> destPath;

  /**
   * Constructor for a task estimate.
   *
   * @param taskType type of task
   * @param updateMetadata whether the task needs to update Hive metadata
   * @param updateData where the task needs to update data files
   * @param srcPath the source path if the task needs to update data files
   * @param destPath the destination path if the task needs to update files
   */
  public TaskEstimate(TaskType taskType,
                      boolean updateMetadata,
                      boolean updateData,
                      Optional<Path> srcPath,
                      Optional<Path> destPath) {
    this.taskType = taskType;
    this.updateMetadata = updateMetadata;
    this.updateData = updateData;
    this.srcPath = srcPath;
    this.destPath = destPath;
  }

  /** Returns whether the task needs to update Hive metadata. */
  public boolean isUpdateMetadata() {
    return updateMetadata;
  }

  /** Returns whether the task needs to update data files. */
  public boolean isUpdateData() {
    return updateData;
  }

  /** Returns the source path, if the task needs to update data files. */
  public Optional<Path> getSrcPath() {
    return srcPath;
  }

  /** Returns the destination path, if the task needs to update data files. */
  public Optional<Path> getDestPath() {
    return destPath;
  }

  /** Returns the type of task that is estimated to be required. */
  public TaskType getTaskType() {
    return taskType;
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this).add("taskType", taskType.toString())
        .add("updateMetadata", updateMetadata)
        .add("updateData", updateData)
        .add("srcPath", srcPath)
        .add("destPath", destPath)
        .toString();
  }
}
| 9,511 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/RenamePartitionTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveUtils;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.util.StringUtils;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
/**
* Task that renames a partition. The expected modified time of the partition to rename on the
* destination needs to be passed in to ensure that a newer partition with the same name is not
* renamed.
*/
public class RenamePartitionTask implements ReplicationTask {
private static final Log LOG = LogFactory.getLog(RenamePartitionTask.class);
private Configuration conf;
private DestinationObjectFactory destObjectFactory;
private ObjectConflictHandler objectConflictHandler;
private Cluster srcCluster;
private Cluster destCluster;
private HiveObjectSpec renameFromSpec;
private HiveObjectSpec renameToSpec;
private Optional<Path> renameFromPath;
private Optional<Path> renameToPath;
private Optional<String> renameFromPartitionTdlt;
private DirectoryCopier directoryCopier;
/**
* Constructor for a task a that renames a partition.
*
* @param conf configuration object
* @param destObjectFactory factory for creating objects for the destination cluster
* @param objectConflictHandler handler for addressing conflicting tables/partitions on the
* destination cluster
* @param srcCluster source cluster
* @param destCluster destination cluster
* @param renameFromSpec specification for the Hive partition to rename from
* @param renameToSpec specification for the Hive partition to rename to
* @param renameFromPath the path for the partition to rename from
* @param renameToPath the path to the partition to rename to
* @param renameFromPartitionTdlt The expected modified time for the partitions. This should be
* the transient_lastDdlTime value in the parameters field of the
* Thrift object. If the time does not match, the task will not
* rename the table.
* @param directoryCopier runs directory copies through MR jobs
*/
public RenamePartitionTask(
Configuration conf,
DestinationObjectFactory destObjectFactory,
ObjectConflictHandler objectConflictHandler,
Cluster srcCluster,
Cluster destCluster,
HiveObjectSpec renameFromSpec,
HiveObjectSpec renameToSpec,
Optional<Path> renameFromPath,
Optional<Path> renameToPath,
Optional<String> renameFromPartitionTdlt,
DirectoryCopier directoryCopier) {
this.conf = conf;
this.destObjectFactory = destObjectFactory;
this.objectConflictHandler = objectConflictHandler;
this.srcCluster = srcCluster;
this.destCluster = destCluster;
this.renameFromSpec = renameFromSpec;
this.renameToSpec = renameToSpec;
this.renameFromPath = renameFromPath;
this.renameToPath = renameToPath;
this.renameFromPartitionTdlt = renameFromPartitionTdlt;
this.directoryCopier = directoryCopier;
}
enum HandleRenameAction {
RENAME_PARTITION, EXCHANGE_PARTITION, COPY_PARTITION, NO_OP
}
  /**
   * Executes the rename. If the renamed-to partition is already up to date on the destination,
   * this is a no-op. If the rename stays within the same table and the destination's
   * renamed-from partition still matches the expected transient_lastDdlTime (TLDT), a metastore
   * rename is issued (followed by a re-copy to sync modified times). In every other case, the
   * renamed-to partition is copied from the source instead.
   *
   * @return the run status, along with the number of bytes copied if a copy was needed
   * @throws ConfigurationException if the fallback copy task's configuration is improper
   * @throws HiveMetastoreException if there is an error communicating with a metastore
   * @throws DistCpException if there is an error copying partition data
   * @throws IOException if there is an error accessing the filesystem
   */
  @Override
  public RunInfo runTask()
      throws ConfigurationException, HiveMetastoreException, DistCpException, IOException {
    LOG.debug("Renaming " + renameFromSpec + " to " + renameToSpec);
    // Without the source object's TLDT there is no way to verify that the destination's
    // renamed-from partition corresponds to what was renamed, so bail out for safety.
    if (!renameFromPartitionTdlt.isPresent()) {
      LOG.error("For safety, not completing rename since source " + " object TLDT is missing!");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    String expectedTldt = renameFromPartitionTdlt.get();
    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();
    // Fresh copies of the renamed-to partition's metadata from both clusters.
    Partition freshSrcRenameToPart = srcMs.getPartition(renameToSpec.getDbName(),
        renameToSpec.getTableName(), renameToSpec.getPartitionName());
    Partition freshDestRenameToPart = destMs.getPartition(renameToSpec.getDbName(),
        renameToSpec.getTableName(), renameToSpec.getPartitionName());
    // Fresh copy of the renamed-from partition's metadata from the destination metastore.
    Partition freshDestRenameFromPart = destMs.getPartition(renameFromSpec.getDbName(),
        renameFromSpec.getTableName(), renameFromSpec.getPartitionName());
    HandleRenameAction renameAction = null;
    // For cases where the table doesn't change, you can do a rename
    if (renameFromSpec.getDbName().equals(renameToSpec.getDbName())
        && renameFromSpec.getTableName().equals(renameToSpec.getTableName())) {
      renameAction = HandleRenameAction.RENAME_PARTITION;
    } else {
      // Otherwise, it needs to be an exchange.
      renameAction = HandleRenameAction.EXCHANGE_PARTITION;
    }
    // Check to see if transient_lastDdl times match between what was
    // renamed and what exists. The checks below may downgrade the action
    // chosen above to a copy or a no-op.
    if (ReplicationUtils.transientLastDdlTimesMatch(freshSrcRenameToPart, freshDestRenameToPart)) {
      LOG.debug("Rename to partition exists on destination and has a "
          + "matching TLDT. Not doing anything");
      renameAction = HandleRenameAction.NO_OP;
    } else if (freshDestRenameToPart != null) {
      LOG.debug("Rename to partition already exists on destination, but "
          + "doesn't have a matching TLDT. Copying instead...");
      renameAction = HandleRenameAction.COPY_PARTITION;
    } else if (freshDestRenameFromPart == null) {
      LOG.warn(StringUtils.format(
          "Renamed-from partition %s " + "doesn't exist. " + "Copying %s to destination instead.",
          renameFromSpec, renameToSpec));
      renameAction = HandleRenameAction.COPY_PARTITION;
    } else if (!ReplicationUtils.transientLastDdlTimesMatch(expectedTldt,
        freshDestRenameFromPart)) {
      // The destination's renamed-from partition has diverged from what was renamed on the
      // source, so renaming it would propagate stale data - copy instead.
      LOG.warn(StringUtils.format(
          "Destination partition %s doesn't " + "have the expected modified time", renameFromSpec));
      LOG.debug("Renamed from source table with a TLDT: " + renameFromPartitionTdlt);
      LOG.debug("Partition on destination: " + freshDestRenameFromPart);
      LOG.debug(String.format("Copying %s to destination instead", renameToSpec));
      renameAction = HandleRenameAction.COPY_PARTITION;
    } else {
      LOG.debug(String.format("Destination table (%s) matches " + "expected TLDT(%s) - will rename",
          renameFromSpec, ReplicationUtils.getTldt(freshDestRenameFromPart)));
      // Action set in the beginning
    }
    switch (renameAction) {
      case NO_OP:
        return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, 0);
      case RENAME_PARTITION:
        // freshDestRenameFromPart is non-null here - the (== null) branch above would have
        // selected COPY_PARTITION otherwise.
        LOG.debug(StringUtils.format("Renaming %s to %s", renameFromSpec, renameToSpec));
        Partition newPartitionOnDestination = new Partition(freshDestRenameFromPart);
        List<String> renameToPartitionValues =
            HiveUtils.partitionNameToValues(destMs, renameToSpec.getPartitionName());
        List<String> renameFromPartitionValues =
            HiveUtils.partitionNameToValues(srcMs, renameFromSpec.getPartitionName());
        newPartitionOnDestination.setValues(renameToPartitionValues);
        destMs.renamePartition(renameFromSpec.getDbName(), renameFromSpec.getTableName(),
            renameFromPartitionValues, newPartitionOnDestination);
        LOG.debug(StringUtils.format("Renamed %s to %s", renameFromSpec, renameToSpec));
        // After a rename, the partition should be re-copied to get the
        // correct modified time changes. With a proper rename, this
        // should be a mostly no-op.
        return copyPartition(renameToSpec, renameToPath);
      case EXCHANGE_PARTITION:
        // TODO: Exchange partition can't be done without HIVE-12865
        // Just do a copy instead.
        // fallthrough
      case COPY_PARTITION:
        return copyPartition(renameToSpec, renameToPath);
      default:
        throw new RuntimeException("Unhandled case: " + renameAction);
    }
  }
private RunInfo copyPartition(HiveObjectSpec spec, Optional<Path> partitionLocation)
throws ConfigurationException, HiveMetastoreException, DistCpException, IOException {
CopyPartitionTask task = new CopyPartitionTask(conf, destObjectFactory, objectConflictHandler,
srcCluster, destCluster, spec, partitionLocation, Optional.empty(), directoryCopier, true);
return task.runTask();
}
@Override
public LockSet getRequiredLocks() {
LockSet lockSet = new LockSet();
lockSet.add(new Lock(Lock.Type.EXCLUSIVE, renameFromSpec.toString()));
lockSet.add(new Lock(Lock.Type.EXCLUSIVE, renameToSpec.toString()));
return lockSet;
}
}
| 9,512 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/TaskEstimator.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveUtils;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.io.IOException;
import java.util.Optional;
/**
* Given a Hive object spec, this class tries to figure out what operations would be necessary to
* replicate the object from the source cluster to the destination cluster. If the source object
* does not exist, but the destination does, a drop is assumed to be necessary.
*/
public class TaskEstimator {
  private static final Log LOG = LogFactory.getLog(TaskEstimator.class);
  // Configuration that supplies batch-job knobs (e.g. BATCH_JOB_OVERWRITE_NEWER).
  private Configuration conf;
  // Creates the "expected" destination-side table/partition objects to compare against.
  private DestinationObjectFactory destObjectFactory;
  private Cluster srcCluster;
  private Cluster destCluster;
  // Used to compare source/destination directories when deciding if data must be copied.
  private DirectoryCopier directoryCopier;
  /**
   * Constructor for a task estimator.
   *
   * @param conf configuration object
   * @param destObjectFactory factory for creating objects for the destination cluster
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param directoryCopier runs directory copies through MR jobs
   */
  public TaskEstimator(
      Configuration conf,
      DestinationObjectFactory destObjectFactory,
      Cluster srcCluster,
      Cluster destCluster,
      DirectoryCopier directoryCopier) {
    this.conf = conf;
    this.destObjectFactory = destObjectFactory;
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.directoryCopier = directoryCopier;
  }
  /**
   * Returns an estimate of what kind of task should be run to replicate the given object.
   *
   * @param spec The Hive object that should be replicated from the source to the destination.
   * @return the estimated task type, whether metadata and/or data need updating, and the
   *         source/destination paths involved where applicable
   * @throws HiveMetastoreException if there is an error connecting to the metastore
   * @throws IOException if there is an error accessing the filesystem
   */
  public TaskEstimate analyze(HiveObjectSpec spec) throws HiveMetastoreException, IOException {
    // Dispatch on the spec type; each helper validates that it received the right kind.
    if (!spec.isPartition()) {
      return analyzeTableSpec(spec);
    } else {
      return analyzePartitionSpec(spec);
    }
  }
  /**
   * Estimates the replication work for a table spec: a drop, a no-op, an unpartitioned table
   * copy, or a partitioned-table metadata copy (partitions are estimated separately).
   *
   * @param spec specification for the table to examine - must not be a partition spec
   * @throws HiveMetastoreException if there is an error connecting to the metastore
   * @throws IOException if there is an error accessing the filesystem
   */
  private TaskEstimate analyzeTableSpec(HiveObjectSpec spec)
      throws HiveMetastoreException, IOException {
    if (spec.isPartition()) {
      throw new RuntimeException("Argument should be a table " + spec);
    }
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();
    Table tableOnSrc = srcMs.getTable(spec.getDbName(), spec.getTableName());
    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    Table tableOnDest = destMs.getTable(spec.getDbName(), spec.getTableName());
    // If the source table doesn't exist but the destination table does,
    // then it's most likely a drop.
    if (tableOnSrc == null && tableOnDest != null) {
      return new TaskEstimate(TaskEstimate.TaskType.DROP_TABLE, false, false, Optional.empty(),
          Optional.empty());
    }
    // Nothing to do if the source table doesn't exist
    if (tableOnSrc == null) {
      return new TaskEstimate(TaskEstimate.TaskType.NO_OP, false, false, Optional.empty(),
          Optional.empty());
    }
    // If both src and dest exist, and the dest is newer, and we don't overwrite newer tables,
    // then it's a NO_OP.
    if (!conf.getBoolean(ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER, true)) {
      if (ReplicationUtils.isSrcOlder(tableOnSrc, tableOnDest)) {
        LOG.warn(String.format(
            "Source %s (%s) is older than destination (%s), so not copying",
            spec,
            ReplicationUtils.getLastModifiedTime(tableOnSrc),
            ReplicationUtils.getLastModifiedTime(tableOnDest)));
        return new TaskEstimate(TaskEstimate.TaskType.NO_OP, false, false, Optional.empty(),
            Optional.empty());
      }
    }
    boolean isPartitionedTable = HiveUtils.isPartitioned(tableOnSrc);
    // See if we need to update the data
    // Locations are not defined for views
    boolean updateData = false;
    Optional<Path> srcPath = ReplicationUtils.getLocation(tableOnSrc);
    Table expectedDestTable =
        destObjectFactory.createDestTable(srcCluster, destCluster, tableOnSrc, tableOnDest);
    Optional<Path> destPath = ReplicationUtils.getLocation(expectedDestTable);
    // Data comparison applies to unpartitioned tables only; partitioned table data is
    // handled per-partition.
    // NOTE(review): destPath.get() assumes the expected destination table has a location
    // whenever the source does - TODO confirm this holds for all table types.
    if (!isPartitionedTable && srcPath.isPresent() && !srcPath.equals(destPath)) {
      updateData = !directoryCopier.equalDirs(srcPath.get(), destPath.get());
    }
    // See if we need to update the metadata
    boolean updateMetadata =
        tableOnDest == null || !ReplicationUtils.stripNonComparables(tableOnDest)
            .equals(ReplicationUtils.stripNonComparables(expectedDestTable));
    if (!updateData && !updateMetadata) {
      return new TaskEstimate(TaskEstimate.TaskType.NO_OP, false, false, Optional.empty(),
          Optional.empty());
    } else if (!isPartitionedTable) {
      return new TaskEstimate(TaskEstimate.TaskType.COPY_UNPARTITIONED_TABLE, updateMetadata,
          updateData, srcPath, destPath);
    } else {
      // For partitioned tables, only the table-level metadata is copied by this estimate;
      // partitions are estimated and copied individually.
      return new TaskEstimate(TaskEstimate.TaskType.COPY_PARTITIONED_TABLE, true, false,
          Optional.empty(), Optional.empty());
    }
  }
  /**
   * Estimates the replication work for a partition spec: a drop, a no-op, or a partition copy
   * with the appropriate metadata/data update flags.
   *
   * @param spec specification for the partition to examine - must be a partition spec
   * @throws HiveMetastoreException if there is an error connecting to the metastore
   * @throws IOException if there is an error accessing the filesystem
   */
  private TaskEstimate analyzePartitionSpec(HiveObjectSpec spec)
      throws HiveMetastoreException, IOException {
    if (!spec.isPartition()) {
      throw new RuntimeException("Argument should be a partition " + spec);
    }
    boolean updateData = false;
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();
    Partition partitionOnSrc =
        srcMs.getPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());
    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    Partition partitionOnDest =
        destMs.getPartition(spec.getDbName(), spec.getTableName(), spec.getPartitionName());
    // If the source partition does not exist, but the destination does,
    // it's most likely a drop.
    if (partitionOnSrc == null && partitionOnDest != null) {
      return new TaskEstimate(TaskEstimate.TaskType.DROP_PARTITION, false, false, Optional.empty(),
          Optional.empty());
    }
    // Nothing to do if the source partition doesn't exist
    if (partitionOnSrc == null) {
      return new TaskEstimate(TaskEstimate.TaskType.NO_OP, false, false, Optional.empty(),
          Optional.empty());
    }
    // If both src and dest exist, and the dest is newer, and we don't overwrite newer partitions,
    // then it's a NO_OP.
    if (!conf.getBoolean(ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER, true)) {
      if (ReplicationUtils.isSrcOlder(partitionOnSrc, partitionOnDest)) {
        LOG.warn(String.format(
            "Source %s (%s) is older than destination (%s), so not copying",
            spec,
            ReplicationUtils.getLastModifiedTime(partitionOnSrc),
            ReplicationUtils.getLastModifiedTime(partitionOnDest)));
        return new TaskEstimate(TaskEstimate.TaskType.NO_OP, false, false, Optional.empty(),
            Optional.empty());
      }
    }
    Partition expectedDestPartition = destObjectFactory.createDestPartition(srcCluster, destCluster,
        partitionOnSrc, partitionOnDest);
    Optional<Path> srcPath = ReplicationUtils.getLocation(partitionOnSrc);
    Optional<Path> destPath = ReplicationUtils.getLocation(expectedDestPartition);
    // See if we need to update the data
    // NOTE(review): destPath.get() assumes the expected destination partition has a location
    // whenever the source does - TODO confirm.
    if (srcPath.isPresent() && !srcPath.equals(destPath)) {
      updateData = !directoryCopier.equalDirs(srcPath.get(), destPath.get());
    }
    // A metadata update is required if the destination partition doesn't
    // exist or the metadata differs from what's expected.
    boolean updateMetadata =
        partitionOnDest == null || !ReplicationUtils.stripNonComparables(partitionOnDest)
            .equals(ReplicationUtils.stripNonComparables(expectedDestPartition));
    if (!updateData && !updateMetadata) {
      return new TaskEstimate(TaskEstimate.TaskType.NO_OP, false, false, Optional.empty(),
          Optional.empty());
    } else {
      return new TaskEstimate(TaskEstimate.TaskType.COPY_PARTITION, updateMetadata, updateData,
          srcPath, destPath);
    }
  }
}
| 9,513 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/CopyUnpartitionedTableTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveUtils;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Table;
import java.io.IOException;
import java.util.Arrays;
import java.util.Optional;
/**
* Task that copies an unpartitioned table, copying data if allowed and necessary.
*/
public class CopyUnpartitionedTableTask implements ReplicationTask {
  private static final Log LOG = LogFactory.getLog(CopyUnpartitionedTableTask.class);
  private Configuration conf;
  // Creates the "expected" destination table object; named objectModifier for historical reasons.
  private DestinationObjectFactory objectModifier;
  private ObjectConflictHandler objectConflictHandler;
  private Cluster srcCluster;
  private Cluster destCluster;
  private HiveObjectSpec spec;
  private Optional<Path> tableLocation;
  private DirectoryCopier directoryCopier;
  // If false, the task fails (NOT_COMPLETABLE) when a data copy would be required.
  private boolean allowDataCopy;
  /**
   * Constructor for a task that copies an unpartitioned table.
   *
   * @param conf configuration object
   * @param destObjectFactory factory for creating objects for the destination cluster
   * @param objectConflictHandler handler for addressing conflicting tables/partitions on the
   *                              destination cluster
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param spec specification for the Hive unpartitioned table to copy
   * @param tableLocation the location of the table
   * @param directoryCopier runs directory copies through MR jobs
   * @param allowDataCopy Whether to copy data for this partition. If set to false, the task will
   *                      check to see if the data exists already and if not, it will fail the task.
   */
  public CopyUnpartitionedTableTask(
      Configuration conf,
      DestinationObjectFactory destObjectFactory,
      ObjectConflictHandler objectConflictHandler,
      Cluster srcCluster,
      Cluster destCluster,
      HiveObjectSpec spec,
      Optional<Path> tableLocation,
      DirectoryCopier directoryCopier,
      boolean allowDataCopy) {
    this.conf = conf;
    this.objectModifier = destObjectFactory;
    this.objectConflictHandler = objectConflictHandler;
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.tableLocation = tableLocation;
    this.spec = spec;
    this.directoryCopier = directoryCopier;
    this.allowDataCopy = allowDataCopy;
  }
  /**
   * Copies the table's data (when allowed and needed) and then creates or alters the table's
   * metadata on the destination cluster to match the expected destination object.
   *
   * @return the run status along with the number of bytes copied
   * @throws ConfigurationException if the copy configuration is improper
   * @throws HiveMetastoreException if there is an error communicating with a metastore
   * @throws DistCpException if there is an error copying data
   * @throws IOException if there is an error accessing the filesystem
   */
  @Override
  public RunInfo runTask()
      throws ConfigurationException, HiveMetastoreException, DistCpException, IOException {
    LOG.debug("Copying " + spec);
    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();
    // Get a fresh copy of the metadata from the source Hive metastore
    Table freshSrcTable = srcMs.getTable(spec.getDbName(), spec.getTableName());
    if (freshSrcTable == null) {
      LOG.warn("Source table " + spec + " doesn't exist, so not " + "copying");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    // This task handles unpartitioned tables only.
    if (HiveUtils.isPartitioned(freshSrcTable)) {
      LOG.warn("Source table " + spec + " is a partitioned table, so " + "not copying");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    // Check the table that exists already in the destination cluster
    Table existingTable = destMs.getTable(spec.getDbName(), spec.getTableName());
    if (existingTable != null) {
      LOG.debug("Table " + spec + " exists on destination");
      if (!conf.getBoolean(ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER, true)) {
        // Alias for symmetry with freshSrcTable in the modified-time comparison below.
        Table freshDestTable = existingTable;
        if (ReplicationUtils.isSrcOlder(freshSrcTable, freshDestTable)) {
          LOG.warn(String.format(
              "Source %s (%s) is older than destination (%s), so not copying",
              spec,
              ReplicationUtils.getLastModifiedTime(freshSrcTable),
              ReplicationUtils.getLastModifiedTime(freshDestTable)));
          return new RunInfo(RunInfo.RunStatus.DEST_IS_NEWER, 0);
        }
      }
      objectConflictHandler.handleCopyConflict(srcCluster, destCluster, freshSrcTable,
          existingTable);
    }
    Table destTable =
        objectModifier.createDestTable(srcCluster, destCluster, freshSrcTable, existingTable);
    // Refresh in case the conflict handler did something
    existingTable = destMs.getTable(spec.getDbName(), spec.getTableName());
    // Copy HDFS data if the location has changed in the destination object.
    // Usually, this is the case, but for S3 backed tables, the location
    // doesn't change.
    Optional<Path> srcPath = ReplicationUtils.getLocation(freshSrcTable);
    Optional<Path> destPath = ReplicationUtils.getLocation(destTable);
    // NOTE(review): destPath.get() assumes the destination table has a location whenever the
    // source does - TODO confirm.
    boolean needToCopy = srcPath.isPresent() && !srcPath.equals(destPath)
        && !directoryCopier.equalDirs(srcPath.get(), destPath.get());
    long bytesCopied = 0;
    if (needToCopy) {
      if (!allowDataCopy) {
        LOG.debug(String.format("Need to copy %s to %s, but data "
            + "copy is not allowed", srcPath,
            destPath));
        return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
      }
      if (!FsUtils.dirExists(conf, srcPath.get())) {
        LOG.debug(String.format("Need to copy %s to %s, but "
            + "source directory is missing",
            srcPath, destPath));
        return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
      }
      // Copy directory
      bytesCopied = directoryCopier.copy(srcPath.get(), destPath.get(),
          Arrays.asList(srcCluster.getName(), spec.getDbName(), spec.getTableName()));
    } else {
      LOG.debug("Not copying data");
    }
    // Figure out what to do with the table metadata: create it, alter it, or leave it alone.
    MetadataAction action = MetadataAction.NOOP;
    if (existingTable == null) {
      action = MetadataAction.CREATE;
    } else if (!ReplicationUtils.stripNonComparables(existingTable)
        .equals(ReplicationUtils.stripNonComparables(destTable))) {
      action = MetadataAction.ALTER;
    }
    switch (action) {
      case CREATE:
        LOG.debug("Creating " + spec + " since it does not exist on " + "the destination");
        ReplicationUtils.createDbIfNecessary(srcMs, destMs, destTable.getDbName());
        LOG.debug("Creating: " + destTable);
        destMs.createTable(destTable);
        LOG.debug("Successfully created " + spec);
        break;
      case ALTER:
        LOG.debug("Altering table " + spec + " on destination");
        LOG.debug("Existing table: " + existingTable);
        LOG.debug("New table: " + destTable);
        destMs.alterTable(destTable.getDbName(), destTable.getTableName(), destTable);
        LOG.debug("Successfully altered " + spec);
        break;
      case NOOP:
        LOG.debug("Destination table is up to date - not doing " + "anything for " + spec);
        break;
      default:
        throw new RuntimeException("Unhandled case: " + action);
    }
    return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, bytesCopied);
  }
  /**
   * The copied table is modified on the destination, so an exclusive lock on its spec is needed.
   */
  @Override
  public LockSet getRequiredLocks() {
    LockSet lockSet = new LockSet();
    lockSet.add(new Lock(Lock.Type.EXCLUSIVE, spec.toString()));
    return lockSet;
  }
}
| 9,514 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/CopyPartitionsTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveUtils;
import com.airbnb.reair.common.PathBuilder;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import com.airbnb.reair.multiprocessing.ParallelJobExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
/**
* Task that copies multiple partitions. To reduce the number of distcp jobs necessary, this task
* tries to copy a common parent directory of those partitions. However, a better solution would be
* to use a copy tool that can copy multiple source and destination directories simultaneously.
*/
public class CopyPartitionsTask implements ReplicationTask {
  private static final Log LOG = LogFactory.getLog(CopyPartitionsTask.class);

  // How long to wait between polls while waiting for queued partition copies to finish.
  private static final long COPY_POLL_INTERVAL_MS = 5 * 1000;

  private Configuration conf;
  // Creates the "expected" destination objects; named objectModifier for historical reasons.
  private DestinationObjectFactory objectModifier;
  private ObjectConflictHandler objectConflictHandler;
  private Cluster srcCluster;
  private Cluster destCluster;
  private HiveObjectSpec srcTableSpec;
  private List<String> partitionNames;
  private Optional<Path> commonDirectory;
  private ParallelJobExecutor copyPartitionsExecutor;
  private DirectoryCopier directoryCopier;

  /**
   * Constructor for a task to copy multiple partitions.
   *
   * @param conf configuration object
   * @param destObjectFactory factory for creating objects for the destination cluster
   * @param objectConflictHandler handler for addressing conflicting tables/partitions on the
   *                              destination cluster
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param srcTableSpec Hive specification for the table that these partitions belong to
   * @param partitionNames names of the partitions to copy
   * @param commonDirectory the common ancestor directory for the partitions, if applicable
   * @param copyPartitionsExecutor an executor for copying the partitions of a table
   * @param directoryCopier runs directory copies through MR jobs
   */
  public CopyPartitionsTask(
      Configuration conf,
      DestinationObjectFactory destObjectFactory,
      ObjectConflictHandler objectConflictHandler,
      Cluster srcCluster,
      Cluster destCluster,
      HiveObjectSpec srcTableSpec,
      List<String> partitionNames,
      Optional<Path> commonDirectory,
      ParallelJobExecutor copyPartitionsExecutor,
      DirectoryCopier directoryCopier) {
    this.conf = conf;
    this.objectModifier = destObjectFactory;
    this.objectConflictHandler = objectConflictHandler;
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.srcTableSpec = srcTableSpec;
    this.partitionNames = partitionNames;
    this.commonDirectory = commonDirectory;
    this.copyPartitionsExecutor = copyPartitionsExecutor;
    this.directoryCopier = directoryCopier;
  }

  /**
   * Find the common directory for a set of partitions if one exists. For example if the partition
   * ds=1 has a location /a/b/ds=1 and the partition ds=2 has a location /a/b/ds=2, then the common
   * directory is /a/b
   *
   * @param srcTableSpec specification for the Hive table that these partitions belong to
   * @param specToPartition a map from the Hive partition specification to the partition object
   * @return the common directory for the partition, if one exists
   */
  public static Optional<Path> findCommonDirectory(
      HiveObjectSpec srcTableSpec,
      Map<HiveObjectSpec, Partition> specToPartition) {
    // Sanity check - verify that all the specified objects are partitions
    // and that they are from the same table
    for (HiveObjectSpec spec : specToPartition.keySet()) {
      if (!srcTableSpec.equals(spec.getTableSpec())) {
        throw new RuntimeException(
            "Spec " + spec + " does not " + "match the source table spec " + srcTableSpec);
      }
      if (!spec.isPartition()) {
        throw new RuntimeException("Partition not specified: " + spec);
      }
    }
    // Collect all the partition locations
    Set<Path> partitionLocations = new HashSet<>();
    for (Map.Entry<HiveObjectSpec, Partition> entry : specToPartition.entrySet()) {
      partitionLocations.add(new Path(entry.getValue().getSd().getLocation()));
    }
    // Find the common subdirectory among all the partitions
    // TODO: This may copy more data than necessary - use multi directory
    // copy instead once it's available.
    Optional<Path> commonDirectory = ReplicationUtils.getCommonDirectory(partitionLocations);
    LOG.debug("Common directory of partitions is " + commonDirectory);
    return commonDirectory;
  }

  /**
   * Copies the named partitions of the source table to the destination. When the partitions
   * share a common parent directory under the table's location, that directory may first be
   * copied in a single distcp job (the "optimistic" copy) so the per-partition copies become
   * mostly no-ops. Per-partition copies are queued on the shared executor, and this method
   * blocks until all of them complete.
   *
   * @return the run status along with the total number of bytes copied
   * @throws HiveMetastoreException if there is an error communicating with a metastore
   * @throws DistCpException if there is an error copying data
   * @throws IOException if there is an error accessing the filesystem
   * @throws ConfigurationException if the copy configuration is improper
   */
  @Override
  public RunInfo runTask()
      throws HiveMetastoreException, DistCpException, IOException, ConfigurationException {
    LOG.debug("Copying partitions from " + srcTableSpec);
    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();
    // Get a fresh copy of the metadata from the source Hive metastore
    Table freshSrcTable = srcMs.getTable(srcTableSpec.getDbName(), srcTableSpec.getTableName());
    if (freshSrcTable == null) {
      LOG.warn("Source table " + srcTableSpec + " doesn't exist, so not " + "copying");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    if (!HiveUtils.isPartitioned(freshSrcTable)) {
      LOG.warn(
          "Source table " + srcTableSpec + " is not a partitioned table," + " so not copying");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    Optional<Path> tableLocation = ReplicationUtils.getLocation(freshSrcTable);
    LOG.debug("Location of table " + srcTableSpec + " is " + tableLocation);
    // If possible, copy the common directory in a single distcp job.
    // We call this the optimistic copy as this should result in no
    // additional distcp jobs when copying the partitions.
    long bytesCopied = 0;
    boolean doOptimisticCopy = false;
    if (commonDirectory.isPresent() && tableLocation.isPresent()
        && (tableLocation.equals(commonDirectory)
            || FsUtils.isSubDirectory(tableLocation.get(), commonDirectory.get()))) {
      Path commonDir = commonDirectory.get();
      // Get the size of all the partitions in the common directory and
      // check if the size of the common directory is approximately
      // the same size
      long sizeOfPartitionsInCommonDirectory = 0;
      for (String partitionName : partitionNames) {
        Partition partition = srcMs.getPartition(
            srcTableSpec.getDbName(),
            srcTableSpec.getTableName(),
            partitionName);
        if (partition != null && partition.getSd().getLocation() != null) {
          Path partitionLocation = new Path(partition.getSd().getLocation());
          if (FsUtils.isSubDirectory(commonDir, partitionLocation)
              && FsUtils.dirExists(conf, partitionLocation)) {
            sizeOfPartitionsInCommonDirectory +=
                FsUtils.getSize(conf, partitionLocation, Optional.empty());
          }
        }
      }
      // Only copy the common directory wholesale if it isn't much bigger than the partitions
      // it contains; otherwise the optimistic copy would transfer unrelated data.
      if (!FsUtils.dirExists(conf, commonDir)) {
        LOG.debug(String.format("Common dir: %s does not exist", commonDir));
      } else if (!FsUtils.exceedsSize(conf, commonDir, sizeOfPartitionsInCommonDirectory * 2)) {
        doOptimisticCopy = true;
      } else {
        LOG.debug(String.format(
            "Size of common directory %s is much " + "bigger than the size of the partitions in "
                + "the common directory (%s). Hence, not " + "copying the common directory",
            commonDir, sizeOfPartitionsInCommonDirectory));
      }
    }
    Optional<Path> optimisticCopyDir = Optional.empty();
    // isPresent() isn't necessary, as doOptimisticCopy implies it's set.
    if (commonDirectory.isPresent() && doOptimisticCopy) {
      Path commonDir = commonDirectory.get();
      // Check if the common directory is the same on the destination
      String destinationLocation =
          objectModifier.modifyLocation(srcCluster, destCluster, commonDir.toString());
      Path destinationLocationPath = new Path(destinationLocation);
      if (!objectModifier.shouldCopyData(destinationLocation)) {
        LOG.debug("Skipping copy of destination location " + commonDirectory
            + " due to destination " + "object factory");
      } else if (!FsUtils.dirExists(conf, commonDir)) {
        LOG.debug("Skipping copy of destination location " + commonDirectory
            + " since it does not exist");
      } else if (FsUtils.equalDirs(conf, commonDir, destinationLocationPath)) {
        LOG.debug("Skipping copying common directory " + commonDir + " since it matches "
            + destinationLocationPath);
      } else {
        LOG.debug("Optimistically copying common directory " + commonDir);
        // Copy into a randomized temporary directory so concurrent tasks cannot collide.
        Random random = new Random();
        long randomLong = random.nextLong();
        Path path =
            new PathBuilder(destCluster.getTmpDir()).add("distcp_tmp").add(srcCluster.getName())
                .add("optimistic_copy").add(Long.toString(randomLong)).toPath();
        optimisticCopyDir = Optional.of(path);
        bytesCopied += copyWithStructure(commonDir, path);
      }
    }
    // Now copy all the partitions
    CopyPartitionsCounter copyPartitionsCounter = new CopyPartitionsCounter();
    long expectedCopyCount = 0;
    for (String partitionName : partitionNames) {
      Partition srcPartition =
          srcMs.getPartition(srcTableSpec.getDbName(), srcTableSpec.getTableName(), partitionName);
      HiveObjectSpec partitionSpec =
          new HiveObjectSpec(srcTableSpec.getDbName(), srcTableSpec.getTableName(), partitionName);
      if (srcPartition == null) {
        LOG.warn("Not copying missing partition: " + partitionSpec);
        continue;
      }
      CopyPartitionTask copyPartitionTask = new CopyPartitionTask(conf, objectModifier,
          objectConflictHandler, srcCluster, destCluster, partitionSpec,
          ReplicationUtils.getLocation(srcPartition), optimisticCopyDir, directoryCopier, true);
      CopyPartitionJob copyPartitionJob =
          new CopyPartitionJob(copyPartitionTask, copyPartitionsCounter);
      copyPartitionsExecutor.add(copyPartitionJob);
      expectedCopyCount++;
    }
    // Block until every queued partition copy has completed.
    while (true) {
      LOG.debug(String.format("Copied %s out of %s partitions",
          copyPartitionsCounter.getCompletionCount(), expectedCopyCount));
      if (copyPartitionsCounter.getCompletionCount() == expectedCopyCount) {
        break;
      }
      try {
        Thread.sleep(COPY_POLL_INTERVAL_MS);
      } catch (InterruptedException e) {
        LOG.error("Got interrupted!");
        // Restore the interrupt status for callers higher up the stack.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    }
    bytesCopied += copyPartitionsCounter.getBytesCopied();
    return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, bytesCopied);
  }

  /**
   * Copies the source directory to the destination directory while preserving structure. i.e. if
   * copying /a/b/c to the destination directory /d, then /d/a/b/c will be created and contain files
   * from /a/b/c.
   *
   * @param srcDir source directory
   * @param destDir destination directory
   * @return total number of bytes copied
   * @throws IOException if there is an error accessing the filesystem
   * @throws DistCpException if there is an error copying the data
   * @throws ConfigurationException if the config is improper
   */
  private long copyWithStructure(Path srcDir, Path destDir)
      throws ConfigurationException, DistCpException, IOException {
    PathBuilder dirBuilder = new PathBuilder(destDir);
    // Preserve the directory structure within the dest directory.
    // Decompose a directory like /a/b/c and add a, b, c as subdirectories
    // within the tmp directory.
    List<String> pathElements =
        new ArrayList<>(Arrays.asList(srcDir.toUri().getPath().split("/")));
    // When splitting a path like '/a/b/c', the first element is ''
    if (pathElements.get(0).equals("")) {
      pathElements.remove(0);
    }
    for (String pathElement : pathElements) {
      dirBuilder.add(pathElement);
    }
    Path destPath = dirBuilder.toPath();
    // Copy directory
    long bytesCopied = directoryCopier.copy(srcDir, destPath,
        Arrays.asList(srcCluster.getName(), "copy_with_structure"));
    return bytesCopied;
  }

  /**
   * The source table is only read, so a shared lock on the table spec suffices, but each copied
   * partition is modified on the destination and needs an exclusive lock.
   */
  @Override
  public LockSet getRequiredLocks() {
    LockSet lockSet = new LockSet();
    lockSet.add(new Lock(Lock.Type.SHARED, srcTableSpec.toString()));
    for (String partitionName : partitionNames) {
      HiveObjectSpec partitionSpec =
          new HiveObjectSpec(srcTableSpec.getDbName(), srcTableSpec.getTableName(), partitionName);
      lockSet.add(new Lock(Lock.Type.EXCLUSIVE, partitionSpec.toString()));
    }
    return lockSet;
  }
}
| 9,515 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/DropTableTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveParameterKeys;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.Optional;
/**
 * Task that replicates a table drop by dropping the corresponding table on the destination
 * cluster. To guard against removing a table that changed after the drop was recorded on the
 * source, the drop only proceeds when the destination table's modified time
 * (transient_lastDdlTime) matches the value captured from the source object.
 */
public class DropTableTask implements ReplicationTask {
  private static final Log LOG = LogFactory.getLog(DropTableTask.class);
  // Cluster where the original drop occurred.
  // NOTE(review): srcCluster is stored but never read in this class - confirm it is needed.
  private Cluster srcCluster;
  // Cluster on which this task performs the drop.
  private Cluster destCluster;
  // The database/table to drop.
  private HiveObjectSpec spec;
  // transient_lastDdlTime of the source object at the time the drop was logged, if known.
  private Optional<String> sourceTldt;
  /**
   * Constructor for a task that drops a table on the destination cluster.
   *
   * @param srcCluster source cluster where the drop originally occurred
   * @param destCluster destination cluster on which to drop the table
   * @param spec specification of the Hive table to drop
   * @param sourceTldt the transient_lastDdlTime value of the dropped source object; the
   *        destination table is dropped only if its own value matches
   */
  public DropTableTask(
      Cluster srcCluster,
      Cluster destCluster,
      HiveObjectSpec spec,
      Optional<String> sourceTldt) {
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.spec = spec;
    this.sourceTldt = sourceTldt;
  }
  /**
   * Drops the table on the destination cluster if its modified time matches the expected
   * source value.
   *
   * @return SUCCESSFUL if the table was dropped; NOT_COMPLETABLE if the expected modified
   *         time is missing, the destination table does not exist, or the times do not match
   * @throws HiveMetastoreException if there is an error talking to the destination metastore
   */
  @Override
  public RunInfo runTask() throws HiveMetastoreException {
    LOG.debug("Looking to drop: " + spec);
    LOG.debug("Source TLDT is : " + sourceTldt);
    // Without the source's modified time there is no way to verify that the destination
    // table corresponds to the dropped source object, so refuse to drop anything.
    if (!sourceTldt.isPresent()) {
      LOG.error("For safety, not completing drop task since source " + " object TLDT is missing!");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    String expectedTldt = sourceTldt.get();
    HiveMetastoreClient ms = destCluster.getMetastoreClient();
    Table destTable = ms.getTable(spec.getDbName(), spec.getTableName());
    if (destTable == null) {
      LOG.warn("Missing " + spec + " on destination, so can't drop!");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    LOG.debug("Destination object is: " + destTable);
    // A mismatch means the destination table was modified independently of the source,
    // so leave it alone.
    String destTldt = destTable.getParameters().get(HiveParameterKeys.TLDT);
    if (expectedTldt.equals(destTldt)) {
      LOG.debug(
          String.format("Destination table %s matches expected" + " TLDT (%s)", spec, destTldt));
      LOG.debug("Dropping " + spec);
      ms.dropTable(spec.getDbName(), spec.getTableName(), true);
      LOG.debug("Dropped " + spec);
      return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, 0);
    } else {
      LOG.debug(
          String.format("Not dropping %s as source(%s) and " + "destination(%s) TLDT's dont match",
              spec.toString(), expectedTldt, destTldt));
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
  }
  @Override
  public LockSet getRequiredLocks() {
    // Dropping requires exclusive access to the table being removed.
    LockSet lockSet = new LockSet();
    lockSet.add(new Lock(Lock.Type.EXCLUSIVE, spec.toString()));
    return lockSet;
  }
}
| 9,516 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/primitives/CopyPartitionedTableTask.java | package com.airbnb.reair.incremental.primitives;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.HiveUtils;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.multiprocessing.Lock;
import com.airbnb.reair.multiprocessing.LockSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.util.Optional;
/**
* Task that copies metadata for a partitioned table.
*/
public class CopyPartitionedTableTask implements ReplicationTask {
  private static final Log LOG = LogFactory.getLog(CopyPartitionedTableTask.class);
  private Configuration conf;
  // Builds the table object that should exist on the destination cluster.
  private DestinationObjectFactory objectModifier;
  // Resolves the case where a conflicting table already exists on the destination.
  private ObjectConflictHandler objectConflictHandler;
  private Cluster srcCluster;
  private Cluster destCluster;
  // The partitioned table whose metadata should be copied.
  private HiveObjectSpec spec;
  // Path to the partition's data.
  // NOTE(review): srcPath is stored but never read in this class - confirm it is needed.
  private Optional<Path> srcPath;
  /**
   * Constructor for a task that copies the metadata for a partitioned table.
   * @param conf configuration object
   * @param destObjectFactory factory for creating objects for the destination cluster
   * @param objectConflictHandler handler for addressing conflicting tables/partitions on the
   *                              destination cluster
   * @param srcCluster source cluster
   * @param destCluster destination cluster
   * @param spec specification for the Hive partitioned table to copy
   * @param srcPath the path to the partition's data
   */
  public CopyPartitionedTableTask(
      Configuration conf,
      DestinationObjectFactory destObjectFactory,
      ObjectConflictHandler objectConflictHandler,
      Cluster srcCluster,
      Cluster destCluster,
      HiveObjectSpec spec,
      Optional<Path> srcPath) {
    this.conf = conf;
    this.objectModifier = destObjectFactory;
    this.objectConflictHandler = objectConflictHandler;
    this.srcCluster = srcCluster;
    this.destCluster = destCluster;
    this.spec = spec;
    this.srcPath = srcPath;
  }
  /**
   * Copies the table-level metadata (not the partition data) to the destination metastore,
   * creating or altering the destination table as needed.
   *
   * @return a RunInfo describing whether the copy succeeded
   * @throws HiveMetastoreException if there is an error talking to either metastore
   */
  @Override
  public RunInfo runTask() throws HiveMetastoreException {
    LOG.debug("Copying " + spec);
    HiveMetastoreClient destMs = destCluster.getMetastoreClient();
    HiveMetastoreClient srcMs = srcCluster.getMetastoreClient();
    // Get a fresh copy of the metadata from the source Hive metastore
    Table freshSrcTable = srcMs.getTable(spec.getDbName(), spec.getTableName());
    if (freshSrcTable == null) {
      LOG.warn("Source table " + spec + " doesn't exist, so not " + "copying");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    if (!HiveUtils.isPartitioned(freshSrcTable)) {
      LOG.warn("Not copying " + spec + " since it's not partitioned");
      return new RunInfo(RunInfo.RunStatus.NOT_COMPLETABLE, 0);
    }
    // Check the table that exists already in the destination cluster
    Table existingTable = destMs.getTable(spec.getDbName(), spec.getTableName());
    Table destTable =
        objectModifier.createDestTable(srcCluster, destCluster, freshSrcTable, existingTable);
    if (existingTable != null) {
      LOG.debug("Table " + spec + " exists on destination!");
      // Unless configured to overwrite newer destinations, skip the copy when the
      // destination was modified more recently than the source.
      if (!conf.getBoolean(ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER, true)) {
        Table freshDestTable = existingTable;
        if (ReplicationUtils.isSrcOlder(freshSrcTable, freshDestTable)) {
          LOG.warn(String.format(
              "Source %s (%s) is older than destination (%s), so not copying",
              spec,
              ReplicationUtils.getLastModifiedTime(freshSrcTable),
              ReplicationUtils.getLastModifiedTime(freshDestTable)));
          return new RunInfo(RunInfo.RunStatus.DEST_IS_NEWER, 0);
        }
      }
      objectConflictHandler.handleCopyConflict(srcCluster, destCluster, freshSrcTable,
          existingTable);
    }
    // Refresh in case the conflict handler did something
    existingTable = destMs.getTable(spec.getDbName(), spec.getTableName());
    // Figure out what to do with the table
    MetadataAction action = MetadataAction.NOOP;
    if (existingTable == null) {
      action = MetadataAction.CREATE;
    } else if (!ReplicationUtils.stripNonComparables(existingTable)
        .equals(ReplicationUtils.stripNonComparables(destTable))) {
      // Only alter when the tables differ in fields that matter for comparison.
      action = MetadataAction.ALTER;
    }
    // Take necessary action
    switch (action) {
      case CREATE:
        LOG.debug("Creating " + spec + " since it does not exist on " + "the destination");
        ReplicationUtils.createDbIfNecessary(srcMs, destMs, destTable.getDbName());
        LOG.debug("Creating: " + destTable);
        destMs.createTable(destTable);
        LOG.debug("Successfully created table " + spec);
        break;
      case ALTER:
        LOG.debug("Altering table " + spec + " on destination");
        LOG.debug("Existing table: " + existingTable);
        LOG.debug("Replacement table: " + destTable);
        destMs.alterTable(destTable.getDbName(), destTable.getTableName(), destTable);
        LOG.debug("Successfully altered " + spec);
        break;
      case NOOP:
        LOG.debug("Destination table " + spec + " is up to date, so " + "not doing anything");
        break;
      default:
        throw new RuntimeException("Unhandled case!");
    }
    return new RunInfo(RunInfo.RunStatus.SUCCESSFUL, 0);
  }
  @Override
  public LockSet getRequiredLocks() {
    // Copying table metadata requires exclusive access to the table.
    LockSet lockSet = new LockSet();
    lockSet.add(new Lock(Lock.Type.EXCLUSIVE, spec.toString()));
    return lockSet;
  }
}
| 9,517 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/db/PersistedJobInfo.java | package com.airbnb.reair.incremental.db;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationOperation;
import com.airbnb.reair.incremental.ReplicationStatus;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
/**
* Information about a replication job that gets persisted to a DB.
*/
public class PersistedJobInfo {
  /**
   * Persistence lifecycle of this object relative to its row in the backing DB.
   */
  public enum PersistState {
    PERSISTED, // the job has been created in the DB and has an ID
    PENDING // the job has not yet been created in the DB and has no ID
  }
  private static final Log LOG = LogFactory.getLog(PersistedJobInfo.class);
  // Completes with the DB-assigned ID once the job has been persisted.
  private CompletableFuture<Long> id;
  private long createTime;
  private ReplicationOperation operation;
  private ReplicationStatus status;
  // Path of the source may not exist for views
  private Optional<Path> srcPath;
  private String srcClusterName;
  private String srcDbName;
  private String srcTableName;
  // If copying partition(s), the partition names
  private List<String> srcPartitionNames;
  // The modified time of the source object - from the field in parameters,
  // transientLast_ddlTime. This field is only applicable for rename and
  // drop operations.
  private Optional<String> srcObjectTldt;
  // These fields are only applicable for the rename operation.
  private Optional<String> renameToDb;
  private Optional<String> renameToTable;
  private Optional<String> renameToPartition;
  private Optional<Path> renameToPath;
  // A flexible map to store some extra parameters
  private Map<String, String> extras;
  private PersistState persistState;
  // Well-known keys used in the extras map.
  public static final String AUDIT_LOG_ID_EXTRAS_KEY = "audit_log_id";
  public static final String AUDIT_LOG_ENTRY_CREATE_TIME_KEY = "audit_log_entry_create_time";
  public static final String BYTES_COPIED_KEY = "bytes_copied";
  /**
   * Constructor for a persisted job info.
   *
   * @param id unique ID for this job
   * @param createTime time that the job was created in millis (rounded to nearest 1000)
   * @param operation the type of operation that the job performs
   * @param status the status of the job
   * @param srcPath the path of the source object
   * @param srcClusterName the name of the source cluster
   * @param srcDbName the name of the source database
   * @param srcTableName the name of the source table
   * @param srcPartitionNames the names of the source partitions
   * @param srcObjectTldt the source object's last modified time (transient_lastDdlTime in
   *                      the parameters field of the Hive Thrift object)
   * @param renameToDb if renaming an object, the new database name
   * @param renameToTable if renaming an object, the new table name
   * @param renameToPartition if renaming an object, the new partition name
   * @param renameToPath if renaming an object, the new object's new location
   * @param extras a key value map of any extra information that is not critical to replication
   */
  PersistedJobInfo(
      Optional<Long> id,
      Long createTime,
      ReplicationOperation operation,
      ReplicationStatus status,
      Optional<Path> srcPath,
      String srcClusterName,
      String srcDbName,
      String srcTableName,
      List<String> srcPartitionNames,
      Optional<String> srcObjectTldt,
      Optional<String> renameToDb,
      Optional<String> renameToTable,
      Optional<String> renameToPartition,
      Optional<Path> renameToPath,
      Map<String, String> extras) {
    // A present ID means the row already exists in the DB; otherwise the ID future is
    // completed later by setPersisted().
    if (id.isPresent()) {
      this.id = CompletableFuture.completedFuture(id.get());
      this.persistState = PersistState.PERSISTED;
    } else {
      this.id = new CompletableFuture<>();
      this.persistState = PersistState.PENDING;
    }
    this.createTime = createTime;
    this.operation = operation;
    this.status = status;
    this.srcPath = srcPath;
    this.srcClusterName = srcClusterName;
    this.srcDbName = srcDbName;
    this.srcTableName = srcTableName;
    // Tolerate null collections from callers, but log loudly since it indicates a bug.
    if (srcPartitionNames != null) {
      this.srcPartitionNames = srcPartitionNames;
    } else {
      LOG.error("null srcPartitionNames passed in constructor", new Exception());
      this.srcPartitionNames = new ArrayList<>();
    }
    this.srcObjectTldt = srcObjectTldt;
    this.renameToDb = renameToDb;
    this.renameToTable = renameToTable;
    this.renameToPartition = renameToPartition;
    this.renameToPath = renameToPath;
    if (extras == null) {
      LOG.error("null extras passed in constructor", new Exception());
      this.extras = new HashMap<>();
    } else {
      this.extras = extras;
    }
  }
  /**
   * Marks this job as persisted and completes the ID future with the DB-assigned ID.
   * May only be called once.
   *
   * @param id the ID assigned by the DB
   */
  void setPersisted(Long id) {
    if (this.persistState == PersistState.PERSISTED) {
      throw new RuntimeException("PersistedJobInfo.setPersisted can only be called once.");
    }
    this.persistState = PersistState.PERSISTED;
    this.id.complete(id);
  }
  public PersistState getPersistState() {
    return this.persistState;
  }
  /**
   * Returns the ID if it has been persisted. Should only be called if persisted.
   * @return the id
   */
  public Long getId() {
    try {
      if (this.id.isDone()) {
        return this.id.get();
      } else {
        throw new RuntimeException("getId should not be called before setPersisted().");
      }
    } catch (InterruptedException | ExecutionException e) {
      // id.isDone() is checked above, so get() cannot block or fail here.
      throw new RuntimeException("These exceptions should never be thrown.");
    }
  }
  public ReplicationOperation getOperation() {
    return operation;
  }
  public ReplicationStatus getStatus() {
    return status;
  }
  public String getSrcClusterName() {
    return srcClusterName;
  }
  public Optional<Path> getSrcPath() {
    return srcPath;
  }
  public String getSrcDbName() {
    return srcDbName;
  }
  public String getSrcTableName() {
    return srcTableName;
  }
  public List<String> getSrcPartitionNames() {
    return srcPartitionNames;
  }
  public Optional<String> getSrcObjectTldt() {
    return srcObjectTldt;
  }
  public void setStatus(ReplicationStatus status) {
    this.status = status;
  }
  public Optional<String> getRenameToDb() {
    return renameToDb;
  }
  public Optional<String> getRenameToTable() {
    return renameToTable;
  }
  public Optional<String> getRenameToPartition() {
    return renameToPartition;
  }
  public Optional<Path> getRenameToPath() {
    return renameToPath;
  }
  public Map<String, String> getExtras() {
    return extras;
  }
  public long getCreateTime() {
    return createTime;
  }
  @Override
  public String toString() {
    return "PersistedJobInfo{" + "id=" + id + ", operation=" + operation + ", createTime="
        + createTime + ", status=" + status + ", srcPath=" + srcPath + ", srcClusterName='"
        + srcClusterName + '\'' + ", srcDbName='" + srcDbName + '\'' + ", srcTableName='"
        + srcTableName + '\'' + ", srcPartitionNames=" + srcPartitionNames + ", srcObjectTldt='"
        + srcObjectTldt + '\'' + ", renameToDb='" + renameToDb + '\'' + ", renameToTable='"
        + renameToTable + '\'' + ", renameToPartition='" + renameToPartition + '\''
        + ", renameToPath=" + renameToPath + ", extras=" + extras + '}';
  }
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    PersistedJobInfo that = (PersistedJobInfo) obj;
    if (createTime != that.createTime) {
      return false;
    }
    if (extras != null ? !extras.equals(that.extras) : that.extras != null) {
      return false;
    }
    // either the Future is determined and the value is equal, or they are the same future
    if (id != null ? !(id.equals(that.id)
        || (id.isDone() && id.getNow(null).equals(that.id.getNow(null)))) :
        that.id != null) {
      return false;
    }
    if (operation != that.operation) {
      return false;
    }
    if (renameToDb != null ? !renameToDb.equals(that.renameToDb) : that.renameToDb != null) {
      return false;
    }
    if (renameToPartition != null ? !renameToPartition.equals(that.renameToPartition)
        : that.renameToPartition != null) {
      return false;
    }
    if (renameToPath != null ? !renameToPath.equals(that.renameToPath)
        : that.renameToPath != null) {
      return false;
    }
    if (renameToTable != null ? !renameToTable.equals(that.renameToTable)
        : that.renameToTable != null) {
      return false;
    }
    if (srcClusterName != null ? !srcClusterName.equals(that.srcClusterName)
        : that.srcClusterName != null) {
      return false;
    }
    if (srcDbName != null ? !srcDbName.equals(that.srcDbName) : that.srcDbName != null) {
      return false;
    }
    if (srcObjectTldt != null ? !srcObjectTldt.equals(that.srcObjectTldt)
        : that.srcObjectTldt != null) {
      return false;
    }
    if (srcPartitionNames != null ? !srcPartitionNames.equals(that.srcPartitionNames)
        : that.srcPartitionNames != null) {
      return false;
    }
    if (srcPath != null ? !srcPath.equals(that.srcPath) : that.srcPath != null) {
      return false;
    }
    if (srcTableName != null ? !srcTableName.equals(that.srcTableName)
        : that.srcTableName != null) {
      return false;
    }
    if (status != that.status) {
      return false;
    }
    return true;
  }
  // NOTE(review): id contributes CompletableFuture's identity-based hashCode, so two objects
  // that compare equal via equals() (distinct futures completed with the same value) can hash
  // differently. Confirm these objects are not used as hash-map keys before relying on this.
  @Override
  public int hashCode() {
    int result = id != null ? id.hashCode() : 0;
    result = 31 * result + (int) (createTime ^ (createTime >>> 32));
    result = 31 * result + (operation != null ? operation.hashCode() : 0);
    result = 31 * result + (status != null ? status.hashCode() : 0);
    result = 31 * result + (srcPath != null ? srcPath.hashCode() : 0);
    result = 31 * result + (srcClusterName != null ? srcClusterName.hashCode() : 0);
    result = 31 * result + (srcDbName != null ? srcDbName.hashCode() : 0);
    result = 31 * result + (srcTableName != null ? srcTableName.hashCode() : 0);
    result = 31 * result + (srcPartitionNames != null ? srcPartitionNames.hashCode() : 0);
    result = 31 * result + (srcObjectTldt != null ? srcObjectTldt.hashCode() : 0);
    result = 31 * result + (renameToDb != null ? renameToDb.hashCode() : 0);
    result = 31 * result + (renameToTable != null ? renameToTable.hashCode() : 0);
    result = 31 * result + (renameToPartition != null ? renameToPartition.hashCode() : 0);
    result = 31 * result + (renameToPath != null ? renameToPath.hashCode() : 0);
    result = 31 * result + (extras != null ? extras.hashCode() : 0);
    return result;
  }
  /**
   * Creates a PersistedJobInfo with no ID.
   * @param operation operation
   * @param status status
   * @param srcPath srcPath
   * @param srcClusterName srcClusterName
   * @param srcTableSpec srcTableSpec
   * @param srcPartitionNames srcPartitionNames
   * @param srcTldt srcTldt
   * @param renameToObject renameToObject
   * @param renameToPath renameToPath
   * @param extras extras
   * @return An unpersisted PersistedJobInfo
   */
  public static PersistedJobInfo createDeferred(
      ReplicationOperation operation,
      ReplicationStatus status,
      Optional<Path> srcPath,
      String srcClusterName,
      HiveObjectSpec srcTableSpec,
      List<String> srcPartitionNames,
      Optional<String> srcTldt,
      Optional<HiveObjectSpec> renameToObject,
      Optional<Path> renameToPath,
      Map<String, String> extras) {
    // Round down to whole seconds to match the DB timestamp column's resolution.
    long timestampMillisRounded = System.currentTimeMillis() / 1000L * 1000L;
    PersistedJobInfo persistedJobInfo =
        new PersistedJobInfo(Optional.empty(), timestampMillisRounded, operation, status, srcPath,
            srcClusterName, srcTableSpec.getDbName(), srcTableSpec.getTableName(),
            srcPartitionNames, srcTldt,
            renameToObject.map(HiveObjectSpec::getDbName),
            renameToObject.map(HiveObjectSpec::getTableName),
            renameToObject.map(HiveObjectSpec::getPartitionName),
            renameToPath, extras);
    return persistedJobInfo;
  }
}
| 9,518 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/db/PersistedJobInfoStore.java | package com.airbnb.reair.incremental.db;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.incremental.ReplicationOperation;
import com.airbnb.reair.incremental.ReplicationStatus;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.StateUpdateException;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.utils.RetryableTask;
import com.airbnb.reair.utils.RetryingTaskRunner;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* A store for managing and persisting PersistedJobInfo objects. The objects are stored though a
* state table that generally has a separate column for each field in PersistedJobInfo. This avoids
* the use of ORM as the use case is relatively simple.
*
* <p>Note: to simplify programming, all methods are synchronized. This could be slow, so another
* approach is for each thread to use a different DB connection for higher parallelism.
*/
public class PersistedJobInfoStore {
private static final Log LOG = LogFactory.getLog(PersistedJobInfoStore.class);
private static final String[] completedStateStrings = {
ReplicationStatus.SUCCESSFUL.name(),
ReplicationStatus.FAILED.name(),
ReplicationStatus.NOT_COMPLETABLE.name(),
ReplicationStatus.ABORTED.name()};
private DbConnectionFactory dbConnectionFactory;
private String dbTableName;
private RetryingTaskRunner retryingTaskRunner = new RetryingTaskRunner();
/**
* Constructor.
*
* @param conf configuration
* @param dbConnectionFactory factory for creating connections to the DB
* @param dbTableName name of the table on the DB that stores job information
*/
public PersistedJobInfoStore(Configuration conf,
DbConnectionFactory dbConnectionFactory,
String dbTableName) {
this.dbConnectionFactory = dbConnectionFactory;
this.dbTableName = dbTableName;
this.retryingTaskRunner = new RetryingTaskRunner(
conf.getInt(ConfigurationKeys.DB_QUERY_RETRIES,
DbConstants.DEFAULT_NUM_RETRIES),
DbConstants.DEFAULT_RETRY_EXPONENTIAL_BASE);
}
  /**
   * Make the `create table` statement that can be run on the DB to create the table containing the
   * information required for a PersistedJobInfo object.
   *
   * @param tableName the table name to use in the DB
   * @return a SQL command that could be executed to create the state table
   */
  public static String getCreateTableSql(String tableName) {
    // MySQL-specific DDL (AUTO_INCREMENT, ENGINE=InnoDB, 767-byte prefix index lengths).
    // NOTE(review): tableName is interpolated directly into the DDL - callers are expected
    // to pass a trusted, internally-configured table name, not user input.
    return String.format("CREATE TABLE `%s` (\n" + " `id` bigint(20) NOT NULL AUTO_INCREMENT,\n"
        + " `create_time` timestamp DEFAULT 0, \n"
        + " `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON "
        + " UPDATE CURRENT_TIMESTAMP,\n"
        + " `operation` varchar(256) DEFAULT NULL,\n" + " `status` varchar(4000) DEFAULT NULL,\n"
        + " `src_path` varchar(4000) DEFAULT NULL,\n"
        + " `src_cluster` varchar(256) DEFAULT NULL,\n"
        + " `src_db` varchar(4000) DEFAULT NULL,\n" + " `src_table` varchar(4000) DEFAULT NULL,\n"
        + " `src_partitions` mediumtext DEFAULT NULL,\n"
        + " `src_tldt` varchar(4000) DEFAULT NULL,\n"
        + " `rename_to_db` varchar(4000) DEFAULT NULL,\n"
        + " `rename_to_table` varchar(4000) DEFAULT NULL,\n"
        + " `rename_to_partition` varchar(4000) DEFAULT NULL,\n"
        + " `rename_to_path` varchar(4000), \n" + " `extras` mediumtext, \n"
        + " PRIMARY KEY (`id`),\n" + " KEY `update_time_index` (`update_time`),\n"
        + " KEY `src_cluster_index` (`src_cluster`),\n" + " KEY `src_db_index` (`src_db`(767)),\n"
        + " KEY `src_table_index` (`src_table`(767))\n" + ") ENGINE=InnoDB", tableName);
  }
/**
* Changes the state for all jobs that are not finished to ABORTED.
*
* @throws SQLException if there is an error querying the DB
*/
public synchronized void abortRunnableFromDb() throws SQLException {
// Convert from ['a', 'b'] to "'a', 'b'"
String completedStateList = StringUtils.join(", ",
Lists.transform(Arrays.asList(completedStateStrings), new Function<String, String>() {
public String apply(String str) {
return String.format("'%s'", str);
}
}));
String query = String.format("UPDATE %s SET status = 'ABORTED' " + "WHERE status NOT IN (%s)",
dbTableName, completedStateList);
Connection connection = dbConnectionFactory.getConnection();
Statement statement = connection.createStatement();
statement.execute(query);
}
/**
* Gets all the jobs that have a not completed status (and therefore should be run) from the DB.
*
* @return a list of jobs in the DB that should be run
* @throws SQLException if there's an error querying the DB
*/
public synchronized List<PersistedJobInfo> getRunnableFromDb() throws SQLException {
// Convert from ['a', 'b'] to "'a', 'b'"
String completedStateList = StringUtils.join(", ",
Lists.transform(Arrays.asList(completedStateStrings), new Function<String, String>() {
public String apply(String str) {
return String.format("'%s'", str);
}
}));
String query = String.format("SELECT id, create_time, operation, status, src_path, "
+ "src_cluster, src_db, "
+ "src_table, src_partitions, src_tldt, "
+ "rename_to_db, rename_to_table, rename_to_partition, "
+ "rename_to_path, extras "
+ "FROM %s WHERE status NOT IN (%s) ORDER BY id", dbTableName, completedStateList);
List<PersistedJobInfo> persistedJobInfos = new ArrayList<>();
Connection connection = dbConnectionFactory.getConnection();
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery(query);
while (rs.next()) {
long id = rs.getLong("id");
Optional<Timestamp> createTimestamp = Optional.ofNullable(rs.getTimestamp("create_time"));
long createTime = createTimestamp.map(Timestamp::getTime).orElse(Long.valueOf(0));
ReplicationOperation operation = ReplicationOperation.valueOf(rs.getString("operation"));
ReplicationStatus status = ReplicationStatus.valueOf(rs.getString("status"));
Optional srcPath = Optional.ofNullable(rs.getString("src_path")).map(Path::new);
String srcClusterName = rs.getString("src_cluster");
String srcDbName = rs.getString("src_db");
String srcTableName = rs.getString("src_table");
List<String> srcPartitionNames = new ArrayList<>();
String partitionNamesJson = rs.getString("src_partitions");
if (partitionNamesJson != null) {
srcPartitionNames = ReplicationUtils.convertToList(partitionNamesJson);
}
Optional<String> srcObjectTldt = Optional.ofNullable(rs.getString("src_tldt"));
Optional<String> renameToDbName = Optional.ofNullable(rs.getString("rename_to_db"));
Optional<String> renameToTableName = Optional.ofNullable(rs.getString("rename_to_table"));
Optional<String> renameToPartitionName =
Optional.ofNullable(rs.getString("rename_to_partition"));
Optional<Path> renameToPath =
Optional.ofNullable(rs.getString("rename_to_path")).map(Path::new);
Optional<String> extrasJson = Optional.ofNullable(rs.getString("extras"));
Map<String, String> extras =
extrasJson.map(ReplicationUtils::convertToMap).orElse(new HashMap<>());
PersistedJobInfo persistedJobInfo = new PersistedJobInfo(Optional.of(id), createTime,
operation, status, srcPath, srcClusterName, srcDbName, srcTableName, srcPartitionNames,
srcObjectTldt, renameToDbName, renameToTableName, renameToPartitionName, renameToPath,
extras);
persistedJobInfos.add(persistedJobInfo);
}
return persistedJobInfos;
}
private synchronized void persistHelper(PersistedJobInfo job) throws SQLException, IOException {
String query = "INSERT INTO " + dbTableName
+ " SET " + "id = ?, " + "create_time = ?, "
+ "operation = ?, " + "status = ?, "
+ "src_path = ?, " + "src_cluster = ?, "
+ "src_db = ?, " + "src_table = ?, "
+ "src_partitions = ?, " + "src_tldt = ?, "
+ "rename_to_db = ?, " + "rename_to_table = ?, "
+ "rename_to_partition = ?, "
+ "rename_to_path = ?, "
+ "extras = ? "
+ "ON DUPLICATE KEY UPDATE " + "create_time = ?, "
+ "operation = ?, "
+ "status = ?, "
+ "src_path = ?, "
+ "src_cluster = ?, "
+ "src_db = ?, "
+ "src_table = ?, "
+ "src_partitions = ?, "
+ "src_tldt = ?, "
+ "rename_to_db = ?, "
+ "rename_to_table = ?, "
+ "rename_to_partition = ?, "
+ "rename_to_path = ?, "
+ "extras = ?";
Connection connection = dbConnectionFactory.getConnection();
PreparedStatement ps = connection.prepareStatement(query);
try {
int queryParamIndex = 1;
ps.setLong(queryParamIndex++, job.getId());
ps.setTimestamp(queryParamIndex++, new Timestamp(job.getCreateTime()));
ps.setString(queryParamIndex++, job.getOperation().toString());
ps.setString(queryParamIndex++, job.getStatus().toString());
ps.setString(queryParamIndex++, job.getSrcPath().map(Path::toString).orElse(null));
ps.setString(queryParamIndex++, job.getSrcClusterName());
ps.setString(queryParamIndex++, job.getSrcDbName());
ps.setString(queryParamIndex++, job.getSrcTableName());
ps.setString(queryParamIndex++, ReplicationUtils.convertToJson(job.getSrcPartitionNames()));
ps.setString(queryParamIndex++, job.getSrcObjectTldt().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToDb().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToTable().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToPartition().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToPath().map(Path::toString).orElse(null));
ps.setString(queryParamIndex++, ReplicationUtils.convertToJson(job.getExtras()));
// Handle the update case
ps.setTimestamp(queryParamIndex++, new Timestamp(job.getCreateTime()));
ps.setString(queryParamIndex++, job.getOperation().toString());
ps.setString(queryParamIndex++, job.getStatus().toString());
ps.setString(queryParamIndex++, job.getSrcPath().map(Path::toString).orElse(null));
ps.setString(queryParamIndex++, job.getSrcClusterName());
ps.setString(queryParamIndex++, job.getSrcDbName());
ps.setString(queryParamIndex++, job.getSrcTableName());
ps.setString(queryParamIndex++, ReplicationUtils.convertToJson(job.getSrcPartitionNames()));
ps.setString(queryParamIndex++, job.getSrcObjectTldt().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToDb().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToTable().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToPartition().orElse(null));
ps.setString(queryParamIndex++, job.getRenameToPath().map(Path::toString).orElse(null));
ps.setString(queryParamIndex++, ReplicationUtils.convertToJson(job.getExtras()));
ps.execute();
} finally {
ps.close();
ps = null;
}
}
  /**
   * Updates the job's status in memory and persists the change to the DB.
   *
   * @param status the new status for the job
   * @param job the job to update and persist
   * @throws StateUpdateException if the change could not be written to the DB
   */
  public synchronized void changeStatusAndPersist(ReplicationStatus status, PersistedJobInfo job)
      throws StateUpdateException {
    job.setStatus(status);
    persist(job);
  }
/**
* Persist the data from the job into the DB.
*
* @param job the job to persist
*/
public synchronized void persist(final PersistedJobInfo job) throws StateUpdateException {
try {
retryingTaskRunner.runWithRetries(new RetryableTask() {
@Override
public void run() throws Exception {
persistHelper(job);
}
});
} catch (IOException | SQLException e) {
throw new StateUpdateException(e);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
  /**
   * Inserts rows for the given jobs with a single batched INSERT and completes each job's
   * ID with the auto-generated key from the DB.
   *
   * @param jobs jobs to insert; all must be in the PENDING state
   * @throws IOException if a field cannot be serialized to JSON
   * @throws SQLException if the insert fails
   * @throws StateUpdateException declared for callers; not thrown directly here
   */
  private synchronized void createManyImpl(List<PersistedJobInfo> jobs)
      throws IOException, SQLException, StateUpdateException {
    LOG.debug(String.format("Persisting %d PersistedJobInfos", jobs.size()));
    if (jobs.size() == 0) {
      return;
    }
    String query = generateQuery(jobs.size());
    Connection connection = dbConnectionFactory.getConnection();
    try (PreparedStatement ps =
        connection.prepareStatement(query, Statement.RETURN_GENERATED_KEYS)) {
      int queryParamIndex = 1;
      // Bind the 14 columns for each job, in the same order as the VALUES tuples.
      for (PersistedJobInfo job: jobs) {
        ps.setTimestamp(queryParamIndex++, new Timestamp(job.getCreateTime()));
        ps.setString(queryParamIndex++, job.getOperation().toString());
        ps.setString(queryParamIndex++, job.getStatus().toString());
        ps.setString(queryParamIndex++, job.getSrcPath().map(Path::toString).orElse(null));
        ps.setString(queryParamIndex++, job.getSrcClusterName());
        ps.setString(queryParamIndex++, job.getSrcDbName());
        ps.setString(queryParamIndex++, job.getSrcTableName());
        ps.setString(queryParamIndex++, ReplicationUtils.convertToJson(job.getSrcPartitionNames()));
        ps.setString(queryParamIndex++, job.getSrcObjectTldt().orElse(null));
        ps.setString(queryParamIndex++, job.getRenameToDb().orElse(null));
        ps.setString(queryParamIndex++, job.getRenameToTable().orElse(null));
        ps.setString(queryParamIndex++, job.getRenameToPartition().orElse(null));
        ps.setString(queryParamIndex++, job.getRenameToPath().map(Path::toString).orElse(null));
        ps.setString(queryParamIndex++, ReplicationUtils.convertToJson(job.getExtras()));
      }
      ps.execute();
      // NOTE(review): assumes getGeneratedKeys() returns keys in insertion order, which holds
      // for MySQL AUTO_INCREMENT multi-row inserts - confirm if porting to another DB.
      ResultSet rs = ps.getGeneratedKeys();
      for (PersistedJobInfo j : jobs) {
        rs.next();
        j.setPersisted(rs.getLong(1));
      }
    }
  }
/**
 * Persists PENDING PersistedJobInfos to the DB.
 *
 * @param jobs a list of PersistedJobInfos in the PENDING state
 * @throws StateUpdateException if there is a SQLException or any Infos are not PENDING
 */
public synchronized void createMany(List<PersistedJobInfo> jobs)
    throws StateUpdateException {
  // Reject the whole batch up front if any job was already written to the DB.
  for (PersistedJobInfo jobInfo : jobs) {
    boolean alreadyPersisted =
        jobInfo.getPersistState() == PersistedJobInfo.PersistState.PERSISTED;
    if (alreadyPersisted) {
      throw new StateUpdateException("Tried to persist already persisted PersistedJobInfo.");
    }
  }
  try {
    retryingTaskRunner.runWithRetries(() -> createManyImpl(jobs));
  } catch (StateUpdateException e) {
    // Already the declared type; propagate unchanged.
    throw e;
  } catch (IOException | SQLException e) {
    throw new StateUpdateException(e);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
/**
 * Build a multi-row INSERT statement for the job info table containing
 * {@code len} parameterized value tuples.
 *
 * @param len the number of rows the INSERT should cover; must be &gt;= 1
 * @return the INSERT SQL string with JDBC '?' placeholders
 */
private String generateQuery(int len) {
  final String valuesStr = "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
  // StringBuilder instead of StringBuffer: there is no concurrent access here,
  // so the unsynchronized builder is the idiomatic (and faster) choice.
  StringBuilder sb = new StringBuilder();
  sb.append(
      "INSERT INTO " + dbTableName + " (create_time, operation, status, src_path, "
      + "src_cluster, src_db, src_table, src_partitions, src_tldt, rename_to_db, "
      + "rename_to_table, rename_to_partition, rename_to_path, extras) VALUES ");
  for (int i = 1; i < len; i++) {
    sb.append(valuesStr);
    sb.append(" , ");
  }
  sb.append(valuesStr);
  return sb.toString();
}
/**
 * Look up a single persisted job by its primary key.
 *
 * @param id the id of the row to fetch
 * @return the job stored under that id, or null if no such row exists
 * @throws SQLException if the query fails
 */
private synchronized PersistedJobInfo getJob(long id) throws SQLException {
  String query = "SELECT id, create_time, operation, status, src_path, " + "src_cluster, src_db, "
      + "src_table, src_partitions, src_tldt, "
      + "rename_to_db, rename_to_table, rename_to_partition, " + "rename_to_path, extras "
      + "FROM " + dbTableName + " WHERE id = ?";
  Connection connection = dbConnectionFactory.getConnection();
  // try-with-resources: the old code leaked both the statement and result set.
  try (PreparedStatement ps = connection.prepareStatement(query)) {
    // Bug fix: bind the parameter and execute the *prepared* statement. The old
    // code called ps.executeQuery(query), which re-executes the raw SQL text,
    // ignores bound parameters, and fails on the '?' placeholder.
    ps.setLong(1, id);
    try (ResultSet rs = ps.executeQuery()) {
      if (rs.next()) {
        Optional<Timestamp> ts = Optional.ofNullable(rs.getTimestamp("create_time"));
        long createTime = ts.map(Timestamp::getTime).orElse(Long.valueOf(0));
        ReplicationOperation operation = ReplicationOperation.valueOf(rs.getString("operation"));
        ReplicationStatus status = ReplicationStatus.valueOf(rs.getString("status"));
        Optional<Path> srcPath = Optional.ofNullable(rs.getString("src_path")).map(Path::new);
        String srcClusterName = rs.getString("src_cluster");
        String srcDbName = rs.getString("src_db");
        String srcTableName = rs.getString("src_table");
        List<String> srcPartitionNames = new ArrayList<>();
        String partitionNamesJson = rs.getString("src_partitions");
        if (partitionNamesJson != null) {
          srcPartitionNames = ReplicationUtils.convertToList(partitionNamesJson);
        }
        // Bug fix: these columns are nullable (persistHelper writes null for
        // absent Optionals), so Optional.of() would NPE on a NULL column.
        Optional<String> srcObjectTldt = Optional.ofNullable(rs.getString("src_tldt"));
        Optional<String> renameToDbName = Optional.ofNullable(rs.getString("rename_to_db"));
        Optional<String> renameToTableName = Optional.ofNullable(rs.getString("rename_to_table"));
        Optional<String> renameToPartitionName =
            Optional.ofNullable(rs.getString("rename_to_partition"));
        Optional<Path> renameToPath =
            Optional.ofNullable(rs.getString("rename_to_path")).map(Path::new);
        String extrasJson = rs.getString("extras");
        Map<String, String> extras = new HashMap<>();
        if (extrasJson != null) {
          extras = ReplicationUtils.convertToMap(extrasJson);
        }
        return new PersistedJobInfo(Optional.of(id), createTime,
            operation, status, srcPath, srcClusterName, srcDbName, srcTableName,
            srcPartitionNames, srcObjectTldt, renameToDbName, renameToTableName,
            renameToPartitionName, renameToPath, extras);
      }
      // No row with this id.
      return null;
    }
  }
}
}
| 9,519 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/db/DbConstants.java | package com.airbnb.reair.incremental.db;
/**
 * Constants used for DB operations.
 */
public class DbConstants {
  // Default number of retries to run when a DB operation fails
  public static final int DEFAULT_NUM_RETRIES = 9;

  // When sleeping between retries, use this base number when calculating the exponential delay
  public static final int DEFAULT_RETRY_EXPONENTIAL_BASE = 2;

  // Constants holder; not meant to be instantiated.
  private DbConstants() {}
}
| 9,520 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/filter/RemoveThriftEntriesReplicationFilter.java | package com.airbnb.reair.incremental.filter;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.incremental.auditlog.AuditLogEntry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
/**
 * Filter that drops Thrift-generated entries from the audit log and passes
 * everything else through. Individual tables and partitions are never rejected.
 */
public class RemoveThriftEntriesReplicationFilter implements ReplicationFilter {

  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public boolean accept(AuditLogEntry entry) {
    // Classify the entry first, then invert: Thrift events are rejected,
    // all other command types are accepted.
    final boolean isThriftEvent;
    switch (entry.getCommandType()) {
      case THRIFT_ADD_PARTITION:
      case THRIFT_ALTER_PARTITION:
      case THRIFT_ALTER_TABLE:
      case THRIFT_CREATE_DATABASE:
      case THRIFT_CREATE_TABLE:
      case THRIFT_DROP_DATABASE:
      case THRIFT_DROP_PARTITION:
      case THRIFT_DROP_TABLE:
        isThriftEvent = true;
        break;
      default:
        isThriftEvent = false;
        break;
    }
    return !isThriftEvent;
  }

  @Override
  public boolean accept(Table table) {
    // Table-level acceptance is the (table, partition) check with no partition.
    return accept(table, null);
  }

  @Override
  public boolean accept(Table table, NamedPartition partition) {
    // Objects themselves are never filtered by this class.
    return true;
  }
}
| 9,521 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/filter/PassThoughReplicationFilter.java | package com.airbnb.reair.incremental.filter;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.incremental.auditlog.AuditLogEntry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
/**
 * A no-op filter: every audit log entry, table, and partition is accepted.
 */
public class PassThoughReplicationFilter implements ReplicationFilter {

  @Override
  public void setConf(Configuration conf) {
    // No configuration is needed for a pass-through filter.
  }

  @Override
  public boolean accept(AuditLogEntry entry) {
    return true;
  }

  @Override
  public boolean accept(Table table) {
    return true;
  }

  @Override
  public boolean accept(Table table, NamedPartition partition) {
    return true;
  }
}
| 9,522 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/filter/ThriftLogReplicationFilter.java | package com.airbnb.reair.incremental.filter;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.hive.hooks.HiveOperation;
import com.airbnb.reair.incremental.auditlog.AuditLogEntry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
/**
 * Filter that keeps only Thrift metastore events — plus
 * ALTERTABLE_EXCHANGEPARTITION, which appears only in the normal audit log —
 * and rejects every other command type.
 */
public class ThriftLogReplicationFilter implements ReplicationFilter {

  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public boolean accept(AuditLogEntry entry) {
    final boolean replicate;
    switch (entry.getCommandType()) {
      case THRIFT_ADD_PARTITION:
      case THRIFT_ALTER_PARTITION:
      case THRIFT_ALTER_TABLE:
      case THRIFT_CREATE_DATABASE:
      case THRIFT_CREATE_TABLE:
      case THRIFT_DROP_DATABASE:
      case THRIFT_DROP_PARTITION:
      case THRIFT_DROP_TABLE:
      // For completeness, exchange-partition events must be replicated from the
      // normal audit log in addition to the Thrift events above.
      case ALTERTABLE_EXCHANGEPARTITION:
        replicate = true;
        break;
      default:
        replicate = false;
        break;
    }
    return replicate;
  }

  @Override
  public boolean accept(Table table) {
    // Table-level acceptance is the (table, partition) check with no partition.
    return accept(table, null);
  }

  @Override
  public boolean accept(Table table, NamedPartition partition) {
    // Objects themselves are never filtered by this class.
    return true;
  }
}
| 9,523 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/filter/RegexReplicationFilter.java | package com.airbnb.reair.incremental.filter;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.incremental.auditlog.AuditLogEntry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
/**
 * Filters out objects from the audit log using regular expressions specified in the configuration.
 *
 * <p>An object is accepted when its "db.table/partition" spec string matches the
 * whitelist regex and does not match the blacklist regex.
 */
public class RegexReplicationFilter implements ReplicationFilter {
  private static final Log LOG = LogFactory.getLog(RegexReplicationFilter.class);

  public static final String WHITELIST_REGEX_KEY = "airbnb.reair.whitelist.regex";
  public static final String BLACKLIST_REGEX_KEY = "airbnb.reair.blacklist.regex";

  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public boolean accept(AuditLogEntry entry) {
    // Entry-level filtering is not regex based; objects are checked instead.
    return true;
  }

  @Override
  public boolean accept(Table table) {
    return accept(table, null);
  }

  @Override
  public boolean accept(Table table, NamedPartition partition) {
    String partitionName = (partition == null) ? null : partition.getName();
    return matchesRegex(table.getDbName(), table.getTableName(), partitionName);
  }

  private boolean matchesRegex(String dbName, String tableName, String partitionName) {
    String fullName = new HiveObjectSpec(dbName, tableName, partitionName).toString();

    // An object must match the whitelist to be considered at all.
    String whitelistRegex = conf.get(WHITELIST_REGEX_KEY);
    if (whitelistRegex == null) {
      LOG.warn("Missing value for whitelist key: " + WHITELIST_REGEX_KEY);
      return false;
    }
    boolean whitelisted = fullName.matches(whitelistRegex);
    if (!whitelisted) {
      return false;
    }

    String blacklistRegex = conf.get(BLACKLIST_REGEX_KEY);
    if (blacklistRegex == null) {
      LOG.warn("Missing value for blacklist key: " + BLACKLIST_REGEX_KEY);
      // It can be accepted since it passed the whitelist
      return true;
    }
    return !fullName.matches(blacklistRegex);
  }
}
| 9,524 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/filter/ReplicationFilter.java | package com.airbnb.reair.incremental.filter;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.incremental.auditlog.AuditLogEntry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
/**
 * Interface for filtering out objects to replicate.
 *
 * <p>Implementations are handed the Hadoop configuration via
 * {@link #setConf(Configuration)} before any of the accept methods are called.
 */
public interface ReplicationFilter {

  /**
   * Supply the Hadoop configuration that the filter may read its settings from.
   *
   * @param conf the configuration to use
   */
  void setConf(Configuration conf);

  /**
   * Check to see if the given entry should be replicated.
   *
   * @param entry audit log entry to examine
   * @return whether or not the given audit log entry should be accepted
   */
  boolean accept(AuditLogEntry entry);

  /**
   * Check to see if the given entry should be replicated.
   *
   * @param table Hive Thrift table object to examine
   * @return whether or not the given table should be accepted
   */
  boolean accept(Table table);

  /**
   * Check to see if the given entry should be replicated.
   *
   * @param table table associated with the partition
   * @param partition partition to examine
   * @return whether or not the partition should be accepted
   */
  boolean accept(Table table, NamedPartition partition);
}
| 9,525 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/auditlog/AuditLogReader.java | package com.airbnb.reair.incremental.auditlog;
import com.airbnb.reair.common.Container;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.db.DbConnectionFactory;
import com.airbnb.reair.hive.hooks.HiveOperation;
import com.airbnb.reair.incremental.MetadataException;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.db.DbConstants;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.utils.RetryableTask;
import com.airbnb.reair.utils.RetryingTaskRunner;
import org.apache.commons.collections.list.SynchronizedList;
import org.apache.commons.lang.math.LongRange;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Queue;
/**
 * Reads entries from the Hive audit log.
 */
public class AuditLogReader {
  private static final Log LOG = LogFactory.getLog(AuditLogReader.class);

  // Number of audit log rows to pull from the DB per query.
  private static final int ROW_FETCH_SIZE = 200;

  private DbConnectionFactory dbConnectionFactory;
  private String auditLogTableName;
  private String outputObjectsTableName;
  private String mapRedStatsTableName;
  // Reads resume with IDs strictly greater than this value.
  private long lastReadId;
  // Entries already fetched from the DB but not yet handed out by next().
  private Queue<AuditLogEntry> auditLogEntries;
  private RetryingTaskRunner retryingTaskRunner;

  /**
   * Constructs an AuditLogReader.
   *
   * @param conf configuration used to look up the DB query retry count
   * @param dbConnectionFactory factory for creating connections to the DB where the log resides
   * @param auditLogTableName name of the table on the DB that contains the audit log entries
   * @param outputObjectsTableName name of the table on the DB that contains serialized objects
   * @param mapRedStatsTableName name of the table on the DB that contains job stats
   * @param getIdsAfter start reading entries from the audit log after this ID value
   * @throws SQLException declared for API compatibility; not thrown directly here
   */
  public AuditLogReader(
      Configuration conf,
      DbConnectionFactory dbConnectionFactory,
      String auditLogTableName,
      String outputObjectsTableName,
      String mapRedStatsTableName,
      long getIdsAfter) throws SQLException {
    this.dbConnectionFactory = dbConnectionFactory;
    this.auditLogTableName = auditLogTableName;
    this.outputObjectsTableName = outputObjectsTableName;
    this.mapRedStatsTableName = mapRedStatsTableName;
    this.lastReadId = getIdsAfter;
    auditLogEntries = new LinkedList<>();
    this.retryingTaskRunner = new RetryingTaskRunner(
        conf.getInt(ConfigurationKeys.DB_QUERY_RETRIES,
            DbConstants.DEFAULT_NUM_RETRIES),
        DbConstants.DEFAULT_RETRY_EXPONENTIAL_BASE);
  }

  /**
   * Return the next audit log entry from the DB. If there is an error connecting to the DB, retry.
   *
   * @return the next audit log entry, or empty if there are no more entries
   *
   * @throws SQLException if there is an error querying the DB
   * @throws AuditLogEntryException if there is an error reading the audit log entry
   */
  public synchronized Optional<AuditLogEntry> resilientNext()
      throws AuditLogEntryException, SQLException {
    final Container<Optional<AuditLogEntry>> ret = new Container<>();
    try {
      retryingTaskRunner.runWithRetries(new RetryableTask() {
        @Override
        public void run() throws Exception {
          ret.set(next());
        }
      });
    } catch (SQLException | AuditLogEntryException e) {
      // These should be the only exceptions thrown by next()
      throw e;
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return ret.get();
  }

  /**
   * Returns (up to) the next N results. If we pass max retries, an exception is thrown,
   * even if some results were retrieved successfully.
   *
   * <p>Note: results accumulated before a retry are kept, so a retried run
   * continues filling the same list rather than starting over.
   *
   * @param maxResults the max amount of results returned (fewer are returned if fewer exist)
   * @return A list of AuditLogEntries
   * @throws AuditLogEntryException if the AuditLogEntry has issues
   * @throws SQLException if SQL has issues
   */
  public synchronized List<AuditLogEntry> resilientNext(int maxResults)
      throws AuditLogEntryException, SQLException {
    // Fix: removed an unused Container local that the old code allocated but never read.
    List<AuditLogEntry> results = Collections.synchronizedList(new ArrayList<>());
    try {
      retryingTaskRunner.runWithRetries(new RetryableTask() {
        @Override
        public void run() throws Exception {
          while (results.size() < maxResults) {
            Optional<AuditLogEntry> entry = next();
            if (!entry.isPresent()) {
              return;
            }
            results.add(entry.get());
          }
        }
      });
    } catch (SQLException | AuditLogEntryException e) {
      // These should be the only exceptions thrown
      throw e;
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return results;
  }

  /**
   * Return the next audit log entry from the DB, fetching a new batch when the
   * in-memory queue is exhausted.
   *
   * @return the next audit log entry, or empty if there are no more entries
   *
   * @throws SQLException if there is an error querying the DB
   * @throws AuditLogEntryException if there is an error reading the audit log entry
   */
  public synchronized Optional<AuditLogEntry> next() throws SQLException, AuditLogEntryException {
    if (auditLogEntries.size() > 0) {
      return Optional.of(auditLogEntries.remove());
    }
    LOG.debug("Executing queries to try to get more audit log entries from the DB");
    fetchMoreEntries();
    if (auditLogEntries.size() > 0) {
      return Optional.of(auditLogEntries.remove());
    } else {
      return Optional.empty();
    }
  }

  /**
   * From the output column in the audit log table, return the partition name. An example is
   * "default.table/ds=1" => "ds=1".
   *
   * @param outputCol the output column in the audit log table
   * @return the partition name
   */
  private String getPartitionNameFromOutputCol(String outputCol) {
    return outputCol.substring(outputCol.indexOf("/") + 1);
  }

  /**
   * Convert an operation name into the corresponding enum value, or null when
   * the name is null or not a known HiveOperation.
   */
  private HiveOperation convertToHiveOperation(String operation) {
    if (operation == null) {
      return null;
    }
    try {
      return HiveOperation.valueOf(operation);
    } catch (IllegalArgumentException e) {
      return null;
    }
  }

  /**
   * Given that we start reading after lastReadId and need to get
   * ROW_FETCH_SIZE rows from the audit log, figure out the min and max row
   * IDs to read.
   *
   * @return a range of ID's to read from the audit log table based on the fetch size,
   *         or (0, 0) when there is nothing left to read
   * @throws SQLException if there is an error reading from the DB
   */
  private LongRange getIdsToRead() throws SQLException {
    String queryFormatString = "SELECT MIN(id) min_id, MAX(id) max_id "
        + "FROM (SELECT id FROM %s WHERE id > %s "
        + "AND (command_type IS NULL OR command_type NOT IN('SHOWTABLES', 'SHOWPARTITIONS', "
        + "'SWITCHDATABASE')) "
        + "ORDER BY id "
        + "LIMIT %s)"
        + " subquery "
        // Get read locks on the specified rows to prevent skipping of rows that haven't committed
        // yet, but have an id that matches the where clause.
        // For example, one transaction starts and
        // inserts id = 1, but another transaction starts, inserts, and commits i = 2 before the
        // first transaction commits. Locking can also be done with serializable isolation level.
        + "LOCK IN SHARE MODE";
    String query = String.format(queryFormatString, auditLogTableName, lastReadId, ROW_FETCH_SIZE);
    Connection connection = dbConnectionFactory.getConnection();
    LOG.debug("Executing: " + query);
    // try-with-resources so the statement and result set are always released
    // (the old code leaked both).
    try (PreparedStatement ps = connection.prepareStatement(query);
        ResultSet rs = ps.executeQuery()) {
      if (rs.next()) {
        return new LongRange(rs.getLong("min_id"), rs.getLong("max_id"));
      }
      return new LongRange(0, 0);
    }
  }

  /**
   * Query the DB for the next batch of audit log rows and convert them into
   * AuditLogEntry objects on the internal queue.
   *
   * @throws SQLException if there is an error reading from the DB
   * @throws AuditLogEntryException if a serialized Thrift object cannot be deserialized
   */
  private void fetchMoreEntries() throws SQLException, AuditLogEntryException {
    LongRange idsToRead = getIdsToRead();
    // No more entries to read
    if (idsToRead.getMaximumLong() == 0) {
      return;
    }
    // TODO: Remove left outer join and command type filter once the
    // exchange partition bug is fixed in HIVE-12215
    String queryFormatString = "SELECT a.id, a.create_time, "
        + "command_type, command, name, category, "
        + "type, serialized_object "
        + "FROM %s a LEFT OUTER JOIN %s b on a.id = b.audit_log_id "
        + "WHERE a.id >= ? AND a.id <= ? "
        + "AND (command_type IS NULL OR command_type "
        + "NOT IN('SHOWTABLES', 'SHOWPARTITIONS', 'SWITCHDATABASE')) "
        + "ORDER BY id "
        // Get read locks on the specified rows to prevent skipping of rows that haven't committed
        // yet, but have an ID between idsToRead. For example, one transaction starts and
        // inserts id = 1, but another transaction starts, inserts, and commits i=2 before the
        // first transaction commits. Locking can also be done with serializable isolation level.
        + "LOCK IN SHARE MODE";
    // Fix: the format string has exactly two %s placeholders (the table names);
    // the ID bounds are bound below as statement parameters, so they are no
    // longer passed to String.format as ignored extra arguments.
    String query = String.format(queryFormatString, auditLogTableName, outputObjectsTableName);
    Connection connection = dbConnectionFactory.getConnection();
    try (PreparedStatement ps = connection.prepareStatement(query)) {
      int index = 1;
      ps.setLong(index++, idsToRead.getMinimumLong());
      ps.setLong(index++, idsToRead.getMaximumLong());
      try (ResultSet rs = ps.executeQuery()) {
        long id = -1;
        Timestamp createTime = null;
        HiveOperation commandType = null;
        String command = null;
        String objectName;
        String objectCategory;
        String objectType;
        String objectSerialized;
        long previouslyReadId = -1;
        Timestamp previouslyReadTs = null;
        HiveOperation previousCommandType = null;
        String previousCommand = null;
        // For a given audit log ID, the join would have produced multiple rows
        // for each ID. Each row contains a single output. Group all the rows
        // and the outputs into a AuditLogEntry.
        // For a given audit log ID, these accumulate the outputs from the
        // different rows.
        List<String> outputDirectories = new LinkedList<>();
        List<Table> outputTables = new LinkedList<>();
        List<NamedPartition> outputPartitions = new LinkedList<>();
        List<Table> referenceTables = new LinkedList<>();
        Table inputTable = null;
        NamedPartition renameFromPartition = null;
        while (rs.next()) {
          id = rs.getLong("id");
          createTime = rs.getTimestamp("create_time");
          // Invalid operations are returned as null
          String commandTypeString = rs.getString("command_type");
          commandType = convertToHiveOperation(commandTypeString);
          if (commandType == null) {
            LOG.debug(String.format("Invalid operation %s in audit log id: %s",
                commandTypeString, id));
          }
          command = rs.getString("command");
          objectName = rs.getString("name");
          objectCategory = rs.getString("category");
          objectType = rs.getString("type");
          objectSerialized = rs.getString("serialized_object");
          if (previouslyReadId != -1 && id != previouslyReadId) {
            lastReadId = previouslyReadId;
            // This means that all the outputs for a given audit log entry
            // has been read.
            AuditLogEntry entry = new AuditLogEntry(
                previouslyReadId,
                previouslyReadTs,
                previousCommandType,
                previousCommand,
                outputDirectories,
                referenceTables,
                outputTables,
                outputPartitions,
                inputTable,
                renameFromPartition);
            auditLogEntries.add(entry);
            // Reset these accumulated values
            outputDirectories = new LinkedList<>();
            referenceTables = new LinkedList<>();
            outputTables = new LinkedList<>();
            outputPartitions = new LinkedList<>();
            renameFromPartition = null;
            inputTable = null;
          }
          previouslyReadId = id;
          previouslyReadTs = createTime;
          previousCommandType = commandType;
          previousCommand = command;
          if ("DIRECTORY".equals(objectType)) {
            outputDirectories.add(objectName);
          } else if ("TABLE".equals(objectType)) {
            Table table = new Table();
            try {
              ReplicationUtils.deserializeObject(objectSerialized, table);
            } catch (MetadataException e) {
              throw new AuditLogEntryException(e);
            }
            ReplicationUtils.normalizeNames(table);
            if ("OUTPUT".equals(objectCategory)) {
              outputTables.add(table);
            } else if ("REFERENCE_TABLE".equals(objectCategory)) {
              referenceTables.add(table);
            } else if ("RENAME_FROM".equals(objectCategory) || "INPUT".equals(objectCategory)) {
              inputTable = table;
            } else {
              throw new RuntimeException("Unhandled category: " + objectCategory);
            }
          } else if ("PARTITION".equals(objectType)) {
            Partition partition = new Partition();
            try {
              ReplicationUtils.deserializeObject(objectSerialized, partition);
            } catch (MetadataException e) {
              throw new AuditLogEntryException(e);
            }
            ReplicationUtils.normalizeNames(partition);
            String partitionName = getPartitionNameFromOutputCol(objectName);
            NamedPartition namedPartition = new NamedPartition(partitionName, partition);
            if ("OUTPUT".equals(objectCategory)) {
              outputPartitions.add(namedPartition);
            } else if ("RENAME_FROM".equals(objectCategory) || "INPUT".equals(objectCategory)) {
              renameFromPartition = namedPartition;
            } else {
              throw new RuntimeException("Unhandled category: " + objectCategory);
            }
          } else if ("DFS_DIR".equals(objectType)) {
            outputDirectories.add(objectName);
          } else if ("LOCAL_DIR".equals(objectType)) {
            outputDirectories.add(objectName);
          } else if ("DATABASE".equals(objectType)) {
            // Currently, nothing is done with DB's
          } else if (objectType == null) {
            // This will happen for queries that don't have any output
            // objects. This can be removed a long with the OUTER aspect
            // of the join above once the bug with exchange partitions is
            // fixed.
            LOG.debug("No output objects");
          } else {
            throw new RuntimeException("Unhandled output type: " + objectType);
          }
        }
        // This is the case where we read to the end of the table.
        if (id != -1) {
          AuditLogEntry entry = new AuditLogEntry(
              id,
              createTime,
              commandType,
              command,
              outputDirectories,
              referenceTables,
              outputTables,
              outputPartitions,
              inputTable,
              renameFromPartition);
          auditLogEntries.add(entry);
        }
        // Note: if we constantly get empty results (i.e. no valid entries
        // because all the commands got filtered out), then the lastReadId won't
        // be updated for a while.
        lastReadId = idsToRead.getMaximumLong();
      }
    }
  }

  /**
   * Change the reader to start reading entries after this ID.
   *
   * @param readAfterId ID to configure the reader to read after
   */
  public synchronized void setReadAfterId(long readAfterId) {
    this.lastReadId = readAfterId;
    // Clear the audit log entries since it's possible that the reader
    // fetched a bunch of entries in advance, and the ID of those entries
    // may not line up with the new read-after ID.
    auditLogEntries.clear();
  }

  /**
   * Return the highest value ID in the audit log table.
   *
   * @return the highest value ID in the audit log table or empty if it does not exist
   * @throws SQLException if there an exception reading from the DB
   */
  public synchronized Optional<Long> getMaxId() throws SQLException {
    String query = String.format("SELECT MAX(id) FROM %s", auditLogTableName);
    Connection connection = dbConnectionFactory.getConnection();
    try (PreparedStatement ps = connection.prepareStatement(query);
        ResultSet rs = ps.executeQuery()) {
      rs.next();
      long maxId = rs.getLong(1);
      // Bug fix: getLong() returns a primitive, so the old
      // Optional.ofNullable(rs.getLong(1)) could never be empty and reported 0
      // for an empty table. wasNull() distinguishes SQL NULL from a real value.
      return rs.wasNull() ? Optional.empty() : Optional.of(maxId);
    }
  }
}
| 9,526 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/auditlog/AuditLogEntryException.java | package com.airbnb.reair.incremental.auditlog;
/**
 * Exception thrown when an audit log entry cannot be read or converted
 * (e.g. a serialized Thrift object in the log fails to deserialize).
 */
public class AuditLogEntryException extends Exception {

  public AuditLogEntryException() {
    super();
  }

  public AuditLogEntryException(String message) {
    super(message);
  }

  public AuditLogEntryException(String message, Throwable cause) {
    super(message, cause);
  }

  public AuditLogEntryException(Throwable cause) {
    super(cause);
  }
}
| 9,527 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/auditlog/MetricNames.java | package com.airbnb.reair.incremental.auditlog;
/**
 * Contains string names of metrics that are reported to statsd.
 */
public class MetricNames {
  // Count of tasks by status in ReplicationCounters; %s is filled with the status name
  public static final String REPLICATION_STATUS_COUNT = "replication.%s";

  // Count of jobs above age thresholds in ReplicationJobRegistry; %d is the threshold in seconds
  public static final String REPLICATION_JOBS_AGE_COUNT = "replication_jobs.age.%ds";

  // Counts how many jobs are in memory
  public static final String JOBS_IN_MEMORY_GAUGE = "jobs_in_memory";

  // Counts the number of audit log entries read
  public static final String AUDIT_LOG_ENTRIES_COUNT = "audit_log_entries_read";

  // Counts the number of jobs persisted to the database
  public static final String PERSISTED_JOBS_COUNT = "replication_jobs_created";

  // Constants holder; not meant to be instantiated.
  private MetricNames() {}
}
| 9,528 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental | Create_ds/reair/main/src/main/java/com/airbnb/reair/incremental/auditlog/AuditLogEntry.java | package com.airbnb.reair.incremental.auditlog;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.common.NamedPartition;
import com.airbnb.reair.hive.hooks.HiveOperation;
import org.apache.hadoop.hive.metastore.api.Table;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
/**
 * A single entry read from the Hive audit log, grouping the command with all
 * of its output objects.
 */
public class AuditLogEntry {
  // The audit log has more fields, but only these are relevant for
  // replication.
  private long id;
  private Timestamp createTime;
  private String command;
  private HiveOperation commandType;
  private List<String> outputDirectories;
  private List<Table> referenceTables;
  private List<Table> outputTables;
  private List<NamedPartition> outputPartitions;
  private Table inputTable;
  private NamedPartition inputPartition;

  /**
   * Constructs AuditLogEntry using specific values.
   *
   * @param id ID of the row in the DB
   * @param createTime time that the audit log entry was created
   * @param commandType type of Hive command e.g QUERY
   * @param command the command string e.g. 'CREATE TABLE...'
   * @param outputDirectories for queries that write to directories, the directories that were
   *                          written
   * @param referenceTables the partition's table if the outputs include partitions
   * @param outputTables tables that were changed
   * @param outputPartitions partitions that were changed
   * @param inputTable if renaming a table, the table that was renamed from
   * @param inputPartition if renaming a partition, the partition that was renamed from.
   */
  public AuditLogEntry(
      long id,
      Timestamp createTime,
      HiveOperation commandType,
      String command,
      List<String> outputDirectories,
      List<Table> referenceTables,
      List<Table> outputTables,
      List<NamedPartition> outputPartitions,
      Table inputTable,
      NamedPartition inputPartition) {
    this.id = id;
    this.createTime = createTime;
    this.commandType = commandType;
    this.command = command;
    this.outputDirectories = outputDirectories;
    this.referenceTables = referenceTables;
    this.outputTables = outputTables;
    this.outputPartitions = outputPartitions;
    this.inputTable = inputTable;
    this.inputPartition = inputPartition;
  }

  public long getId() {
    return id;
  }

  public Timestamp getCreateTime() {
    return createTime;
  }

  public HiveOperation getCommandType() {
    return commandType;
  }

  @Override
  public String toString() {
    // Render tables and partitions through HiveObjectSpec so they print as
    // "db.table" / "db.table/partition" specs rather than full Thrift dumps.
    List<String> tableSpecs = new ArrayList<>();
    for (Table outputTable : outputTables) {
      tableSpecs.add(new HiveObjectSpec(outputTable).toString());
    }
    List<String> partitionSpecs = new ArrayList<>();
    for (NamedPartition outputPartition : outputPartitions) {
      partitionSpecs.add(new HiveObjectSpec(outputPartition).toString());
    }
    List<String> referenceSpecs = new ArrayList<>();
    for (Table referenceTable : referenceTables) {
      referenceSpecs.add(new HiveObjectSpec(referenceTable).toString());
    }
    return "AuditLogEntry{"
        + "id=" + id
        + ", createTime=" + createTime
        + ", commandType=" + commandType
        + ", outputDirectories=" + outputDirectories
        + ", referenceTables=" + referenceSpecs
        + ", outputTables=" + tableSpecs
        + ", outputPartitions=" + partitionSpecs
        + ", inputTable=" + inputTable
        + ", inputPartition=" + inputPartition
        + '}';
  }

  public List<String> getOutputDirectories() {
    return outputDirectories;
  }

  public List<Table> getOutputTables() {
    return outputTables;
  }

  public List<NamedPartition> getOutputPartitions() {
    return outputPartitions;
  }

  public List<Table> getReferenceTables() {
    return referenceTables;
  }

  public Table getInputTable() {
    return inputTable;
  }

  public NamedPartition getInputPartition() {
    return inputPartition;
  }

  public String getCommand() {
    return command;
  }
}
| 9,529 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/BatchUtils.java | package com.airbnb.reair.batch;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;
import java.io.IOException;
/**
 * Utilities for batch replication.
 */
public class BatchUtils {
  private static final Log LOG = LogFactory.getLog(BatchUtils.class);

  /**
   * Executes a file copy.
   *
   * <p>The file is first written to a uniquely named temporary file under {@code tmpDirPath}
   * and then renamed into place, so a partially written destination file is never visible.
   * The whole sequence is retried up to 3 times on IOException.
   *
   * @param conf Hadoop configuration object
   * @param srcFileStatus Status of the source file
   * @param srcFs Source FileSystem
   * @param dstDir Destination directory
   * @param dstFs Destination FileSystem
   * @param tmpDirPath Temporary copy staging location.
   * @param progressable A progressable object to progress during long file copies
   * @param forceUpdate Whether to force a copy even if an equally sized destination exists
   * @param identifier Identifier to use in the temporary file
   * @return An error string or null if successful
   */
  public static String doCopyFileAction(
      Configuration conf,
      SimpleFileStatus srcFileStatus,
      FileSystem srcFs,
      String dstDir,
      FileSystem dstFs,
      Path tmpDirPath,
      Progressable progressable,
      boolean forceUpdate,
      String identifier) {
    // TODO: Should be configurable
    int retry = 3;
    String lastError = null;
    while (retry > 0) {
      try {
        Path srcPath = new Path(srcFileStatus.getFullPath());
        if (!srcFs.exists(srcPath)) {
          LOG.info("Src does not exist. " + srcFileStatus.getFullPath());
          return "Src does not exist. " + srcFileStatus.getFullPath();
        }
        FileStatus srcStatus = srcFs.getFileStatus(srcPath);
        Path dstPath = new Path(dstDir, srcFileStatus.getFileName());
        // if dst already exists.
        if (dstFs.exists(dstPath)) {
          FileStatus dstStatus = dstFs.getFileStatus(dstPath);
          // If it is not force update, and the file size are same we will not recopy.
          // This normally happens when we do retry run.
          if (!forceUpdate && srcStatus.getLen() == dstStatus.getLen()) {
            LOG.info("dst already exists. " + dstPath.toString());
            return "dst already exists. " + dstPath.toString();
          }
        }
        Path dstParentPath = new Path(dstDir);
        if (!dstFs.exists(dstParentPath) && !dstFs.mkdirs(dstParentPath)) {
          LOG.info("Could not create directory: " + dstDir);
          return "Could not create directory: " + dstDir;
        }
        // Stage the copy under a unique temporary name so concurrent attempts cannot collide.
        Path tmpDstPath = new Path(
            tmpDirPath,
            "__tmp__copy__file_" + identifier + "_" + srcFileStatus.getFileName()
              + "." + System.currentTimeMillis());
        if (dstFs.exists(tmpDstPath)) {
          dstFs.delete(tmpDstPath, false);
        }
        // Keep the same replication factor and block size as the source file.
        try (FSDataInputStream inputStream = srcFs.open(srcPath);
             FSDataOutputStream outputStream = dstFs.create(
                 tmpDstPath,
                 srcStatus.getPermission(),
                 true,
                 conf.getInt("io.file.buffer.size", 4096),
                 srcStatus.getReplication(),
                 srcStatus.getBlockSize(),
                 progressable)) {
          IOUtils.copyBytes(inputStream, outputStream, conf);
        }
        if (forceUpdate && dstFs.exists(dstPath)) {
          dstFs.delete(dstPath, false);
        }
        // If checksums exist and don't match, re-do the copy. If checksums do not exist, assume
        // that they match.
        if (conf.getBoolean(ConfigurationKeys.BATCH_JOB_VERIFY_COPY_CHECKSUM, true)
            && !FsUtils.checksumsMatch(conf, srcPath, tmpDstPath)
                .map(Boolean::booleanValue)
                .orElse(true)) {
          throw new IOException(String.format("Not renaming %s to %s since checksums do not match "
              + "between %s and %s",
              tmpDstPath,
              dstPath,
              srcPath,
              tmpDstPath));
        }
        // Fix: FileSystem.rename() signals failure through its boolean return value, not an
        // exception. The old code ignored the result and reported success even when the rename
        // failed, leaving the destination un-updated. Throwing here triggers the retry loop.
        if (!dstFs.rename(tmpDstPath, dstPath)) {
          throw new IOException(
              String.format("Failed to rename %s to %s", tmpDstPath, dstPath));
        }
        dstFs.setTimes(dstPath, srcStatus.getModificationTime(), srcStatus.getAccessTime());
        LOG.info(dstPath.toString() + " file copied");
        progressable.progress();
        return null;
      } catch (IOException e) {
        LOG.info("Got an exception!", e);
        lastError = e.getMessage();
        --retry;
      }
    }
    return lastError;
  }
}
| 9,530 |
package com.airbnb.reair.batch;
import com.google.common.base.MoreObjects;
import org.apache.hadoop.fs.Path;
import java.net.URI;
public class SimpleFileStatus {
private final long fileSize;
private final long modificationTime;
private final Path path;
/**
* Class to track file size, modification time, and Path. It is used
* to serialize between mapper and reducer.
*
* @param path file path
* @param fileSize file size
* @param modificationTime file modification time
*/
public SimpleFileStatus(String path, long fileSize, long modificationTime) {
this.fileSize = fileSize;
this.modificationTime = modificationTime;
this.path = new Path(path);
}
/**
* Class to track file size, modification time, and Path. It is used
* to serialize between mapper and reducer.
*
* @param path file path
* @param fileSize file size
* @param modificationTime file modification time
*/
public SimpleFileStatus(Path path, long fileSize, long modificationTime) {
this.fileSize = fileSize;
this.modificationTime = modificationTime;
this.path = path;
}
public String getPath() {
return path.toUri().getPath();
}
public String getFullPath() {
return path.toString();
}
public String getFileName() {
return path.getName();
}
public long getFileSize() {
return fileSize;
}
public long getModificationTime() {
return modificationTime;
}
public URI getUri() {
return path.toUri();
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this).add("path", path.toString())
.add("size", fileSize).add("ts", modificationTime).toString();
}
}
| 9,531 |
package com.airbnb.reair.batch.template;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.exception.MethodInvocationException;
import org.apache.velocity.exception.ParseErrorException;
import org.apache.velocity.exception.ResourceNotFoundException;
import org.apache.velocity.runtime.RuntimeConstants;
import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
import java.io.StringWriter;
/**
 * Utilities for working with Velocity templates.
 */
public class VelocityUtils {
  /**
   * Return the String representation of the template rendered using Velocity.
   *
   * @param templateFileName file name of the template in the classpath
   * @param context context used to render the template
   * @return the rendered template text
   * @throws TemplateRenderException if there is an error with the template
   */
  public static String renderTemplate(String templateFileName,
                                      VelocityContext context)
      throws TemplateRenderException {
    // Configure the engine to resolve templates from the classpath instead of the filesystem.
    VelocityEngine engine = new VelocityEngine();
    engine.setProperty(RuntimeConstants.RESOURCE_LOADER, "classpath");
    engine.setProperty("classpath.resource.loader.class",
        ClasspathResourceLoader.class.getName());
    StringWriter rendered = new StringWriter();
    try {
      engine.mergeTemplate(templateFileName, "UTF-8", context, rendered);
    } catch (ResourceNotFoundException
        | ParseErrorException
        | MethodInvocationException e) {
      throw new TemplateRenderException("Error rendering template file: " + templateFileName, e);
    }
    return rendered.toString();
  }
}
| 9,532 |
package com.airbnb.reair.batch.template;
/**
 * Exception thrown when there is an error rendering a template using Velocity.
 */
public class TemplateRenderException extends Exception {
  // Exception is Serializable; declare a version id so the serialized form stays stable.
  private static final long serialVersionUID = 1L;

  /**
   * Creates an exception with a descriptive message.
   *
   * @param message description of the rendering failure
   */
  public TemplateRenderException(String message) {
    super(message);
  }

  /**
   * Creates an exception with a descriptive message and the underlying cause.
   *
   * @param message description of the rendering failure
   * @param cause the exception that triggered this failure
   */
  public TemplateRenderException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 9,533 |
package com.airbnb.reair.batch.hdfs;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import com.google.common.hash.Hashing;
import com.airbnb.reair.batch.BatchUtils;
import com.airbnb.reair.batch.SimpleFileStatus;
import com.airbnb.reair.common.FsUtils;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
import javax.annotation.Nullable;
/**
* A Map/Reduce job that copies HDFS files from one or more source directories to a destination
* directory.
* In case of conflict in sources, the source with largest timestamp value is copied.
*
* <p>See https://github.com/airbnb/reair/blob/master/docs/hdfs_copy.md for usage.
*
* <p>ReplicationJob was a tool that was developed to address some of the shortcomings of DistCp
* when copying directories with a large number of files (e.g. /user/hive/warehouse). It was
* included in the repo since it might be useful, but it's not directly used for Hive replication.
*
* <p>It can potentially replace DistCp in incremental replication, but since incremental
* replication generally runs copies for shallow directories with a relatively small number of
* files (e.g. /user/hive/warehouse/my_table/ds=2016-01-01), there isn't a strong need.
*
* <p>There are 2 Map-Reduce jobs in this job:
*
* <p>1. runDirectoryComparisonJob
*
* <p>1.1. job.setInputFormatClass(DirScanInputFormat.class) -
* take all source roots and destination root as inputs,
* do an initial glob on inputs to get initial dir list,
* then breadth-first search on the initial dir list until it reaches max_level and get enough
* directories (note that the search in each level is done in a multi-threaded way).
*
* <p>1.2. job.setMapperClass(ListFileMapper.class) -
* list the files in those dirs (and recursively on the leaf dirs)
*
* <p>1.3. job.setReducerClass(DirectoryCompareReducer.class)
* for the same file path name, use the newest one from all source roots to compare with the
* one in destination, generate the action, and write the action into the reducer output.
*
* <p>2. runSyncJob
*
* <p>2.1. job.setInputFormatClass(TextInputFormat.class);
*
* <p>2.2. job.setMapperClass(HdfsSyncMapper.class) -
* Redistribute all file actions based on hash of filenames.
*
* <p>2.3. job.setReducerClass(HdfsSyncReducer.class) -
* Take the action. Note that only ADD and UPDATE are supported.
* TODO: DELETE needs to be added.
*/
public class ReplicationJob extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(ReplicationJob.class);

// Configuration keys used to pass driver parameters into the mappers/reducers.
private static final String SRC_PATH_CONF = "replication.src.path";
private static final String DST_PATH_CONF = "replication.dst.path";
private static final String TMP_PATH_CONF = "replication.tmp.path";
private static final String COMPARE_OPTION_CONF = "replication.compare.option";

// Names of the command line argument (e.g. --source)
public static final String SOURCE_DIRECTORY_ARG = "source";
public static final String DESTINATION_DIRECTORY_ARG = "destination";
public static final String TEMP_DIRECTORY_ARG = "temp";
public static final String LOG_DIRECTORY_ARG = "log";
public static final String OPERATIONS_ARG = "operations";
public static final String BLACKLIST_ARG = "blacklist";
public static final String DRY_RUN_ARG = "dry-run";

// Configuration key for the optional regex of directory names to skip during scanning.
public static final String DIRECTORY_BLACKLIST_REGEX = "replication.directory.blacklist";
/**
 * File-level action computed by the comparison stage and consumed by the sync stage.
 */
private enum Operation {
  ADD,
  DELETE,
  UPDATE;

  /**
   * Parses the single-letter operation code used on the command line / in configuration.
   *
   * @param value "a" (add), "d" (delete), or "u" (update)
   * @return the corresponding operation
   * @throws RuntimeException if the value is not a recognized code
   */
  public static Operation getEnum(String value) {
    switch (value) {
      case "a":
        return ADD;
      case "d":
        return DELETE;
      case "u":
        return UPDATE;
      default:
        // Include the offending value so misconfigurations are easy to diagnose.
        throw new RuntimeException("Invalid Operation: " + value);
    }
  }
}
// Skips hidden entries: names starting with "_" or ".". PathFilter has a single abstract
// method, so a lambda replaces the anonymous class (the file already uses Java 8 streams).
private static final PathFilter hiddenFileFilter = path -> {
  String name = path.getName();
  return !name.startsWith("_") && !name.startsWith(".");
};
/**
 * Returns the configured root URI (destination or one of the sources) that the given path
 * falls under.
 *
 * <p>relativize() returns its argument unchanged when the path is not under the URI, so a
 * changed result means "under this root".
 *
 * @throws IllegalStateException if the path is not under any configured root (the old code
 *     threw a bare NoSuchElementException from Optional.get() with no context)
 */
private static URI findRootUri(URI [] rootUris, Path path) {
  return Stream.of(rootUris)
      .filter(uri -> !uri.relativize(path.toUri()).equals(path.toUri()))
      .findFirst()
      .orElseThrow(() -> new IllegalStateException(
          "Path " + path + " is not under any configured root"));
}
/**
 * Mapper that lists the files under each directory produced by DirScanInputFormat, emitting
 * (root-relative directory, file status) pairs.
 */
public static class ListFileMapper extends Mapper<Text, Boolean, Text, FileStatus> {
  // Regex of directory names to skip during recursive scans; null disables blacklisting.
  private String directoryBlackList;
  // Root URIs: the destination root first, followed by every source root.
  private URI [] rootUris;

  // Writes every non-hidden file under 'directory', recursing into subdirectories when asked.
  private void enumDirectories(FileSystem fs, URI rootUri, Path directory, boolean recursive,
      Mapper.Context context) throws IOException, InterruptedException {
    try {
      for (FileStatus entry : fs.listStatus(directory, hiddenFileFilter)) {
        if (!entry.isDirectory()) {
          // Key by the path relative to its root, so that the same relative path coming from
          // different roots meets at the same reducer.
          context.write(new Text(rootUri.relativize(directory.toUri()).getPath()),
              new FileStatus(entry));
          continue;
        }
        boolean blacklisted = directoryBlackList != null
            && entry.getPath().getName().matches(directoryBlackList);
        if (recursive && !blacklisted) {
          enumDirectories(fs, rootUri, entry.getPath(), recursive, context);
        }
      }
      context.progress();
    } catch (FileNotFoundException e) {
      // The directory disappeared between listing and scanning; treat it as empty.
      return;
    }
  }

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    this.rootUris = Stream.concat(
        Stream.of(context.getConfiguration().get(DST_PATH_CONF)),
        Stream.of(context.getConfiguration().get(SRC_PATH_CONF).split(",")))
        .map(root -> new Path(root).toUri())
        .toArray(URI[]::new);
    this.directoryBlackList = context.getConfiguration().get(DIRECTORY_BLACKLIST_REGEX);
  }

  @Override
  protected void map(Text key, Boolean value, Context context)
      throws IOException, InterruptedException {
    // The key is a directory path; the value says whether it still needs a recursive scan.
    Path directory = new Path(key.toString());
    FileSystem fileSystem = directory.getFileSystem(context.getConfiguration());
    enumDirectories(fileSystem, findRootUri(rootUris, directory), directory, value, context);
    LOG.info(key.toString() + " processed.");
  }
}
// Encodes an action line as tab-separated columns: action, full path, size, modification time.
private static Text generateValue(String action, SimpleFileStatus fileStatus) {
  List<String> columns = new ArrayList<>();
  columns.add(action);
  columns.add(fileStatus.getFullPath());
  columns.add(String.valueOf(fileStatus.getFileSize()));
  columns.add(String.valueOf(fileStatus.getModificationTime()));
  return new Text(Joiner.on("\t").useForNull("\\N").join(columns));
}
/**
 * Compare source1 + source2 with destination.
 *
 * <p>For each relative file path, the newest copy among all source roots is compared to the
 * destination copy (if any), and an ADD, UPDATE, or DELETE action line is emitted for the
 * sync stage. Only operations enabled via the compare option are emitted.
 */
public static class DirectoryCompareReducer extends Reducer<Text, FileStatus, Text, Text> {
  // URI of the destination root, used to separate destination entries from source entries.
  private URI dstRoot;
  // Store root URI for sources and destination directory
  private URI [] rootUris;
  // True for a file status whose path falls under the destination root.
  private Predicate<SimpleFileStatus> underDstRootPred;
  // The subset of operations (ADD/DELETE/UPDATE) this run is allowed to emit.
  private EnumSet<Operation> operationSet;

  // Picks the copy source among the candidates: the largest modification time wins.
  private SimpleFileStatus findSrcFileStatus(List<SimpleFileStatus> fileStatuses) {
    // pick copy source. The source is the one with largest timestamp value
    return Ordering.from(new Comparator<SimpleFileStatus>() {
      @Override
      public int compare(SimpleFileStatus o1, SimpleFileStatus o2) {
        return Long.compare(o1.getModificationTime(), o2.getModificationTime());
      }
    }).max(fileStatuses);
  }

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    this.dstRoot = new Path(context.getConfiguration().get(DST_PATH_CONF)).toUri();
    // relativize() leaves a URI unchanged when it is not under dstRoot, so "changed" means
    // "under the destination root".
    this.underDstRootPred = new Predicate<SimpleFileStatus>() {
      @Override
      public boolean apply(@Nullable SimpleFileStatus simpleFileStatus) {
        return !dstRoot.relativize(simpleFileStatus.getUri())
            .equals(simpleFileStatus.getUri());
      }
    };
    // Parse the comma-separated operation codes (default: all of "a,d,u") into an EnumSet.
    this.operationSet = Sets.newEnumSet(
        Iterables.<String, Operation>transform(
            Arrays.asList(context.getConfiguration().get(COMPARE_OPTION_CONF, "a,d,u")
                .split(",")),
            new Function<String, Operation>() {
              @Override
              public Operation apply(@Nullable String value) {
                return Operation.getEnum(value);
              }
            }),
        Operation.class);
    this.rootUris = Stream.concat(
        Stream.of(context.getConfiguration().get(DST_PATH_CONF)),
        Stream.of(context.getConfiguration().get(SRC_PATH_CONF).split(","))).map(
        root -> new Path(root).toUri()).toArray(size -> new URI[size]
    );
  }

  @Override
  protected void reduce(Text key, Iterable<FileStatus> values, Context context)
      throws IOException, InterruptedException {
    // Group every incoming status by its root-relative path, so entries for the same logical
    // file coming from different roots land in the same list.
    ListMultimap<String, SimpleFileStatus> fileStatusHashMap = LinkedListMultimap.create();
    for (FileStatus fs : values) {
      SimpleFileStatus efs =
          new SimpleFileStatus(fs.getPath(), fs.getLen(), fs.getModificationTime());
      URI rootUris = findRootUri(this.rootUris, fs.getPath());
      fileStatusHashMap.put(rootUris.relativize(fs.getPath().toUri()).getPath(), efs);
    }
    for (String relativePath : fileStatusHashMap.keySet()) {
      List<SimpleFileStatus> fileStatuses = fileStatusHashMap.get(relativePath);
      // Split the statuses into source-side and destination-side entries.
      ArrayList<SimpleFileStatus> srcFileStatus =
          Lists.newArrayList(Iterables.filter(fileStatuses,
              Predicates.not(this.underDstRootPred)));
      ArrayList<SimpleFileStatus> dstFileStatus =
          Lists.newArrayList(Iterables.filter(fileStatuses, this.underDstRootPred));
      // If destination has file,
      if (dstFileStatus.size() > 0) {
        // we can only have one destination
        assert dstFileStatus.size() == 1;
        // There are two cases:
        // update or delete.
        if (srcFileStatus.size() > 0) {
          // pick source first. The source is the one with largest timestamp value
          SimpleFileStatus finalSrcFileStatus = findSrcFileStatus(srcFileStatus);
          // If file size is different we need to copy
          if (finalSrcFileStatus.getFileSize() != dstFileStatus.get(0).getFileSize()) {
            if (operationSet.contains(Operation.UPDATE)) {
              context.write(new Text(relativePath),
                  generateValue(Operation.UPDATE.toString(), finalSrcFileStatus));
            }
          }
        } else {
          // source does not exist, then we need to delete if operation contains delete.
          if (operationSet.contains(Operation.DELETE)) {
            // 2. source does not exist it is delete
            context.write(new Text(relativePath),
                generateValue(Operation.DELETE.toString(), dstFileStatus.get(0)));
          }
        }
      } else {
        // Destination does not exist. So we need to add the file if needed.
        if (operationSet.contains(Operation.ADD)) {
          // if no destination, then this is a new file.
          SimpleFileStatus src = findSrcFileStatus(srcFileStatus);
          context.write(new Text(relativePath),
              generateValue(Operation.ADD.toString(), src));
        }
      }
    }
  }
}
/** Mapper that re-keys copy actions by a hash, rebalancing the copy work across reducers. */
public static class HdfsSyncMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // Columns: relative path, action, full path, size, modification time (tab separated).
    String[] columns = value.toString().split("\t");
    int sizeHash = Long.valueOf(columns[3]).hashCode();
    int mtimeHash = Long.valueOf(columns[4]).hashCode();
    long bucket = Hashing.murmur3_128().hashLong(sizeHash * mtimeHash).asLong();
    context.write(new LongWritable(bucket), value);
  }
}
/**
 * Reducer that executes the ADD/UPDATE copy actions produced by the comparison stage.
 * DELETE actions are currently ignored.
 */
public static class HdfsSyncReducer extends Reducer<LongWritable, Text, Text, Text> {
  private String dstRoot;
  private Path tmpDirPath;
  // Total bytes successfully copied by this reducer; reported in cleanup().
  private long copiedSize = 0;

  enum CopyStatus {
    COPIED,
    SKIPPED
  }

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    this.dstRoot = context.getConfiguration().get(DST_PATH_CONF);
    this.tmpDirPath = new Path(context.getConfiguration().get(TMP_PATH_CONF));
  }

  @Override
  protected void reduce(LongWritable key, Iterable<Text> values, Context context)
      throws IOException, InterruptedException {
    for (Text value : values) {
      // Columns: relative path, action, full source path, size, modification time.
      String[] fields = value.toString().split("\t");
      Operation operation = Operation.valueOf(fields[1]);
      // We only support add operation for now.
      if (operation == Operation.ADD || operation == Operation.UPDATE) {
        SimpleFileStatus fileStatus =
            new SimpleFileStatus(fields[2], Long.valueOf(fields[3]), Long.valueOf(fields[4]));
        Path dstFile = new Path(dstRoot, fields[0]);
        FileSystem srcFs = (new Path(fileStatus.getFullPath()))
            .getFileSystem(context.getConfiguration());
        FileSystem dstFs = dstFile.getFileSystem(context.getConfiguration());
        // Fix: the action column holds Operation.toString() ("UPDATE"), so the old comparison
        // fields[1].equals("update") was always false and UPDATE actions never forced an
        // overwrite of the existing destination file. Compare the parsed enum instead.
        String copyError =
            BatchUtils.doCopyFileAction(context.getConfiguration(), fileStatus,
                srcFs, dstFile.getParent().toString(),
                dstFs, tmpDirPath, context, operation == Operation.UPDATE,
                context.getTaskAttemptID().toString());
        if (copyError == null) {
          // Fix: copiedSize was logged in cleanup() but never accumulated, so the total was
          // always reported as 0.
          copiedSize += fileStatus.getFileSize();
          context.write(new Text(fields[0]),
              generateValue(CopyStatus.COPIED.toString(), fileStatus));
        } else {
          context.write(new Text(fields[0]),
              generateValue(CopyStatus.SKIPPED.toString(), fileStatus));
        }
      }
    }
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    LOG.info("Total bytes copied = " + copiedSize);
  }
}
/**
 * Print usage information to provided OutputStream.
 *
 * @param options Command-line options to be part of usage.
 * @param out OutputStream to which to write the usage information.
 */
public static void printUsage(final Options options, final OutputStream out) {
  final HelpFormatter formatter = new HelpFormatter();
  final PrintWriter usageWriter = new PrintWriter(out);
  formatter.printUsage(usageWriter, 80,
      "Usage: hadoop jar <jar name> " + ReplicationJob.class.getName(), options);
  // Flush (but do not close) so the caller's stream stays usable.
  usageWriter.flush();
}
/**
 * Construct and provide Options.
 *
 * @return Options expected from command-line of GNU form.
 */
@SuppressWarnings("static-access")
public static Options constructOptions() {
  Options cliOptions = new Options();
  // --source: where to copy from (one or more roots).
  cliOptions.addOption(OptionBuilder.withLongOpt(SOURCE_DIRECTORY_ARG)
      .withDescription(
          "Comma separated list of source directories")
      .hasArg()
      .withArgName("S")
      .create());
  // --destination: where to copy to.
  cliOptions.addOption(OptionBuilder.withLongOpt(DESTINATION_DIRECTORY_ARG)
      .withDescription("Copy destination directory")
      .hasArg()
      .withArgName("D")
      .create());
  // --temp: staging area for in-flight copies.
  cliOptions.addOption(OptionBuilder.withLongOpt(TEMP_DIRECTORY_ARG)
      .withDescription("Copy temporary directory path")
      .hasArg()
      .withArgName("T")
      .create());
  // --log: where the MR jobs write their output.
  cliOptions.addOption(OptionBuilder.withLongOpt(LOG_DIRECTORY_ARG)
      .withDescription("Job logging output path")
      .hasArg()
      .withArgName("O")
      .create());
  // --operations: which of add/delete/update to perform.
  cliOptions.addOption(OptionBuilder.withLongOpt(OPERATIONS_ARG)
      .withDescription("checking options: comma separated option"
          + " including a(add), d(delete), u(update)")
      .hasArg()
      .withArgName("P")
      .create());
  // --blacklist: regex of directory names to skip.
  cliOptions.addOption(OptionBuilder.withLongOpt(BLACKLIST_ARG)
      .withDescription("Directory blacklist regex")
      .hasArg()
      .withArgName("B")
      .create());
  // --dry-run: run the comparison stage only.
  cliOptions.addOption(OptionBuilder.withLongOpt(DRY_RUN_ARG)
      .withDescription("Dry run only")
      .create());
  return cliOptions;
}
/**
 * Method to run HDFS copy job.
 * 1. Parse program args.
 * 2. Run two MR jobs in sequence.
 *
 * @param args program arguments
 * @return 0 on success, non-zero on failure
 *
 * @throws IOException on filesystem access errors
 * @throws InterruptedException if a job is interrupted
 * @throws ClassNotFoundException if a job class cannot be loaded
 */
public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
  final CommandLineParser cmdLineParser = new BasicParser();
  final Options options = constructOptions();
  CommandLine commandLine;
  try {
    commandLine = cmdLineParser.parse(options, args);
  } catch (ParseException e) {
    LOG.error("Encountered exception while parsing using GnuParser: ", e);
    printUsage(options, System.out);
    System.out.println();
    ToolRunner.printGenericCommandUsage(System.out);
    return 1;
  }
  // Source, destination, and log directories are all mandatory.
  if (!commandLine.hasOption(SOURCE_DIRECTORY_ARG)
      || !commandLine.hasOption(DESTINATION_DIRECTORY_ARG)) {
    printUsage(options, System.out);
    return 1;
  }
  if (!commandLine.hasOption(LOG_DIRECTORY_ARG)) {
    printUsage(options, System.out);
    return 1;
  }
  boolean dryRun = commandLine.hasOption(DRY_RUN_ARG);
  Path srcDir = new Path(commandLine.getOptionValue(SOURCE_DIRECTORY_ARG));
  Path destDir = new Path(commandLine.getOptionValue(DESTINATION_DIRECTORY_ARG));
  String operationsStr = commandLine.getOptionValue(OPERATIONS_ARG);
  String tmpDirStr = commandLine.getOptionValue(TEMP_DIRECTORY_ARG);
  String blacklistRegex = commandLine.getOptionValue(BLACKLIST_ARG);
  if (blacklistRegex != null) {
    getConf().set(DIRECTORY_BLACKLIST_REGEX, blacklistRegex);
    LOG.info("Blacklist: " + blacklistRegex);
  }
  if (!dryRun && tmpDirStr == null) {
    LOG.error("Temporary directory must be specified");
    return -1;
  }
  // Disable speculative execution since neither mapper nor reducer handles this properly.
  if (this.getConf().getBoolean(MRJobConfig.MAP_SPECULATIVE, true)) {
    LOG.warn("Turning off speculative mappers in configuration");
    getConf().set(MRJobConfig.MAP_SPECULATIVE, "false");
  }
  if (this.getConf().getBoolean(MRJobConfig.REDUCE_SPECULATIVE, true)) {
    LOG.warn("Turning off speculative reducers in configuration");
    getConf().set(MRJobConfig.REDUCE_SPECULATIVE, "false");
  }
  Path logPath = new Path(commandLine.getOptionValue(LOG_DIRECTORY_ARG));
  // If the log directory exists and it is not empty, throw an error
  FileSystem fs = logPath.getFileSystem(getConf());
  if (!fs.exists(logPath)) {
    LOG.info("Creating " + logPath);
  } else if (FsUtils.getSize(getConf(), logPath, Optional.empty()) != 0) {
    LOG.error("Log directory already exists and is not empty: " + logPath);
    return -1;
  }
  Path stage1LogDir = new Path(logPath, "stage1");
  Path stage2LogDir = new Path(logPath, "stage2");
  if (dryRun) {
    LOG.info("Starting stage 1 with log directory " + stage1LogDir);
    return runDirectoryComparisonJob(srcDir,
        destDir,
        stage1LogDir,
        operationsStr);
  } else {
    Path tmpDir = new Path(tmpDirStr);
    // Verify that destination directory exists
    if (!FsUtils.dirExists(getConf(), destDir)) {
      LOG.warn("Destination directory does not exist. Creating " + destDir);
      FileSystem destFs = destDir.getFileSystem(getConf());
      // Fix: create the directory on the *destination* filesystem. The old code called
      // fs.mkdirs(destDir), where fs belongs to the log path, so the destination directory
      // was never created when the two were on different filesystems.
      destFs.mkdirs(destDir);
    }
    LOG.info("Starting stage 1 with log directory " + stage1LogDir);
    if (runDirectoryComparisonJob(srcDir,
        destDir,
        stage1LogDir,
        operationsStr) == 0) {
      LOG.info("Starting stage 2 with log directory " + stage2LogDir);
      return runSyncJob(srcDir,
          destDir,
          tmpDir,
          stage1LogDir,
          stage2LogDir);
    } else {
      return -1;
    }
  }
}
/**
 * Runs the stage-1 MR job that compares the source directories against the destination and
 * writes the ADD/DELETE/UPDATE action lines consumed by the sync stage.
 *
 * @param source source root directory
 * @param destination destination root directory
 * @param output directory for the job's action-file output
 * @param compareOption comma-separated subset of "a", "d", "u" selecting emitted operations
 * @return 0 on success, 1 on failure
 */
private int runDirectoryComparisonJob(Path source, Path destination, Path output,
    String compareOption)
    throws IOException, InterruptedException, ClassNotFoundException {
  // Job.getInstance replaces the deprecated Job(Configuration, String) constructor.
  Job job = Job.getInstance(getConf(), "Directory Comparison Job");
  job.setJarByClass(getClass());
  job.setInputFormatClass(DirScanInputFormat.class);
  job.setMapperClass(ListFileMapper.class);
  job.setReducerClass(DirectoryCompareReducer.class);
  // last directory is destination, all other directories are source directories
  job.getConfiguration().set(SRC_PATH_CONF, source.toString());
  job.getConfiguration().set(DST_PATH_CONF, destination.toString());
  job.getConfiguration().set(FileInputFormat.INPUT_DIR, Joiner.on(",").join(source, destination));
  job.getConfiguration().set(COMPARE_OPTION_CONF, compareOption);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(FileStatus.class);
  FileOutputFormat.setOutputPath(job, output);
  FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
  boolean success = job.waitForCompletion(true);
  return success ? 0 : 1;
}
/**
 * Runs the stage-2 MR job that executes the copy actions produced by the comparison stage.
 *
 * @param source source root directory
 * @param destination destination root directory
 * @param tmpDir staging directory for in-flight copies
 * @param input action files written by the comparison job
 * @param output directory for this job's copy-status output
 * @return 0 on success, 1 on failure
 */
private int runSyncJob(Path source, Path destination, Path tmpDir, Path input,
    Path output)
    throws IOException, InterruptedException, ClassNotFoundException {
  // Job.getInstance replaces the deprecated Job(Configuration, String) constructor.
  Job job = Job.getInstance(getConf(), "HDFS Sync job");
  job.setJarByClass(getClass());
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapperClass(HdfsSyncMapper.class);
  job.setReducerClass(HdfsSyncReducer.class);
  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(Text.class);
  job.getConfiguration().set(SRC_PATH_CONF, source.toString());
  job.getConfiguration().set(DST_PATH_CONF, destination.toString());
  job.getConfiguration().set(TMP_PATH_CONF, tmpDir.toString());
  FileInputFormat.setInputPaths(job, input);
  FileInputFormat.setInputDirRecursive(job, true);
  // Small max split size so the copy work fans out across many mappers.
  FileInputFormat.setMaxInputSplitSize(job,
      this.getConf().getLong(FileInputFormat.SPLIT_MAXSIZE, 60000L));
  FileOutputFormat.setOutputPath(job, new Path(output.toString()));
  FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
  boolean success = job.waitForCompletion(true);
  return success ? 0 : 1;
}
/** Command-line entry point: delegates to ToolRunner and exits with its status code. */
public static void main(String[] args) throws Exception {
  System.exit(ToolRunner.run(new ReplicationJob(), args));
}
}
| 9,534 |
package com.airbnb.reair.batch.hdfs;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import java.io.IOException;
import java.util.List;
/**
 * Record Reader that returns paths to directories.
 *
 * <p>Each key is a directory path; each value indicates whether the directory still needs a
 * recursive scan by the mapper.
 */
public class DirRecordReader extends RecordReader<Text, Boolean> {
  private List<InputSplit> inputSplits;
  // Index of the next split to hand out.
  private int index = 0;
  // Split most recently returned by nextKeyValue(), or null before the first call.
  private DirInputSplit cur;

  @Override
  public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
      throws IOException, InterruptedException {
    if (!(inputSplit instanceof ListDirInputSplit)) {
      throw new IOException("Invalid split class passed in.");
    }
    this.inputSplits = ((ListDirInputSplit) inputSplit).getSplits();
    this.index = 0;
    this.cur = null;
  }

  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    if (index < inputSplits.size()) {
      cur = (DirInputSplit) inputSplits.get(index++);
      return true;
    }
    return false;
  }

  @Override
  public Text getCurrentKey() throws IOException, InterruptedException {
    if (cur == null) {
      return null;
    }
    return new Text(cur.getFilePath());
  }

  @Override
  public Boolean getCurrentValue() throws IOException, InterruptedException {
    if (cur == null) {
      return null;
    }
    return cur.isLeafLevel();
  }

  @Override
  public float getProgress() throws IOException, InterruptedException {
    // Fix: the old formula ((index + 1) / size) overstated progress by one split and divided
    // by zero (yielding Infinity) for an empty split list. Report the consumed fraction,
    // clamped to [0, 1] as the RecordReader contract requires.
    if (inputSplits == null || inputSplits.isEmpty()) {
      return 1.0f;
    }
    return Math.min(1.0f, ((float) index) / inputSplits.size());
  }

  @Override
  public void close() throws IOException {}
}
| 9,535 |
package com.airbnb.reair.batch.hdfs;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.InvalidInputException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
/**
* InputFormat that scans directories breadth-first. It will stop at a level when it gets enough
* splits. The InputSplit it returns will keep track if a directory needs further traversal. If it
 * does, a further recursive scan will be done in the RecordReader. The InputFormat will return the
* file path as the key and the file size information as the value.
*/
public class DirScanInputFormat extends FileInputFormat<Text, Boolean> {
  private static final Log LOG = LogFactory.getLog(DirScanInputFormat.class);

  // Skips paths considered hidden by Hadoop convention: names starting with
  // "_" (e.g. _SUCCESS) or "." (e.g. .staging).
  private static final PathFilter hiddenFileFilter = new PathFilter() {
    public boolean accept(Path path) {
      String name = path.getName();
      return !name.startsWith("_") && !name.startsWith(".");
    }
  };

  // Number of worker threads used to list directories in parallel while
  // computing splits.
  private static final int NUMBER_OF_THREADS = 16;
  // Target number of directories assigned to each mapper.
  private static final int NUMBER_OF_DIRECTORIES_PER_MAPPER = 10;

  // Config key: when true, hidden files/directories are scanned as well.
  public static final String NO_HIDDEN_FILE_FILTER = "replication.inputformat.nohiddenfilefilter";
  // Config key: minimum number of directory levels to expand breadth-first
  // here before delegating the remaining traversal to the record readers.
  public static final String DIRECTORY_TRAVERSE_MAX_LEVEL =
      "replication.inputformat.max.traverse.level";

  /** Returns a reader that unpacks a {@link ListDirInputSplit} one directory at a time. */
  @Override
  public RecordReader<Text, Boolean> createRecordReader(InputSplit inputSplit,
      TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
    return new DirRecordReader();
  }

  /**
   * Expands the job's input path globs into the initial set of directories to
   * traverse, applying the hidden-file filter and the directory blacklist.
   *
   * @throws IOException if no input paths are configured
   * @throws InvalidInputException if any input path is missing or matches nothing
   */
  private List<FileStatus> getInitialSplits(JobContext job) throws IOException {
    String directoryBlackList = job.getConfiguration()
        .get(ReplicationJob.DIRECTORY_BLACKLIST_REGEX);
    boolean nofilter = job.getConfiguration().getBoolean(NO_HIDDEN_FILE_FILTER, false);
    // NOTE(review): raw ArrayList (not ArrayList<FileStatus>) — kept as-is.
    ArrayList result = new ArrayList();
    Path[] dirs = getInputPaths(job);
    if (dirs.length == 0) {
      throw new IOException("No input paths specified in job");
    } else {
      ArrayList errors = new ArrayList();
      for (int i = 0; i < dirs.length; ++i) {
        Path path = dirs[i];
        Configuration conf = job.getConfiguration();
        FileSystem fs = path.getFileSystem(conf);
        FileStatus[] matches = nofilter ? fs.globStatus(path)
            : fs.globStatus(path, hiddenFileFilter);
        if (matches == null) {
          errors.add(new IOException("Input path does not exist: " + path));
        } else if (matches.length == 0) {
          errors.add(new IOException("Input Pattern " + path + " matches 0 files"));
        } else {
          // Only directories survive; plain files at the top level are ignored.
          for (FileStatus globStat : matches) {
            if (globStat.isDirectory()) {
              // NOTE(review): here the blacklist regex is matched against the
              // directory NAME only, while SplitCallable below matches the
              // full URI path — confirm this asymmetry is intentional.
              if (directoryBlackList == null
                  || !globStat.getPath().getName().matches(directoryBlackList)) {
                result.add(globStat);
              }
            }
          }
        }
      }
      if (!errors.isEmpty()) {
        throw new InvalidInputException(errors);
      } else {
        LOG.info("Total input directory to process : " + result.size());
        return result;
      }
    }
  }

  /**
   * Computes splits by expanding directories breadth-first, one level per
   * iteration, using a thread pool. Expansion stops once at least max_level
   * levels have been explored AND either there are no more directories or
   * enough directories exist to give each mapper
   * NUMBER_OF_DIRECTORIES_PER_MAPPER of them. Directories emitted in the final
   * round are flagged as "leaf" so record readers finish the traversal.
   * Splits are shuffled, then grouped into ListDirInputSplits for load balance.
   */
  @Override
  public List<InputSplit> getSplits(JobContext context) throws IOException {
    // split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<>();
    List<FileStatus> dirToProcess = getInitialSplits(context);
    int level = 0;
    final int numberOfMappers = context.getConfiguration().getInt("mapreduce.job.maps", 500);
    final int max_level = context.getConfiguration().getInt(DIRECTORY_TRAVERSE_MAX_LEVEL, 3);
    try {
      // The initial directories are always emitted as non-leaf splits.
      splits.addAll(Lists.transform(dirToProcess, new Function<FileStatus, DirInputSplit>() {
        @Nullable
        @Override
        public DirInputSplit apply(FileStatus status) {
          return new DirInputSplit(status.getPath().toString(), false);
        }
      }));
      boolean finished = false;
      while (!finished) {
        List<Future<List<FileStatus>>> splitfutures = new ArrayList<Future<List<FileStatus>>>();
        final int directoriesPerThread = Math.max(dirToProcess.size() / NUMBER_OF_THREADS, 1);
        for (List<FileStatus> range : Lists.partition(dirToProcess, directoriesPerThread)) {
          // for each range, pick a live owner and ask it to compute bite-sized splits
          splitfutures
              .add(executor.submit(new SplitCallable(range, context.getConfiguration(), level)));
        }
        dirToProcess = new ArrayList<>();
        // wait until we have all the results back
        for (Future<List<FileStatus>> futureInputSplits : splitfutures) {
          try {
            dirToProcess.addAll(futureInputSplits.get());
          } catch (Exception e) {
            throw new IOException("Could not get input splits", e);
          }
        }
        // at least explore max_level or if we can generate numberOfMappers with
        // NUMBER_OF_DIRECTORIES_PER_MAPPER directories for each mapper.
        if (level >= max_level && (dirToProcess.size() == 0
            || (splits.size() + dirToProcess.size())
            > NUMBER_OF_DIRECTORIES_PER_MAPPER * numberOfMappers)) {
          finished = true;
        }
        // On the final round, mark the directories as leaf so the record
        // reader knows to recurse into them itself.
        final boolean leaf = finished;
        splits.addAll(Lists.transform(dirToProcess, new Function<FileStatus, DirInputSplit>() {
          @Nullable
          @Override
          public DirInputSplit apply(FileStatus status) {
            return new DirInputSplit(status.getPath().toString(), leaf);
          }
        }));
        LOG.info(String.format("Running: directory to process size is %d, split size is %d, ",
            dirToProcess.size(), splits.size()));
        level++;
      }
    } finally {
      executor.shutdownNow();
    }
    assert splits.size() > 0;
    // Shuffle to even out per-mapper work before grouping.
    Collections.shuffle(splits, new Random(System.nanoTime()));
    final int directoriesPerSplit = Math.max(splits.size() / numberOfMappers, 1);
    return Lists.transform(Lists.partition(splits, directoriesPerSplit),
        new Function<List<InputSplit>, InputSplit>() {
          @Override
          public InputSplit apply(@Nullable List<InputSplit> inputSplits) {
            return new ListDirInputSplit(inputSplits);
          }
        });
  }

  /**
   * Get list of directories. Find next level of directories and return.
   */
  class SplitCallable implements Callable<List<FileStatus>> {
    private final Configuration conf;
    private final List<FileStatus> candidates;
    // Traversal depth, used only for logging.
    private final int level;
    private final String directoryBlackList;
    private final boolean nofilter;

    public SplitCallable(List<FileStatus> candidates, Configuration conf, int level) {
      this.candidates = candidates;
      this.conf = conf;
      this.level = level;
      this.directoryBlackList = conf.get(ReplicationJob.DIRECTORY_BLACKLIST_REGEX);
      this.nofilter = conf.getBoolean(NO_HIDDEN_FILE_FILTER, false);
    }

    /** Lists each candidate directory and returns its non-blacklisted child directories. */
    public List<FileStatus> call() throws Exception {
      ArrayList<FileStatus> nextLevel = new ArrayList<FileStatus>();
      for (FileStatus f : candidates) {
        if (!f.isDirectory()) {
          LOG.error(f.getPath() + " is not a directory");
          continue;
        }
        FileSystem fs = f.getPath().getFileSystem(conf);
        try {
          for (FileStatus child : nofilter ? fs.listStatus(f.getPath())
              : fs.listStatus(f.getPath(), hiddenFileFilter)) {
            if (child.isDirectory()) {
              // Blacklist is matched against the full URI path here (vs the
              // name-only match in getInitialSplits).
              if (directoryBlackList == null
                  || !child.getPath().toUri().getPath().matches(directoryBlackList)) {
                nextLevel.add(child);
              }
            }
          }
        } catch (FileNotFoundException e) {
          // Directory disappeared between listing rounds; best-effort skip.
          LOG.error(f.getPath() + " removed during operation. Skip...");
        }
      }
      LOG.info("Thread " + Thread.currentThread().getId() + ", level " + level + ":processed "
          + candidates.size() + " directories");
      return nextLevel;
    }
  }
}
| 9,536 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hdfs/DirInputSplit.java | package com.airbnb.reair.batch.hdfs;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* Splits containing a path to a directory.
*/
/**
 * Writable input split wrapping a single directory path together with a flag
 * that says whether the directory is at the leaf level of the scan (i.e. the
 * record reader does not need to traverse any deeper).
 */
public class DirInputSplit extends InputSplit implements Writable {
  private String filePath;
  private boolean leafLevel;

  /** No-arg constructor required for Writable deserialization. */
  public DirInputSplit() {}

  public DirInputSplit(String filePath, boolean leaf) {
    this.filePath = filePath;
    this.leafLevel = leaf;
  }

  /** Path of the directory this split represents. */
  public String getFilePath() {
    return filePath;
  }

  /** True when this directory requires no further recursive traversal. */
  public boolean isLeafLevel() {
    return leafLevel;
  }

  @Override
  public long getLength() throws IOException, InterruptedException {
    // This split is a unit of metadata, not a byte range, so length is 0.
    return 0;
  }

  @Override
  public String[] getLocations() throws IOException, InterruptedException {
    // No data locality preference.
    return new String[0];
  }

  @Override
  public void write(DataOutput dataOutput) throws IOException {
    Text.writeString(dataOutput, filePath);
    dataOutput.writeBoolean(leafLevel);
  }

  @Override
  public void readFields(DataInput dataInput) throws IOException {
    filePath = Text.readString(dataInput);
    leafLevel = dataInput.readBoolean();
  }

  @Override
  public String toString() {
    return filePath + ":" + leafLevel;
  }
}
| 9,537 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hdfs/ListDirInputSplit.java | package com.airbnb.reair.batch.hdfs;
import com.google.common.base.Joiner;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* Splits containing paths to directories.
*/
/**
 * Composite Writable split holding a batch of {@link DirInputSplit}s so that
 * each mapper receives several directories to process.
 */
public class ListDirInputSplit extends InputSplit implements Writable {
  private List<InputSplit> splits;

  /** No-arg constructor required for Writable deserialization. */
  public ListDirInputSplit() {}

  public ListDirInputSplit(List<InputSplit> splits) {
    this.splits = splits;
  }

  /** The wrapped per-directory splits. */
  public List<InputSplit> getSplits() {
    return splits;
  }

  @Override
  public long getLength() throws IOException, InterruptedException {
    // Metadata-only split; no meaningful byte length.
    return 0;
  }

  @Override
  public String[] getLocations() throws IOException, InterruptedException {
    // No data locality preference.
    return new String[0];
  }

  @Override
  public void write(DataOutput dataOutput) throws IOException {
    // Serialize as a count followed by each child split in order.
    dataOutput.writeInt(splits.size());
    for (InputSplit child : splits) {
      ((DirInputSplit) child).write(dataOutput);
    }
  }

  @Override
  public void readFields(DataInput dataInput) throws IOException {
    int count = dataInput.readInt();
    this.splits = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
      DirInputSplit child = new DirInputSplit();
      child.readFields(dataInput);
      this.splits.add(child);
    }
  }

  @Override
  public String toString() {
    return "[" + Joiner.on(",").join(splits) + "]";
  }
}
| 9,538 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/TableCompareWorker.java | package com.airbnb.reair.batch.hive;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.incremental.primitives.CopyPartitionedTableTask;
import com.airbnb.reair.incremental.primitives.TaskEstimate;
import com.airbnb.reair.incremental.primitives.TaskEstimator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
/**
* Worker to figure out the action for a table entity.
*
* <p>For partitioned table, the worker will generate a CHECK_PARTITION action for each partition.
* In PartitionCompareReducer, a more specific action will be determined. The reason for having
* separate table and partition checks is for load balancing. In a production data warehouse,
* tables can have millions of partitions. Since each check to metastore takes a hundred
* milliseconds, it is important to distribute metastore calls to many reducers through a shuffle.
*/
public class TableCompareWorker {
  /** A (database-pattern, table-pattern) pair used to blacklist Hive objects. */
  private static class BlackListPair {
    private final Pattern dbNamePattern;
    private final Pattern tblNamePattern;

    public BlackListPair(String dbNamePattern, String tblNamePattern) {
      this.dbNamePattern = Pattern.compile(dbNamePattern);
      this.tblNamePattern = Pattern.compile(tblNamePattern);
    }

    // True only when BOTH the db name and the table name match their patterns.
    boolean matches(String dbName, String tableName) {
      Matcher dbMatcher = this.dbNamePattern.matcher(dbName);
      Matcher tblmatcher = this.tblNamePattern.matcher(tableName);
      return dbMatcher.matches() && tblmatcher.matches();
    }
  }

  private static final DestinationObjectFactory DESTINATION_OBJECT_FACTORY =
      new DestinationObjectFactory();

  private Configuration conf;
  private HiveMetastoreClient srcClient;
  private HiveMetastoreClient dstClient;
  private Cluster srcCluster;
  private Cluster dstCluster;
  // list of db and table blacklist.
  private List<BlackListPair> blackList;
  private DirectoryCopier directoryCopier;
  private TaskEstimator estimator;
  private ObjectConflictHandler objectConflictHandler = new ObjectConflictHandler();

  /**
   * Initializes clusters, metastore clients, the blacklist and the task
   * estimator from the mapper's configuration.
   *
   * @throws IOException if a metastore client cannot be created
   * @throws ConfigurationException if the cluster configuration is invalid
   */
  protected void setup(Mapper.Context context)
      throws IOException, InterruptedException, ConfigurationException {
    try {
      this.conf = context.getConfiguration();
      ClusterFactory clusterFactory = MetastoreReplUtils.createClusterFactory(conf);
      this.srcCluster = clusterFactory.getSrcCluster();
      this.srcClient = this.srcCluster.getMetastoreClient();
      this.dstCluster = clusterFactory.getDestCluster();
      this.dstClient = this.dstCluster.getMetastoreClient();
      this.directoryCopier = clusterFactory.getDirectoryCopier();
      if (context.getConfiguration()
          .get(ConfigurationKeys.BATCH_JOB_METASTORE_BLACKLIST) == null) {
        this.blackList = Collections.<BlackListPair>emptyList();
      } else {
        // Blacklist format: comma-separated "dbRegex:tableRegex" entries.
        this.blackList = Lists.transform(Arrays.asList(context.getConfiguration()
            .get(ConfigurationKeys.BATCH_JOB_METASTORE_BLACKLIST).split(",")),
            new Function<String, BlackListPair>() {
              @Override
              public BlackListPair apply(@Nullable String str) {
                String[] parts = str.split(":");
                return new BlackListPair(parts[0], parts[1]);
              }
            });
      }
      this.estimator = new TaskEstimator(conf,
          DESTINATION_OBJECT_FACTORY,
          srcCluster,
          dstCluster,
          directoryCopier);
    } catch (HiveMetastoreException e) {
      throw new IOException(e);
    }
  }

  /**
   * Determines the replication action for one table and serializes it (plus,
   * for partitioned tables, one CHECK_PARTITION entry per partition in the
   * union of source and destination partition names) as output records.
   *
   * @return serialized (TaskEstimate, HiveObjectSpec) strings; empty if the
   *     table is blacklisted
   */
  protected List<String> processTable(final String db, final String table)
      throws IOException, HiveMetastoreException {
    // If table and db matches black list, we will skip it.
    if (Iterables.any(blackList,
        new Predicate<BlackListPair>() {
          @Override
          public boolean apply(@Nullable BlackListPair blackListPair) {
            return blackListPair.matches(db, table);
          }
        })) {
      return Collections.emptyList();
    }
    HiveObjectSpec spec = new HiveObjectSpec(db, table);
    // Table exists in source, but not in dest. It should copy the table.
    TaskEstimate estimate = estimator.analyze(spec);
    ArrayList<String> ret = new ArrayList<>();
    ret.add(MetastoreReplicationJob.serializeJobResult(estimate, spec));
    Table tab = srcClient.getTable(db, table);
    if (tab != null && tab.getPartitionKeys().size() > 0) {
      // For partitioned table, if action is COPY we need to make sure to handle partition key
      // change case first. The copy task will be run twice once here and the other time at commit
      // phase. The task will handle the case properly.
      if (estimate.getTaskType() == TaskEstimate.TaskType.COPY_PARTITIONED_TABLE) {
        CopyPartitionedTableTask copyPartitionedTableTaskJob = new CopyPartitionedTableTask(
            conf,
            DESTINATION_OBJECT_FACTORY,
            objectConflictHandler,
            srcCluster,
            dstCluster,
            spec,
            Optional.<Path>empty());
        copyPartitionedTableTaskJob.runTask();
      }
      // partition tables need to generate partitions.
      // Union of src and dest partition names so dest-only partitions can be
      // checked (and potentially dropped) downstream.
      HashSet<String> partNames = Sets.newHashSet(srcClient.getPartitionNames(db, table));
      HashSet<String> dstPartNames = Sets.newHashSet(dstClient.getPartitionNames(db, table));
      ret.addAll(Lists.transform(Lists.newArrayList(Sets.union(partNames, dstPartNames)),
          new Function<String, String>() {
            public String apply(String str) {
              return MetastoreReplicationJob.serializeJobResult(
                  new TaskEstimate(TaskEstimate.TaskType.CHECK_PARTITION,
                      false,
                      false,
                      Optional.empty(),
                      Optional.empty()),
                  new HiveObjectSpec(db, table, str));
            }
          }));
    }
    return ret;
  }

  /** Releases both metastore clients. */
  protected void cleanup() throws IOException, InterruptedException {
    this.srcClient.close();
    this.dstClient.close();
  }
}
| 9,539 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/MetastoreReplicationJob.java | package com.airbnb.reair.batch.hive;
import com.google.common.collect.ImmutableList;
import com.airbnb.reair.batch.template.TemplateRenderException;
import com.airbnb.reair.batch.template.VelocityUtils;
import com.airbnb.reair.common.FsUtils;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import com.airbnb.reair.incremental.primitives.TaskEstimate;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.velocity.VelocityContext;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.util.Arrays;
import java.util.Calendar;
import java.util.List;
import java.util.Optional;
import java.util.TimeZone;
import java.util.UUID;
/**
* MetastoreReplicationJobs runs 3 jobs to replicate the Hive metadata and HDFS data.
*
* <p>1. runMetastoreCompareJob(tableListFileOnHdfs, step1Out).
*
* <p>1.1 job.setInputFormatClass(MetastoreScanInputFormat.class)
* - Scan source metastore for all tables in all databases (or given a whitelist). Each map input
* split will contain a list of "database:table"s.
*
* <p>1.2 job.setMapperClass(Stage1ProcessTableMapper.class)
* - For each table, generate a task (called TaskEstimate) for it.
* - For non-partitioned table, an equal check on HDFS file is performed to decide whether data of
* the table needs to be copied.
* - For each partitioned table, first check and create the table in destination cluster
* (COPY_PARTITIONED_TABLE), and then generate a list of tasks, one for each partition from union
* of src and destination.
*
* <p>1.3 job.setReducerClass(Stage1PartitionCompareReducer.class);
* - Pass through all other tasks, except the CHECK_PARTITION tasks, which are re-analyzed to be
* COPY_PARTITION, DROP_PARTITION, NO_OP, etc, using an equal check on the HDFS file.
*
* <p>2. runHdfsCopyJob(new Path(step1Out, "part*"), step2Out) (note when running end-to-end,
* "part*" is not specified).
*
* <p>2.1 job.setInputFormatClass(TextInputFormat.class).
* - Input of this job is the output of stage 1. It contains the actions to take for the tables and
* partitions. In this stage, we only care about the COPY actions.
*
* <p>2.2 job.setMapperClass(Stage2DirectoryCopyMapper.class).
* - In the mapper, it will enumerate the directories and figure out files needs to be copied. It
 * also cleans up the destination directory, which means even identical HDFS files will be recopied.
* Since each directory can have an uneven number of files, we shuffle again to distribute the
* work for copying files, which is done on the reducers.
*
* <p>2.3 job.setReducerClass(Stage2DirectoryCopyReducer.class).
* - The actual copy of the files are done here. Although the BatchUtils.doCopyFileAction tries to
* avoid copying when the destination file exists with the same timestamp and size, in reality, the
* destination file is already deleted in the mapper. :(
*
* <p>3. runCommitChangeJob(new Path(step1Out, "part*"), step3Out) (note when running end-to-end,
* "part*" is not specified).
*
* <p>3.1 job.setInputFormatClass(TextInputFormat.class).
*
* <p>3.2 job.setMapperClass(Stage3CommitChangeMapper.class).
* - Takes action like COPY_PARTITION, COPY_PARTITIONED_TABLE, COPY_UNPARTITIONED_TABLE,
* DROP_PARTITION, DROP_TABLE.
*/
public class MetastoreReplicationJob extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(MetastoreReplicationJob.class);
public static final String USAGE_COMMAND_STR = "Usage: hadoop jar <jar name> "
+ MetastoreReplicationJob.class.getName();
// Context for rendering templates using velocity
private VelocityContext velocityContext = new VelocityContext();
// After each job completes, we'll output Hive commands to the screen that can be used to view
// debug data. These are the templates for those commands.
private static final String STEP1_HQL_TEMPLATE = "step1_log.hql.vm";
private static final String STEP2_HQL_TEMPLATE = "step2_log.hql.vm";
private static final String STEP3_HQL_TEMPLATE = "step3_log.hql.vm";
/**
* Serialize TaskEstimate and HiveObjectSpec into a String. The String is passed between MR jobs.
*
* @param estimate TaskEstimate object
* @param spec HiveObjectSpec
* @return serialized output for estimate and spec object
*/
public static String serializeJobResult(TaskEstimate estimate, HiveObjectSpec spec) {
  // Flatten the estimate and the object spec into a single tab-delimited
  // value (genValue substitutes "NULL" for null fields), consumed later by
  // deseralizeJobResult.
  return ReplicationUtils.genValue(
      estimate.getTaskType().name(),
      String.valueOf(estimate.isUpdateMetadata()),
      String.valueOf(estimate.isUpdateData()),
      estimate.getSrcPath().map(Path::toString).orElse(null),
      estimate.getDestPath().map(Path::toString).orElse(null),
      spec.getDbName(),
      spec.getTableName(),
      spec.getPartitionName());
}
/**
* Deserialize TaskEstimate and HiveObjectSpec from a String.
*
* @param result serialized string
* @return Pair of TaskEstimate and HiveObjectSpec
*/
// NOTE(review): method name misspells "deserialize"; kept as-is because it is
// part of the public API and callers elsewhere depend on it.
public static Pair<TaskEstimate, HiveObjectSpec> deseralizeJobResult(String result) {
  // Inverse of serializeJobResult: fields are tab-delimited and "NULL" marks
  // an absent optional value.
  String [] fields = result.split("\t");
  TaskEstimate estimate = new TaskEstimate(TaskEstimate.TaskType.valueOf(fields[0]),
      Boolean.valueOf(fields[1]),
      Boolean.valueOf(fields[2]),
      fields[3].equals("NULL") ? Optional.empty() : Optional.of(new Path(fields[3])),
      fields[4].equals("NULL") ? Optional.empty() : Optional.of(new Path(fields[4])));
  // fields[7] is the partition name; "NULL" means a table-level spec.
  HiveObjectSpec spec = null;
  if (fields[7].equals("NULL")) {
    spec = new HiveObjectSpec(fields[5], fields[6]);
  } else {
    spec = new HiveObjectSpec(fields[5], fields[6], fields[7]);
  }
  return Pair.of(estimate, spec);
}
/**
* Print usage information to provided OutputStream.
*
* @param applicationName Name of application to list in usage.
* @param options Command-line options to be part of usage.
* @param out OutputStream to which to write the usage information.
*/
public static void printUsage(String applicationName, Options options, OutputStream out) {
  // Render commons-cli usage text wrapped at 80 columns, flushing so the
  // text is visible even when the stream is not closed by the caller.
  final PrintWriter writer = new PrintWriter(out);
  final HelpFormatter usageFormatter = new HelpFormatter();
  usageFormatter.printUsage(writer, 80, applicationName, options);
  writer.flush();
}
/**
* Run batch replication of the Hive metastore.
* 1. Parse input arguments.
* 2. Run three MR jobs in sequence.
*
* @param args command arguments
* @return 1 failed
* 0 succeeded.
*
* @throws Exception InterruptedException,
* IOException,
* ClassNotFoundException,
* TemplateRenderException
*/
@SuppressWarnings("static-access")
public int run(String[] args) throws Exception {
  // Define the supported command-line options.
  Options options = new Options();
  options.addOption(OptionBuilder.withLongOpt("config-files")
      .withDescription(
          "Comma separated list of paths to configuration files")
      .hasArg()
      .withArgName("PATH")
      .create());
  options.addOption(OptionBuilder.withLongOpt("step")
      .withDescription("Run a specific step")
      .hasArg()
      .withArgName("ST")
      .create());
  options.addOption(OptionBuilder.withLongOpt("override-input")
      .withDescription("Input override for step")
      .hasArg()
      .withArgName("OI")
      .create());
  options.addOption(OptionBuilder.withLongOpt("table-list")
      .withDescription("File containing a list of tables to copy")
      .hasArg()
      .withArgName("PATH")
      .create());
  CommandLineParser parser = new BasicParser();
  CommandLine cl = null;
  try {
    cl = parser.parse(options, args);
  } catch (ParseException e) {
    System.err.println("Encountered exception while parsing using GnuParser:\n" + e.getMessage());
    printUsage(USAGE_COMMAND_STR, options, System.out);
    System.out.println();
    ToolRunner.printGenericCommandUsage(System.err);
    return 1;
  }
  String configPaths = null;
  if (cl.hasOption("config-files")) {
    configPaths = cl.getOptionValue("config-files");
    LOG.info("configPaths=" + configPaths);
    // load configure and merge with job conf
    Configuration conf = new Configuration();
    if (configPaths != null) {
      for (String configPath : configPaths.split(",")) {
        conf.addResource(new Path(configPath));
      }
    }
    mergeConfiguration(conf, this.getConf());
  } else {
    LOG.warn("Configuration files not specified. Running unit test?");
  }
  // Speculative execution would run mappers/reducers (which copy data and
  // mutate the metastore) more than once, so it must be disabled.
  if (this.getConf().getBoolean(MRJobConfig.MAP_SPECULATIVE, true)) {
    throw new ConfigurationException(String.format("Speculative execution must be disabled "
        + "for mappers! Please set %s appropriately.", MRJobConfig.MAP_SPECULATIVE));
  }
  if (this.getConf().getBoolean(MRJobConfig.REDUCE_SPECULATIVE, true)) {
    throw new ConfigurationException(String.format("Speculative execution must be disabled "
        + "for reducers! Please set %s appropriately.", MRJobConfig.REDUCE_SPECULATIVE));
  }
  Optional<Path> localTableListFile = Optional.empty();
  if (cl.hasOption("table-list")) {
    localTableListFile = Optional.of(new Path(cl.getOptionValue("table-list")));
  }
  // step == -1 means run all three stages end to end.
  int step = -1;
  if (cl.hasOption("step")) {
    step = Integer.valueOf(cl.getOptionValue("step"));
  }
  String finalOutput = this.getConf().get(ConfigurationKeys.BATCH_JOB_OUTPUT_DIR);
  if (finalOutput == null) {
    System.err.println(
        ConfigurationKeys.BATCH_JOB_OUTPUT_DIR + " is required in configuration file.");
    return 1;
  }
  Path outputParent = new Path(finalOutput);
  Path step1Out = new Path(outputParent, "step1output");
  Path step2Out = new Path(outputParent, "step2output");
  Path step3Out = new Path(outputParent, "step3output");
  // Job start time (UTC) is interpolated into the debug HQL templates.
  Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
  String jobStartTime = String.format("%tY-%<tm-%<tdT%<tH_%<tM_%<tS",
      calendar);
  velocityContext.put("job_start_time", jobStartTime);
  velocityContext.put("step1_output_directory", step1Out.toString());
  velocityContext.put("step2_output_directory", step2Out.toString());
  velocityContext.put("step3_output_directory", step3Out.toString());
  Optional<Path> tableListFileOnHdfs = Optional.empty();
  if (localTableListFile.isPresent()) {
    // Create a temporary directory on HDFS and copy our table list to that directory so that it
    // can be read by mappers in the HDFS job.
    Path tableFilePath = localTableListFile.get();
    Path tmpDir = createTempDirectory(getConf());
    tableListFileOnHdfs = Optional.of(new Path(tmpDir, tableFilePath.getName()));
    LOG.info(String.format("Copying %s to temporary directory %s",
        tableFilePath,
        tableListFileOnHdfs.get()));
    copyFile(localTableListFile.get(), tableListFileOnHdfs.get());
    LOG.info(String.format("Copied %s to temporary directory %s",
        tableFilePath,
        tableListFileOnHdfs.get()));
  } else {
    LOG.info("List of tables to copy is not specified. Copying all tables instead.");
  }
  if (step == -1) {
    // Full run: clear all stage outputs, then run the three jobs in order,
    // aborting on the first failure.
    LOG.info("Deleting " + step1Out);
    FsUtils.deleteDirectory(getConf(), step1Out);
    LOG.info("Deleting " + step2Out);
    FsUtils.deleteDirectory(getConf(), step2Out);
    LOG.info("Deleting " + step3Out);
    FsUtils.deleteDirectory(getConf(), step3Out);
    if (runMetastoreCompareJob(tableListFileOnHdfs, step1Out) != 0) {
      return -1;
    }
    if (runHdfsCopyJob(step1Out, step2Out) != 0) {
      return -1;
    }
    if (runCommitChangeJob(step1Out, step3Out) != 0) {
      return -1;
    }
    return 0;
  } else {
    // Single-step run; steps 2 and 3 may take --override-input to read a
    // different step-1 output directory.
    switch (step) {
      case 1:
        LOG.info("Deleting " + step1Out);
        FsUtils.deleteDirectory(this.getConf(), step1Out);
        return this.runMetastoreCompareJob(tableListFileOnHdfs, step1Out);
      case 2:
        LOG.info("Deleting " + step2Out);
        FsUtils.deleteDirectory(getConf(), step2Out);
        if (cl.hasOption("override-input")) {
          step1Out = new Path(cl.getOptionValue("override-input"));
        }
        return this.runHdfsCopyJob(new Path(step1Out, "part*"), step2Out);
      case 3:
        LOG.info("Deleting " + step3Out);
        FsUtils.deleteDirectory(this.getConf(), step3Out);
        if (cl.hasOption("override-input")) {
          step1Out = new Path(cl.getOptionValue("override-input"));
        }
        return this.runCommitChangeJob(new Path(step1Out, "part*"), step3Out);
      default:
        LOG.error("Invalid step specified: " + step);
        return 1;
    }
  }
}
private void mergeConfiguration(Configuration inputConfig, Configuration merged) {
  // Copy only this fixed whitelist of batch-replication keys from the loaded
  // config files into the job configuration; absent keys are left untouched.
  List<String> keysToMerge = ImmutableList.of(ConfigurationKeys.SRC_CLUSTER_NAME,
      ConfigurationKeys.SRC_CLUSTER_METASTORE_URL,
      ConfigurationKeys.SRC_HDFS_ROOT,
      ConfigurationKeys.SRC_HDFS_TMP,
      ConfigurationKeys.DEST_CLUSTER_NAME,
      ConfigurationKeys.DEST_CLUSTER_METASTORE_URL,
      ConfigurationKeys.DEST_HDFS_ROOT,
      ConfigurationKeys.DEST_HDFS_TMP,
      ConfigurationKeys.BATCH_JOB_METASTORE_BLACKLIST,
      ConfigurationKeys.BATCH_JOB_CLUSTER_FACTORY_CLASS,
      ConfigurationKeys.BATCH_JOB_OUTPUT_DIR,
      ConfigurationKeys.BATCH_JOB_INPUT_LIST,
      ConfigurationKeys.BATCH_JOB_METASTORE_PARALLELISM,
      ConfigurationKeys.BATCH_JOB_COPY_PARALLELISM,
      ConfigurationKeys.SYNC_MODIFIED_TIMES_FOR_FILE_COPY,
      ConfigurationKeys.BATCH_JOB_VERIFY_COPY_CHECKSUM,
      ConfigurationKeys.BATCH_JOB_OVERWRITE_NEWER,
      MRJobConfig.MAP_SPECULATIVE,
      MRJobConfig.REDUCE_SPECULATIVE
  );
  for (String key : keysToMerge) {
    final String overrideValue = inputConfig.get(key);
    if (overrideValue == null) {
      continue;
    }
    merged.set(key, overrideValue);
  }
}
/**
 * Runs the stage-1 metastore compare job over ALL tables found by scanning
 * the source metastore (no table list). Output records are the serialized
 * task estimates consumed by stages 2 and 3.
 *
 * @param output directory for the job's (gzip-compressed) output
 * @return 0 on success, 1 on failure
 */
private int runMetastoreCompareJob(Path output)
    throws IOException, InterruptedException, ClassNotFoundException {
  Job job = Job.getInstance(this.getConf(), "Stage1: Metastore Compare Job");
  job.setJarByClass(this.getClass());
  job.setInputFormatClass(MetastoreScanInputFormat.class);
  job.setMapperClass(Stage1ProcessTableMapper.class);
  job.setReducerClass(Stage1PartitionCompareReducer.class);
  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(Text.class);
  FileOutputFormat.setOutputPath(job, output);
  FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
  boolean success = job.waitForCompletion(true);
  return success ? 0 : 1;
}
/**
* Runs the job to scan the metastore for directory locations.
*
* @param inputTableListPath the path to the file containing the tables to copy
* @param outputPath the directory to store the logging output data
*/
private int runMetastoreCompareJob(Optional<Path> inputTableListPath, Path outputPath)
    throws InterruptedException, IOException, ClassNotFoundException, TemplateRenderException {
  LOG.info("Starting job for step 1...");
  // With an explicit table list, use the text-input variant; otherwise scan
  // the whole metastore.
  final int result = inputTableListPath.isPresent()
      ? runMetastoreCompareJobWithTextInput(inputTableListPath.get(), outputPath)
      : runMetastoreCompareJob(outputPath);
  if (result == 0) {
    LOG.info("Job for step 1 finished successfully! To view logging data, run the following "
        + "commands in Hive: \n\n"
        + VelocityUtils.renderTemplate(STEP1_HQL_TEMPLATE, velocityContext));
  }
  return result;
}
private int runMetastoreCompareJobWithTextInput(Path input, Path output)
throws IOException, InterruptedException, ClassNotFoundException {
Job job = Job.getInstance(this.getConf(), "Stage1: Metastore Compare Job with Input List");
job.setJarByClass(this.getClass());
job.setInputFormatClass(TextInputFormat.class);
job.setMapperClass(Stage1ProcessTableMapperWithTextInput.class);
job.setReducerClass(Stage1PartitionCompareReducer.class);
FileInputFormat.setInputPaths(job, input);
FileInputFormat.setMaxInputSplitSize(job,
this.getConf().getLong(FileInputFormat.SPLIT_MAXSIZE, 60000L));
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(Text.class);
FileOutputFormat.setOutputPath(job, output);
FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
job.setNumReduceTasks(getConf().getInt(
ConfigurationKeys.BATCH_JOB_METASTORE_PARALLELISM,
150));
boolean success = job.waitForCompletion(true);
return success ? 0 : 1;
}
private int runHdfsCopyJob(Path input, Path output)
throws IOException, InterruptedException, ClassNotFoundException, TemplateRenderException {
LOG.info("Starting job for step 2...");
Job job = Job.getInstance(this.getConf(), "Stage 2: HDFS Copy Job");
job.setJarByClass(this.getClass());
job.setInputFormatClass(TextInputFormat.class);
job.setMapperClass(Stage2DirectoryCopyMapper.class);
job.setReducerClass(Stage2DirectoryCopyReducer.class);
FileInputFormat.setInputPaths(job, input);
FileInputFormat.setInputDirRecursive(job, true);
FileInputFormat.setMaxInputSplitSize(job,
this.getConf().getLong(FileInputFormat.SPLIT_MAXSIZE, 60000L));
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(Text.class);
FileOutputFormat.setOutputPath(job, output);
FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
job.setNumReduceTasks(getConf().getInt(
ConfigurationKeys.BATCH_JOB_COPY_PARALLELISM,
150));
boolean success = job.waitForCompletion(true);
if (success) {
LOG.info("Job for step 2 finished successfully! To view logging data, run the following "
+ "commands in Hive: \n\n"
+ VelocityUtils.renderTemplate(STEP2_HQL_TEMPLATE, velocityContext)
+ "\n");
}
return success ? 0 : 1;
}
private int runCommitChangeJob(Path input, Path output)
throws IOException, InterruptedException, ClassNotFoundException, TemplateRenderException {
LOG.info("Starting job for step 3...");
Job job = Job.getInstance(this.getConf(), "Stage3: Commit Change Job");
job.setJarByClass(this.getClass());
job.setInputFormatClass(TextInputFormat.class);
job.setMapperClass(Stage3CommitChangeMapper.class);
job.setNumReduceTasks(0);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(job, input);
FileInputFormat.setInputDirRecursive(job, true);
FileInputFormat.setMaxInputSplitSize(job,
this.getConf().getLong(FileInputFormat.SPLIT_MAXSIZE, 60000L));
FileOutputFormat.setOutputPath(job, output);
FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
job.setNumReduceTasks(getConf().getInt(
ConfigurationKeys.BATCH_JOB_METASTORE_PARALLELISM,
150));
boolean success = job.waitForCompletion(true);
if (success) {
LOG.info("Job for step 3 finished successfully! To view logging data, run the following "
+ "commands in Hive: \n\n"
+ VelocityUtils.renderTemplate(STEP3_HQL_TEMPLATE, velocityContext));
}
return success ? 0 : 1;
}
/**
* Main function - invoke ToolRunner.run().
*
* @param args program arguments
*
* @throws Exception InterruptedException,
* IOException,
* ClassNotFoundException,
* TemplateRenderException
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new MetastoreReplicationJob(), args);
System.exit(res);
}
public static class Stage1ProcessTableMapper extends Mapper<Text, Text, LongWritable, Text> {
private TableCompareWorker worker = new TableCompareWorker();
protected void setup(Context context) throws IOException, InterruptedException {
try {
worker.setup(context);
} catch (ConfigurationException e) {
throw new IOException("Invalid configuration", e);
}
}
protected void map(Text key, Text value, Context context)
throws IOException, InterruptedException {
try {
for (String result : worker.processTable(key.toString(), value.toString())) {
context.write(new LongWritable((long)result.hashCode()), new Text(result));
}
LOG.info(
String.format("database %s, table %s processed", key.toString(), value.toString()));
} catch (HiveMetastoreException e) {
throw new IOException(
String.format(
"database %s, table %s got exception", key.toString(), value.toString()), e);
}
}
protected void cleanup(Context context) throws IOException, InterruptedException {
worker.cleanup();
}
}
public static class Stage1ProcessTableMapperWithTextInput
extends Mapper<LongWritable, Text, LongWritable, Text> {
private TableCompareWorker worker = new TableCompareWorker();
protected void setup(Context context) throws IOException, InterruptedException {
try {
worker.setup(context);
} catch (ConfigurationException e) {
throw new IOException("Invalid configuration", e);
}
}
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
try {
String [] columns = value.toString().split("\\.");
if (columns.length != 2) {
LOG.error(String.format("invalid input at line %d: %s", key.get(), value.toString()));
return;
}
for (String result : worker.processTable(columns[0], columns[1])) {
context.write(new LongWritable((long)result.hashCode()), new Text(result));
}
LOG.info(
String.format("database %s, table %s processed", key.toString(), value.toString()));
} catch (HiveMetastoreException e) {
throw new IOException(
String.format(
"database %s, table %s got exception", key.toString(), value.toString()), e);
}
}
protected void cleanup(Context context) throws IOException, InterruptedException {
worker.cleanup();
}
}
/**
* Creates a new temporary directory under the temporary directory root.
*
* @param conf Configuration containing the directory for temporary files on HDFS
* @return A path to a new and unique directory under the temporary directory
* @throws IOException if there's an error creating the temporary directory
*/
private static Path createTempDirectory(Configuration conf) throws IOException {
Path tmpRoot = new Path(conf.get(ConfigurationKeys.DEST_HDFS_TMP));
String uuid = String.format("reair_%d_%s",
System.currentTimeMillis(),
UUID.randomUUID().toString());
Path tmpDir = new Path(tmpRoot, uuid);
FileSystem fs = tmpDir.getFileSystem(conf);
fs.mkdirs(tmpDir);
LOG.info(String.format("Registering %s to be deleted on exit", tmpDir));
fs.deleteOnExit(tmpDir);
return tmpDir;
}
/**
* Copies a files.
* @param srcFile File to copy from.
* @param destFile File to copy to. The file should not exist.
* @throws IOException if there is an error copying the file.
*/
private static void copyFile(Path srcFile, Path destFile) throws IOException {
String[] copyArgs = {"-cp", srcFile.toString(), destFile.toString()};
FsShell shell = new FsShell();
try {
LOG.debug("Using shell to copy with args " + Arrays.asList(copyArgs));
ToolRunner.run(shell, copyArgs);
} catch (Exception e) {
throw new IOException(e);
} finally {
shell.close();
}
}
}
| 9,540 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/HiveTablesInputSplit.java | package com.airbnb.reair.batch.hive;
import com.google.common.base.Joiner;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * Split containing a Hive table name.
 *
 * <p>Carries a batch of "db:table" strings from {@code getSplits} to the record reader,
 * serialized via the {@link Writable} contract.
 */
public class HiveTablesInputSplit extends InputSplit implements Writable {
  // Table names carried by this split.
  private List<String> tables;

  public HiveTablesInputSplit(List<String> tables) {
    this.tables = tables;
  }

  /** No-arg constructor required for Writable deserialization. */
  public HiveTablesInputSplit() {}

  @Override
  public long getLength() throws IOException, InterruptedException {
    // Splits are sized by table count rather than bytes, so no meaningful length exists.
    return 0;
  }

  @Override
  public String toString() {
    return Joiner.on(",").join(tables);
  }

  @Override
  public void write(DataOutput out) throws IOException {
    // Length-prefixed encoding: entry count followed by each table name.
    out.writeInt(tables.size());
    for (String table : tables) {
      Text.writeString(out, table);
    }
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    int count = in.readInt();
    List<String> read = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
      read.add(Text.readString(in));
    }
    this.tables = read;
  }

  @Override
  public String[] getLocations() throws IOException, InterruptedException {
    // No locality preference: tables are not tied to particular hosts.
    return new String[0];
  }

  public List<String> getTables() {
    return tables;
  }
}
| 9,541 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/Stage3CommitChangeMapper.java | package com.airbnb.reair.batch.hive;
import com.airbnb.reair.common.DistCpException;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.RunInfo;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.configuration.ObjectConflictHandler;
import com.airbnb.reair.incremental.primitives.CopyPartitionTask;
import com.airbnb.reair.incremental.primitives.CopyPartitionedTableTask;
import com.airbnb.reair.incremental.primitives.CopyUnpartitionedTableTask;
import com.airbnb.reair.incremental.primitives.DropPartitionTask;
import com.airbnb.reair.incremental.primitives.DropTableTask;
import com.airbnb.reair.incremental.primitives.TaskEstimate;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
import java.util.Optional;
/**
* Stage 3 mapper to commit metadata changes.
*
* <p>Input of the Stage 3 job is Stage 1 job output, which is a list of actions to take for each
* table / partition.
*/
public class Stage3CommitChangeMapper extends Mapper<LongWritable, Text, Text, Text> {
private static final Log LOG = LogFactory.getLog(Stage3CommitChangeMapper.class);
private static final DestinationObjectFactory DESTINATION_OBJECT_FACTORY =
new DestinationObjectFactory();
private Configuration conf;
private HiveMetastoreClient srcClient;
private HiveMetastoreClient dstClient;
private Cluster srcCluster;
private Cluster dstCluster;
private DirectoryCopier directoryCopier;
private ObjectConflictHandler objectConflictHandler = new ObjectConflictHandler();
protected void setup(Context context) throws IOException, InterruptedException {
try {
this.conf = context.getConfiguration();
ClusterFactory clusterFactory = MetastoreReplUtils.createClusterFactory(conf);
this.srcCluster = clusterFactory.getSrcCluster();
this.srcClient = this.srcCluster.getMetastoreClient();
this.dstCluster = clusterFactory.getDestCluster();
this.dstClient = this.dstCluster.getMetastoreClient();
this.directoryCopier = clusterFactory.getDirectoryCopier();
} catch (HiveMetastoreException | ConfigurationException e) {
throw new IOException(e);
}
}
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
try {
Pair<TaskEstimate, HiveObjectSpec> input =
MetastoreReplicationJob.deseralizeJobResult(value.toString());
TaskEstimate estimate = input.getLeft();
HiveObjectSpec spec = input.getRight();
RunInfo status = null;
LOG.info(String.format("Working on %s with estimate %s", spec, estimate));
switch (estimate.getTaskType()) {
case COPY_PARTITION:
CopyPartitionTask copyPartitionTask = new CopyPartitionTask(
conf,
DESTINATION_OBJECT_FACTORY,
objectConflictHandler,
srcCluster,
dstCluster,
spec,
Optional.<Path>empty(),
Optional.<Path>empty(),
directoryCopier,
false);
status = copyPartitionTask.runTask();
context.write(value, new Text(status.getRunStatus().toString()));
break;
case COPY_PARTITIONED_TABLE:
CopyPartitionedTableTask copyPartitionedTableTaskJob = new CopyPartitionedTableTask(
conf,
DESTINATION_OBJECT_FACTORY,
objectConflictHandler,
srcCluster,
dstCluster,
spec,
Optional.<Path>empty());
status = copyPartitionedTableTaskJob.runTask();
context.write(value, new Text(status.getRunStatus().toString()));
break;
case COPY_UNPARTITIONED_TABLE:
CopyUnpartitionedTableTask copyUnpartitionedTableTask = new CopyUnpartitionedTableTask(
conf,
DESTINATION_OBJECT_FACTORY,
objectConflictHandler,
srcCluster,
dstCluster,
spec,
Optional.<Path>empty(),
directoryCopier,
false);
status = copyUnpartitionedTableTask.runTask();
context.write(value, new Text(status.getRunStatus().toString()));
break;
case DROP_PARTITION:
Partition dstPart = dstClient.getPartition(
spec.getDbName(),
spec.getTableName(),
spec.getPartitionName());
if (dstPart == null) {
context.write(value, new Text(RunInfo.RunStatus.SUCCESSFUL.toString()));
break;
}
DropPartitionTask dropPartitionTask = new DropPartitionTask(srcCluster,
dstCluster,
spec,
ReplicationUtils.getTldt(dstPart));
status = dropPartitionTask.runTask();
context.write(value, new Text(status.getRunStatus().toString()));
break;
case DROP_TABLE:
Table dstTable = dstClient.getTable(spec.getDbName(), spec.getTableName());
if (dstTable == null) {
context.write(value, new Text(RunInfo.RunStatus.SUCCESSFUL.toString()));
break;
}
DropTableTask dropTableTask = new DropTableTask(srcCluster,
dstCluster,
spec,
ReplicationUtils.getTldt(dstTable));
status = dropTableTask.runTask();
context.write(value, new Text(status.getRunStatus().toString()));
break;
default:
break;
}
} catch (HiveMetastoreException | DistCpException | ConfigurationException e) {
LOG.error(String.format("Got exception while processing %s", value.toString()), e);
context.write(value, new Text(RunInfo.RunStatus.FAILED.toString()));
}
}
protected void cleanup(Context context) throws IOException, InterruptedException {
this.srcClient.close();
this.dstClient.close();
}
}
| 9,542 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/MetastoreScanInputFormat.java | package com.airbnb.reair.batch.hive;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
/**
 * Input format that scans the metastore and generates splits containing tables.
 *
 * <p>Databases are listed once, table listing is fanned out over a small thread pool, and
 * the resulting "db:table" names are shuffled and partitioned into roughly one split per
 * configured mapper.
 */
public class MetastoreScanInputFormat extends FileInputFormat<Text, Text> {
  private static final Log LOG = LogFactory.getLog(MetastoreScanInputFormat.class);
  // Number of threads used to list tables across databases in parallel.
  private static final int NUMBER_OF_THREADS = 16;

  @Override
  public RecordReader<Text, Text> createRecordReader(
      InputSplit inputSplit,
      TaskAttemptContext taskAttemptContext)
      throws IOException, InterruptedException {
    return new TableRecordReader();
  }

  @Override
  public List<InputSplit> getSplits(JobContext context) throws IOException {
    // split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    Cluster srcCluster = null;
    HiveMetastoreClient srcClient = null;
    List<String> allTables = new ArrayList<>();
    final int numberOfMappers = context.getConfiguration()
        .getInt(ConfigurationKeys.BATCH_JOB_METASTORE_PARALLELISM, 150);

    try {
      ClusterFactory clusterFactory =
          MetastoreReplUtils.createClusterFactory(context.getConfiguration());
      srcCluster = clusterFactory.getSrcCluster();
      srcClient = srcCluster.getMetastoreClient();
    } catch (ConfigurationException | HiveMetastoreException e) {
      throw new IOException("Invalid metastore host name.", e);
    }

    try {
      List<String> databases = srcClient.getAllDatabases();
      LOG.info("Total dbs: " + databases.size());

      List<Future<List<String>>> splitfutures = new ArrayList<>();
      final int dbPerThread = Math.max(databases.size() / NUMBER_OF_THREADS, 1);
      for (List<String> range : Lists.partition(databases, dbPerThread)) {
        // Each worker lists the tables for one batch of databases.
        splitfutures.add(executor.submit(new SplitCallable(range, srcCluster)));
      }

      for (Future<List<String>> futureInputSplits : splitfutures) {
        try {
          allTables.addAll(futureInputSplits.get());
        } catch (Exception e) {
          throw new IOException("Could not get input splits", e);
        }
      }
      LOG.info(String.format("Total tables: %d", allTables.size()));
    } catch (HiveMetastoreException e) {
      LOG.error(e.getMessage());
      throw new IOException(e);
    } finally {
      executor.shutdownNow();
      srcClient.close();
    }

    // NOTE(review): with assertions disabled (the production default) an empty metastore
    // silently yields zero splits here rather than failing — confirm that is acceptable.
    assert allTables.size() > 0;

    // Shuffle so each split gets a representative mix of small and large tables.
    Collections.shuffle(allTables, new Random(System.nanoTime()));

    final int tablesPerSplit = Math.max(allTables.size() / numberOfMappers, 1);
    return Lists.transform(
        Lists.partition(allTables, tablesPerSplit),
        tables -> new HiveTablesInputSplit(tables));
  }

  /**
   * Get list of tables from databases in parallel. Each thread will work on database candidates
   * and generate list of table names ("db:table") in those databases.
   */
  // Made static: the callable never touches the enclosing instance.
  private static class SplitCallable implements Callable<List<String>> {
    private final Cluster cluster;
    private final List<String> candidates;

    public SplitCallable(List<String> candidates, Cluster cluster) {
      this.candidates = candidates;
      this.cluster = cluster;
    }

    @Override
    public List<String> call() throws Exception {
      ArrayList<String> tables = new ArrayList<>();
      // NOTE(review): this client is never closed; confirm whether
      // Cluster.getMetastoreClient() hands out shared or per-call clients.
      HiveMetastoreClient client = cluster.getMetastoreClient();
      for (final String db : candidates) {
        tables.addAll(Lists.transform(client.getAllTables(db), str -> db + ":" + str));
      }
      LOG.info("Thread " + Thread.currentThread().getId() + ":processed "
          + candidates.size() + " dbs. Produced " + tables.size() + " tables.");
      return tables;
    }
  }
}
| 9,543 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/TableRecordReader.java | package com.airbnb.reair.batch.hive;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import java.io.IOException;
import java.util.List;
/**
 * Record reader that returns DB / table names.
 *
 * <p>Iterates over the "db:table" entries of a {@link HiveTablesInputSplit}, exposing the
 * database name as the key and the table name as the value.
 */
public class TableRecordReader extends RecordReader<Text, Text> {
  private List<String> tables;
  private int index = 0;
  // Current entry split into {database, table}; null before the first nextKeyValue().
  private String[] cur;

  @Override
  public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
      throws IOException, InterruptedException {
    if (!(inputSplit instanceof HiveTablesInputSplit)) {
      throw new IOException("Invalid split class passed in.");
    }
    this.tables = ((HiveTablesInputSplit) inputSplit).getTables();
    this.index = 0;
    this.cur = null;
  }

  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    if (index < tables.size()) {
      // Entries are formatted as "db:table" by MetastoreScanInputFormat.
      cur = tables.get(index++).split(":");
      return true;
    }
    return false;
  }

  @Override
  public Text getCurrentKey() throws IOException, InterruptedException {
    if (cur == null) {
      return null;
    }
    return new Text(cur[0]);
  }

  @Override
  public Text getCurrentValue() throws IOException, InterruptedException {
    if (cur == null) {
      return null;
    }
    return new Text(cur[1]);
  }

  @Override
  public float getProgress() throws IOException, InterruptedException {
    // Fixed: guard against 0/0 = NaN for a (degenerate) empty split.
    if (tables.isEmpty()) {
      return 1.0f;
    }
    return ((float) index) / tables.size();
  }

  @Override
  public void close() throws IOException {}
}
| 9,544 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/Stage2DirectoryCopyReducer.java | package com.airbnb.reair.batch.hive;
import com.airbnb.reair.batch.BatchUtils;
import com.airbnb.reair.batch.SimpleFileStatus;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
* Stage 2 reducer to handle directory copy.
*
* <p>The inputs for this reducer are the files needs to be copied. The output of the reducer are
* rows that indicate whether the specified file was copied or not.
*/
public class Stage2DirectoryCopyReducer extends Reducer<LongWritable, Text, Text, Text> {
private static final Log LOG = LogFactory.getLog(Stage2DirectoryCopyReducer.class);
private Configuration conf;
private Cluster dstCluster;
enum CopyStatus {
COPIED,
SKIPPED
}
public Stage2DirectoryCopyReducer() {
}
protected void setup(Context context) throws IOException, InterruptedException {
try {
this.conf = context.getConfiguration();
ClusterFactory clusterFactory = MetastoreReplUtils.createClusterFactory(conf);
this.dstCluster = clusterFactory.getDestCluster();
} catch (ConfigurationException e) {
throw new IOException(e);
}
}
protected void reduce(LongWritable key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
for (Text value : values) {
String[] fields = value.toString().split("\t");
String srcFileName = fields[0];
String dstDirectory = fields[1];
long size = Long.valueOf(fields[2]);
SimpleFileStatus fileStatus = new SimpleFileStatus(srcFileName, size, 0L);
FileSystem srcFs = (new Path(srcFileName)).getFileSystem(this.conf);
FileSystem dstFs = (new Path(dstDirectory)).getFileSystem(this.conf);
String result = BatchUtils.doCopyFileAction(
conf,
fileStatus,
srcFs,
dstDirectory,
dstFs,
dstCluster.getTmpDir(),
context,
false,
context.getTaskAttemptID().toString());
if (result == null) {
context.write(new Text(CopyStatus.COPIED.toString()),
new Text(ReplicationUtils.genValue(value.toString(), " ",
String.valueOf(System.currentTimeMillis()))));
} else {
context.write(
new Text(CopyStatus.SKIPPED.toString()),
new Text(ReplicationUtils.genValue(
value.toString(),
result,
String.valueOf(System.currentTimeMillis()))));
}
}
}
}
| 9,545 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/Stage2DirectoryCopyMapper.java | package com.airbnb.reair.batch.hive;
import static com.airbnb.reair.batch.hive.MetastoreReplicationJob.deseralizeJobResult;
import com.google.common.hash.Hashing;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.ReplicationUtils;
import com.airbnb.reair.incremental.primitives.TaskEstimate;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
* Stage 2 Mapper to handle copying of HDFS directories.
*
* <p>Input of this job is the output of stage 1. It contains the actions to take for the tables and
* partitions. In this stage, we only care about the COPY actions. In the mapper, it will enumerate
* the directories and figure out files needs to be copied. Since each directory can have an uneven
* number of files, we shuffle again to distribute the work for copying files, which is done on the
* reducers.
*/
public class Stage2DirectoryCopyMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
private static final Log LOG = LogFactory.getLog(Stage2DirectoryCopyMapper.class);
private static final PathFilter hiddenFileFilter = new PathFilter() {
public boolean accept(Path path) {
String name = path.getName();
return !name.startsWith("_") && !name.startsWith(".");
}
};
private Configuration conf;
protected void setup(Context context) throws IOException, InterruptedException {
this.conf = context.getConfiguration();
}
protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
Pair<TaskEstimate, HiveObjectSpec> input = deseralizeJobResult(value.toString());
TaskEstimate estimate = input.getLeft();
HiveObjectSpec spec = input.getRight();
switch (estimate.getTaskType()) {
case COPY_PARTITION:
case COPY_UNPARTITIONED_TABLE:
if (estimate.isUpdateData()) {
updateDirectory(context, spec.getDbName(), spec.getTableName(), spec.getPartitionName(),
estimate.getSrcPath().get(), estimate.getDestPath().get());
}
break;
default:
break;
}
}
private static void hdfsCleanDirectory(
String db,
String table,
String part,
String dst,
Configuration conf,
boolean recreate) throws IOException {
Path dstPath = new Path(dst);
FileSystem fs = dstPath.getFileSystem(conf);
if (fs.exists(dstPath) && !fs.delete(dstPath, true)) {
throw new IOException("Failed to delete destination directory: " + dstPath.toString());
}
if (fs.exists(dstPath)) {
throw new IOException("Validate delete destination directory failed: " + dstPath.toString());
}
if (!recreate) {
return;
}
if (!fs.mkdirs(dstPath)) {
throw new IOException("Validate recreate destination directory failed: "
+ dstPath.toString());
}
}
private void updateDirectory(
Context context,
String db,
String table,
String partition,
Path src,
Path dst) throws IOException, InterruptedException {
LOG.info("updateDirectory:" + dst.toString());
hdfsCleanDirectory(db, table, partition, dst.toString(), this.conf, true);
try {
FileSystem srcFs = src.getFileSystem(this.conf);
LOG.info("src file: " + src.toString());
for (FileStatus status : srcFs.listStatus(src, hiddenFileFilter)) {
LOG.info("file: " + status.getPath().toString());
long hashValue = Hashing.murmur3_128().hashLong(
(long) (Long.valueOf(status.getLen()).hashCode()
* Long.valueOf(status.getModificationTime()).hashCode())).asLong();
context.write(
new LongWritable(hashValue),
new Text(ReplicationUtils.genValue(
status.getPath().toString(),
dst.toString(),
String.valueOf(status.getLen()))));
}
} catch (IOException e) {
// Ignore File list generate error because source directory could be removed while we
// enumerate it.
LOG.warn("Error listing " + src, e);
}
}
}
| 9,546 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/MetastoreReplUtils.java | package com.airbnb.reair.batch.hive;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfiguredClusterFactory;
import com.airbnb.reair.incremental.deploy.ConfigurationKeys;
import org.apache.hadoop.conf.Configuration;
/**
 * Utilities class for metastore replication.
 */
public class MetastoreReplUtils {

  private MetastoreReplUtils() {
  }

  /**
   * Static function to create ClusterFactory object based on configuration. For test environment
   * it will create a mock ClusterFactory.
   *
   * @param conf configuration for the cluster
   * @return ClusterFactory implementation
   * @throws RuntimeException if the configured factory class cannot be loaded or instantiated
   */
  public static ClusterFactory createClusterFactory(Configuration conf) {
    String clusterFactoryClassName =
        conf.get(ConfigurationKeys.BATCH_JOB_CLUSTER_FACTORY_CLASS);
    if (clusterFactoryClassName == null) {
      // No override configured — use the standard configuration-driven factory.
      ConfiguredClusterFactory configuredClusterFactory = new ConfiguredClusterFactory();
      configuredClusterFactory.setConf(conf);
      return configuredClusterFactory;
    }
    try {
      // asSubclass() gives a clear ClassCastException for misconfigured class names, and
      // getDeclaredConstructor().newInstance() replaces Class.newInstance(), which silently
      // rethrows checked exceptions thrown by the constructor.
      return Class.forName(clusterFactoryClassName)
          .asSubclass(ClusterFactory.class)
          .getDeclaredConstructor()
          .newInstance();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  }
}
| 9,547 |
0 | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch | Create_ds/reair/main/src/main/java/com/airbnb/reair/batch/hive/Stage1PartitionCompareReducer.java | package com.airbnb.reair.batch.hive;
import com.airbnb.reair.common.HiveMetastoreClient;
import com.airbnb.reair.common.HiveMetastoreException;
import com.airbnb.reair.common.HiveObjectSpec;
import com.airbnb.reair.incremental.DirectoryCopier;
import com.airbnb.reair.incremental.configuration.Cluster;
import com.airbnb.reair.incremental.configuration.ClusterFactory;
import com.airbnb.reair.incremental.configuration.ConfigurationException;
import com.airbnb.reair.incremental.configuration.DestinationObjectFactory;
import com.airbnb.reair.incremental.primitives.TaskEstimate;
import com.airbnb.reair.incremental.primitives.TaskEstimator;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
* Reducer to process partition entities.
*
* <p>Table and partition entities are evenly distributed to reducers via shuffle. For partition
* entities, the reducer will figure out the action to take. For table entities, the reducer will
* pass them through to the next stage.
*/
public class Stage1PartitionCompareReducer extends Reducer<LongWritable, Text, Text, Text> {
private static final Log LOG = LogFactory.getLog(Stage1PartitionCompareReducer.class);
private static final DestinationObjectFactory destinationObjectFactory =
new DestinationObjectFactory();
private Configuration conf;
private HiveMetastoreClient srcClient;
private HiveMetastoreClient dstClient;
private Cluster srcCluster;
private Cluster dstCluster;
private DirectoryCopier directoryCopier;
private long count = 0;
private TaskEstimator estimator;
public Stage1PartitionCompareReducer() {
}
protected void setup(Context context) throws IOException, InterruptedException {
try {
this.conf = context.getConfiguration();
ClusterFactory clusterFactory = MetastoreReplUtils.createClusterFactory(conf);
this.srcCluster = clusterFactory.getSrcCluster();
this.srcClient = this.srcCluster.getMetastoreClient();
this.dstCluster = clusterFactory.getDestCluster();
this.dstClient = this.dstCluster.getMetastoreClient();
this.directoryCopier = clusterFactory.getDirectoryCopier();
this.estimator = new TaskEstimator(conf,
destinationObjectFactory,
srcCluster,
dstCluster,
directoryCopier);
} catch (HiveMetastoreException | ConfigurationException e) {
throw new IOException(e);
}
}
protected void reduce(LongWritable key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
for (Text value : values) {
Pair<TaskEstimate, HiveObjectSpec> input =
MetastoreReplicationJob.deseralizeJobResult(value.toString());
TaskEstimate estimate = input.getLeft();
HiveObjectSpec spec = input.getRight();
String result = value.toString();
String extra = "";
try {
if (estimate.getTaskType() == TaskEstimate.TaskType.CHECK_PARTITION) {
// Table exists in source, but not in dest. It should copy the table.
TaskEstimate newEstimate = estimator.analyze(spec);
result = MetastoreReplicationJob.serializeJobResult(newEstimate, spec);
}
} catch (HiveMetastoreException e) {
LOG.error(String.format("Hit exception during db:%s, tbl:%s, part:%s", spec.getDbName(),
spec.getTableName(), spec.getPartitionName()), e);
extra = String.format("exception in %s of mapper = %s", estimate.getTaskType().toString(),
context.getTaskAttemptID().toString());
}
context.write(new Text(result), new Text(extra));
++this.count;
if (this.count % 100 == 0) {
LOG.info("Processed " + this.count + " entities");
}
}
}
protected void cleanup(Context context) throws IOException,
InterruptedException {
this.srcClient.close();
this.dstClient.close();
}
}
| 9,548 |
package com.mycompany.app;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/**
* Unit test for simple App.
*/
public class AppTest {
    /** Trivially passing sanity check that confirms the test harness runs. */
    @Test
    public void shouldAnswerWithTrue() {
        assertTrue(true);
    }
}
| 9,549 |
package com.mycompany.app;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.concurrent.*;
public class MultithreadApp {
    private static final Logger logger = LogManager.getLogger(MultithreadApp.class);
    // Defaults used when the corresponding environment variable is unset or invalid.
    // (Previously the env vars were parsed unconditionally, so a missing variable
    // crashed with NumberFormatException/NPE and these defaults were never used.)
    private static int LOGGER_THREADS = 20;
    private static long LOGGER_ITERATIONS = 1000000;
    private static int LOG_SIZE_BYTES = 1000;
    private static long LOGGER_SLEEP_MS = 1000;

    /**
     * Parses {@code value} as an int, returning {@code defaultValue} when the
     * value is null or not a valid integer.
     */
    static int parseIntOrDefault(String value, int defaultValue) {
        if (value == null) {
            return defaultValue;
        }
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException e) {
            return defaultValue;
        }
    }

    /**
     * Parses {@code value} as a long, returning {@code defaultValue} when the
     * value is null or not a valid integer.
     */
    static long parseLongOrDefault(String value, long defaultValue) {
        if (value == null) {
            return defaultValue;
        }
        try {
            return Long.parseLong(value);
        } catch (NumberFormatException e) {
            return defaultValue;
        }
    }

    /** Returns true when {@code value} starts with y/Y/t/T; false otherwise (including null). */
    static boolean parseBooleanFlag(String value) {
        if (value == null || value.isEmpty()) {
            return false;
        }
        char c = value.charAt(0);
        return c == 'y' || c == 'Y' || c == 't' || c == 'T';
    }

    /**
     * Starts LOGGER_THREADS LoggerApp worker threads and waits for all of
     * them to finish via the latch. All knobs come from environment
     * variables, falling back to the declared defaults when unset.
     */
    public static void main(String[] args) throws InterruptedException {
        System.out.println("log4j app multithread Mar-6-22");
        LOGGER_THREADS = parseIntOrDefault(System.getenv("LOGGER_THREADS"), LOGGER_THREADS);
        LOGGER_ITERATIONS = parseLongOrDefault(System.getenv("LOGGER_ITERATIONS"), LOGGER_ITERATIONS);
        LOG_SIZE_BYTES = parseIntOrDefault(System.getenv("LOG_SIZE_BYTES"), LOG_SIZE_BYTES);
        LOGGER_SLEEP_MS = parseLongOrDefault(System.getenv("LOGGER_SLEEP_MS"), LOGGER_SLEEP_MS);
        boolean infinite = parseBooleanFlag(System.getenv("INFINITE"));

        CountDownLatch latch = new CountDownLatch(LOGGER_THREADS);
        for (int i = 0; i < LOGGER_THREADS; i++) {
            Runnable appInstance =
                new LoggerApp(latch, i, LOGGER_ITERATIONS, LOG_SIZE_BYTES, LOGGER_SLEEP_MS, infinite);
            new Thread(appInstance).start();
        }
        latch.await();
    }
}
class LoggerApp implements Runnable {
    private static final Logger logger = LogManager.getLogger(LoggerApp.class);
    // Counted down when this worker exits, so main() can wait for all workers.
    private final CountDownLatch latch;
    private final int ID;
    private final long iterations;
    private final int log_size;
    private final long logger_sleep_ms;
    private final boolean infinite;

    public LoggerApp(CountDownLatch latch, int ID, long iterations, int log_size, long logger_sleep_ms, boolean infinite) {
        this.latch = latch;
        this.ID = ID;
        this.iterations = iterations;
        this.log_size = log_size;
        this.logger_sleep_ms = logger_sleep_ms;
        this.infinite = infinite;
    }

    /**
     * Emits one debug and one info line (padded to log_size bytes) per
     * iteration, sleeping logger_sleep_ms between iterations; repeats the
     * whole set forever when infinite is set. The latch is counted down in a
     * finally block so an unexpected failure cannot leave main() waiting
     * forever, and an interrupt now stops the worker instead of being
     * swallowed.
     */
    public void run() {
        System.out.println("running logger instance " + this.ID + "...");
        String padding = createPadding(this.log_size);
        int count = 0;
        try {
            boolean iterate = true;
            while (iterate) {
                for (int i = 0; i < this.iterations; i++) {
                    try {
                        Thread.sleep(this.logger_sleep_ms);
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag and stop this worker
                        // instead of ignoring the interruption.
                        Thread.currentThread().interrupt();
                        return;
                    }
                    logger.debug("Thread " + this.ID + ": " + i + " " + padding);
                    logger.info("Thread " + this.ID + ": " + i + " " + padding);
                }
                count++;
                System.out.println("logger instance " + this.ID + " completed set " + count + " of interations..");
                iterate = this.infinite;
            }
        } finally {
            latch.countDown();
        }
    }

    /** Builds a string of msgSize 'x' characters used to pad each log line. */
    private static String createPadding(int msgSize) {
        StringBuilder sb = new StringBuilder(msgSize);
        for (int i = 0; i < msgSize; i++) {
            sb.append('x');
        }
        return sb.toString();
    }
}
| 9,550 |
package com.mycompany.app;
//import org.apache.log4j.Logger;
//
//import org.slf4j.Logger;
//import org.slf4j.LoggerFactory;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.*;
/**
* Hello world!
*
*/
public class App
{
    private static final Logger logger = LogManager.getLogger(App.class);
    // Defaults; LOOP_ITERATIONS and LOG_PADDING may be overridden via
    // same-named environment variables.
    private static int LOOP_ITERATIONS = 40;
    private static int INNER_LOOP_ITERATIONS = 12500;
    private static int LOG_PADDING = 20000;

    /**
     * Logging throughput benchmark: each outer iteration emits
     * INNER_LOOP_ITERATIONS debug/info pairs padded to LOG_PADDING bytes,
     * reports the batch rate in logs/ms, then sleeps 2 seconds. The average
     * rate and its standard deviation are printed at the end.
     */
    public static void main( String[] args ) throws InterruptedException
    {
        String tmp;
        System.out.println( "Hello World! v5: 2 second sleep, 1 mil logs" );
        tmp = System.getenv("LOOP_ITERATIONS");
        if (tmp != null) {
            try {
                LOOP_ITERATIONS = Integer.parseInt(tmp);
            }
            catch (NumberFormatException e) {
                e.printStackTrace();
            }
        }
        tmp = System.getenv("LOG_PADDING");
        if (tmp != null) {
            try {
                LOG_PADDING = Integer.parseInt(tmp);
            }
            catch (NumberFormatException e) {
                e.printStackTrace();
            }
        }
        String padding = createPadding(LOG_PADDING);
        logger.debug("Hello this is a debug message");
        logger.info("Hello this is an info message");
        double[] metrics = new double[LOOP_ITERATIONS];
        double total = 0;
        long total_ms = 0;
        for (int i=0; i < LOOP_ITERATIONS; i++) {
            long startTime = System.currentTimeMillis();
            for (int k=0; k < INNER_LOOP_ITERATIONS; k++) {
                logger.debug("Hello " + i + " " + padding);
                logger.info("Hello " + i + " " + padding);
            }
            long endTime = System.currentTimeMillis();
            // Clamp to 1ms so a sub-millisecond batch cannot divide by zero.
            long elapsedms = Math.max(1, endTime - startTime);
            total_ms += elapsedms;
            long seconds = elapsedms / 1000;
            long milli = elapsedms % 1000;
            // Use floating-point division: the previous integer division
            // truncated the rate before it was stored in a double.
            double logspermillisecond = (INNER_LOOP_ITERATIONS * 2.0) / elapsedms;
            total += logspermillisecond;
            metrics[i] = logspermillisecond;
            System.out.println("Iteration: " + i);
            System.out.println("Sent: " + (INNER_LOOP_ITERATIONS * 2) + " logs");
            System.out.println("Log size: " + LOG_PADDING + " bytes");
            System.out.println("Runtime: " + seconds + "." + milli + "s\nRate: " + logspermillisecond + " logs/ms");
            System.out.println("Total execution time: " + elapsedms + "ms");
            System.out.println("_____________");
            java.util.concurrent.TimeUnit.SECONDS.sleep(2);
        }
        System.out.println("AVERAGE RATE: " + (total / LOOP_ITERATIONS) + " logs/ms");
        System.out.println("AVERAGE RATE good math: "
            + ((LOOP_ITERATIONS * INNER_LOOP_ITERATIONS * 2.0) / Math.max(1, total_ms)) + " logs/ms");
        double stdev = calculateStandardDeviation(metrics);
        System.out.println("STDEV: " + stdev + " logs/ms");
    }

    /** Builds a string of msgSize 'x' characters used to pad each log line. */
    private static String createPadding(int msgSize) {
        StringBuilder sb = new StringBuilder(msgSize);
        for (int i=0; i<msgSize; i++) {
            sb.append('x');
        }
        return sb.toString();
    }

    /** Population standard deviation of the per-iteration rates. */
    private static double calculateStandardDeviation(double[] array) {
        // finding the sum of array values
        double sum = 0.0;
        for (int i = 0; i < array.length; i++) {
            sum += array[i];
        }
        // getting the mean of array.
        double mean = sum / array.length;
        // calculating the standard deviation
        double standardDeviation = 0.0;
        for (int i = 0; i < array.length; i++) {
            standardDeviation += Math.pow(array[i] - mean, 2);
        }
        return Math.sqrt(standardDeviation/array.length);
    }
}
| 9,551 |
package com.mycompany.app;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/**
* Unit test for simple App.
*/
public class AppTest {
    /** Trivially passing sanity check that confirms the test harness runs. */
    @Test
    public void shouldAnswerWithTrue() {
        assertTrue(true);
    }
}
| 9,552 |
package com.mycompany.app;
import org.apache.logging.log4j.LogManager;
import java.util.concurrent.TimeUnit;
import org.apache.logging.log4j.Logger;
public class App
{
    private static final Logger logger;
    // Number of one-second batches to run.
    private static int TIME;
    // Log lines emitted per batch; an env value of "Nm" means N * 1000 lines.
    private static int ITERATION;
    // ~1KB payload appended to every line.
    private static String ONE_KB_TEXT;

    /**
     * Load generator: emits ITERATION sequence-numbered ~1KB log lines per
     * second for TIME seconds. Each line carries a monotonically increasing
     * id and the batch start timestamp so downstream validation can detect
     * loss or duplication.
     */
    public static void main(final String[] args) throws InterruptedException {
        String tmp = System.getenv("TIME");
        if (tmp != null) {
            try {
                App.TIME = Integer.parseInt(tmp);
            }
            catch (NumberFormatException e) {
                e.printStackTrace();
            }
        }
        tmp = System.getenv("ITERATION");
        if (tmp != null) {
            try {
                App.ITERATION = Integer.parseInt(tmp.replace("m", "")) * 1000;
            }
            catch (NumberFormatException e) {
                e.printStackTrace();
            }
        }
        if (System.getenv("DEBUG_TCP_LOGGER") != null && System.getenv("DEBUG_TCP_LOGGER").equals("true")) {
            System.out.println("Starting Load Test. Iteration " + App.ITERATION + ". On port: " + System.getenv("LOGGER_PORT") + ". Time: " + App.TIME);
        }
        long testExpectedTime = System.currentTimeMillis();
        for (int i = 0; i < App.TIME; ++i) {
            final long batchStartTime = System.currentTimeMillis();
            for (int k = 0; k < App.ITERATION; ++k) {
                App.logger.info("" + (10000000 + i*App.ITERATION + k) + "_" + batchStartTime + "_" + App.ONE_KB_TEXT);
            }
            testExpectedTime += 1000L;
            final long deltaTime = testExpectedTime - System.currentTimeMillis();
            // Thread.sleep rejects negative timeouts; if the batch overran its
            // one-second budget, skip the pause instead of crashing.
            if (deltaTime > 0) {
                TimeUnit.MILLISECONDS.sleep(deltaTime);
            }
        }
    }

    static {
        logger = LogManager.getLogger(App.class);
        App.TIME = 10;
        App.ITERATION = 1;
        App.ONE_KB_TEXT = "RUDQEWDDKBVMHPYVOAHGADVQGRHGCNRDCTLUWQCBFBKFGZHTGEUKFXWNCKXPRWBSVJGHEARMDQGVVRFPVCIBYEORHYPUTQJKUMNZJXIYLDCJUHABJIXFPUNJQDORGPKWFLQZXIGVGCWTZCVWGBFSGVXGEITYKNTWCYZDOAZFOTXDOFRPECXBSCSORSUUNUJZEJZPTODHBXVMOETBRFGNWNZHGINVNYZPKKSFLZHLSSDHFGLTHZEKICPGNYSCTAIHARDDYIJHKLMAOIDLEKRXMFNVJOJVDFYKNVIQKCIGTRFWKJRHQSFDWWKTJNMNKFBOMBMZMRCOHPUFZEPTQTZBLBDBZPJJXRYDFSOWKDVZLZYWSJYFTCKQJFPQOMCWQHKLNHUGWWVBGTRLLVUHTPHTKNBSRUNNOIFGIJPBHPCKYXNGDCQYJEWFFKRRTHJDUBEZPJIXMAOLZQDZQAYEUZFRLTLTXNGAVAGZZDUERZWTJVDTXPKOIRTCKTFOFJAXVFLNKPBYOIYVPHUYBRZZORCEMMAUTZIAUSXVDTKHSUIRTSYWQMYZBMUGSATXPNESEVQMUKHYZFWSLHJDNYUQWOKDUTUKPRXBLIYGSCFGBGXATINMMCWNWBGJTLZTPKGBTPWTHQPUHDJITWPCJLGZFNZTCIEWWVTREFCTPVOUADQCRQCBRHNHDKGQIXHIWGGDGAAFYZRODKFTKQATAUDOMZTSQUYZHGNJOBSUJDHESPBOIJCGXPEZMMQJNFTYBJEYXPZAZICZJKEZKCZEUMZTTSQEHADOVMCDMDEBUJAPKIAEYQEWIYZSAYAWAGFSTBJYCUFZHMJMLCTVTZWGCPDAURQYSXVICLVWKPAOMVTQTESYFPTMNMSNZPUXMDJRDKHDRAIRYELEXRJUAMOLZVWNHGNVFETVUDZEIDJRPSHMXAZDZXDCXMUJTPDTDUHBAZGPIQOUNUHMVLCZCSUUHGTE";
    }
}
| 9,553 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import static org.junit.Assert.*;
public class SparkSubmitCommandBuilderSuite extends BaseSuite {
  // Shared empty properties file and option parser, created once per class.
  private static File dummyPropsFile;
  private static SparkSubmitOptionParser parser;
  @Rule
  public ExpectedException expectedException = ExpectedException.none();
  @BeforeClass
  public static void setUp() throws Exception {
    dummyPropsFile = File.createTempFile("spark", "properties");
    parser = new SparkSubmitOptionParser();
  }
  @AfterClass
  public static void cleanUp() throws Exception {
    dummyPropsFile.delete();
  }
  // Client deploy mode, with and without the default properties file.
  @Test
  public void testDriverCmdBuilder() throws Exception {
    testCmdBuilder(true, true);
    testCmdBuilder(true, false);
  }
  // Cluster deploy mode, with and without the default properties file.
  @Test
  public void testClusterCmdBuilder() throws Exception {
    testCmdBuilder(false, true);
    testCmdBuilder(false, false);
  }
  // --help is forwarded to spark-submit; empty args default to SparkSubmit.
  @Test
  public void testCliHelpAndNoArg() throws Exception {
    List<String> helpArgs = Arrays.asList(parser.HELP);
    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(helpArgs, env);
    assertTrue("--help should be contained in the final cmd.", cmd.contains(parser.HELP));
    List<String> sparkEmptyArgs = Collections.emptyList();
    cmd = buildCommand(sparkEmptyArgs, env);
    assertTrue(
      "org.apache.spark.deploy.SparkSubmit should be contained in the final cmd of empty input.",
      cmd.contains("org.apache.spark.deploy.SparkSubmit"));
  }
  // --status/--kill work both standalone and through the examples runner.
  @Test
  public void testCliKillAndStatus() throws Exception {
    List<String> params = Arrays.asList("driver-20160531171222-0000");
    testCLIOpts(null, parser.STATUS, params);
    testCLIOpts(null, parser.KILL_SUBMISSION, params);
    testCLIOpts(SparkSubmitCommandBuilder.RUN_EXAMPLE, parser.STATUS, params);
    testCLIOpts(SparkSubmitCommandBuilder.RUN_EXAMPLE, parser.KILL_SUBMISSION, params);
  }
  // Driver memory, classpath, java options and library path end up in the
  // launcher command / environment for client mode.
  @Test
  public void testCliParser() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(
      parser.MASTER,
      "local",
      parser.DRIVER_MEMORY,
      "42g",
      parser.DRIVER_CLASS_PATH,
      "/driverCp",
      parser.DRIVER_JAVA_OPTIONS,
      "extraJavaOpt",
      parser.CONF,
      "spark.randomOption=foo",
      parser.CONF,
      SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH + "=/driverLibPath",
      SparkLauncher.NO_RESOURCE);
    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(sparkSubmitArgs, env);
    assertTrue(findInStringList(env.get(CommandBuilderUtils.getLibPathEnvName()),
        File.pathSeparator, "/driverLibPath"));
    assertTrue(findInStringList(findArgValue(cmd, "-cp"), File.pathSeparator, "/driverCp"));
    assertTrue("Driver -Xmx should be configured.", cmd.contains("-Xmx42g"));
    assertTrue("Command should contain user-defined conf.",
        Collections.indexOfSubList(cmd, Arrays.asList(parser.CONF, "spark.randomOption=foo")) > 0);
  }
  // For the shell main class, app args after the spark-submit options are
  // passed through to the shell verbatim and in order.
  @Test
  public void testShellCliParser() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(
      parser.CLASS,
      "org.apache.spark.repl.Main",
      parser.MASTER,
      "foo",
      "--app-arg",
      "bar",
      "--app-switch",
      parser.FILES,
      "baz",
      parser.NAME,
      "appName");
    List<String> args = newCommandBuilder(sparkSubmitArgs).buildSparkSubmitArgs();
    List<String> expected = Arrays.asList("spark-shell", "--app-arg", "bar", "--app-switch");
    assertEquals(expected, args.subList(args.size() - expected.size(), args.size()));
  }
  // "--opt=value" syntax is parsed the same as "--opt value".
  @Test
  public void testAlternateSyntaxParsing() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(
      parser.CLASS + "=org.my.Class",
      parser.MASTER + "=foo",
      parser.DEPLOY_MODE + "=bar",
      SparkLauncher.NO_RESOURCE);
    List<String> cmd = newCommandBuilder(sparkSubmitArgs).buildSparkSubmitArgs();
    assertEquals("org.my.Class", findArgValue(cmd, parser.CLASS));
    assertEquals("foo", findArgValue(cmd, parser.MASTER));
    assertEquals("bar", findArgValue(cmd, parser.DEPLOY_MODE));
  }
  // pyspark shell: a python interpreter is the command and the submit args
  // are exported through PYSPARK_SUBMIT_ARGS.
  @Test
  public void testPySparkLauncher() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(
      SparkSubmitCommandBuilder.PYSPARK_SHELL,
      "--master=foo",
      "--deploy-mode=bar");
    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(sparkSubmitArgs, env);
    assertTrue(Arrays.asList("python", "python2", "python3").contains(cmd.get(cmd.size() - 1)));
    assertEquals(
      String.format("\"%s\" \"foo\" \"%s\" \"bar\" \"%s\"",
                    parser.MASTER, parser.DEPLOY_MODE, SparkSubmitCommandBuilder.PYSPARK_SHELL_RESOURCE),
      env.get("PYSPARK_SUBMIT_ARGS"));
  }
  // A .py app resource (not the shell) goes through the normal submit path.
  @Test
  public void testPySparkFallback() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(
      "--master=foo",
      "--deploy-mode=bar",
      "script.py",
      "arg1");
    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(sparkSubmitArgs, env);
    assertEquals("foo", findArgValue(cmd, "--master"));
    assertEquals("bar", findArgValue(cmd, "--deploy-mode"));
    assertEquals("script.py", cmd.get(cmd.size() - 2));
    assertEquals("arg1", cmd.get(cmd.size() - 1));
  }
  // sparkR shell: spark.r.shell.command picks the R binary and the submit
  // args are exported through SPARKR_SUBMIT_ARGS.
  @Test
  public void testSparkRShell() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(
      SparkSubmitCommandBuilder.SPARKR_SHELL,
      "--master=foo",
      "--deploy-mode=bar",
      "--conf", "spark.r.shell.command=/usr/bin/R");
    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(sparkSubmitArgs, env);
    assertEquals("/usr/bin/R", cmd.get(cmd.size() - 1));
    assertEquals(
      String.format(
        "\"%s\" \"foo\" \"%s\" \"bar\" \"--conf\" \"spark.r.shell.command=/usr/bin/R\" \"%s\"",
        parser.MASTER, parser.DEPLOY_MODE, SparkSubmitCommandBuilder.SPARKR_SHELL_RESOURCE),
      env.get("SPARKR_SUBMIT_ARGS"));
  }
  // run-example with no arguments at all must be rejected.
  @Test(expected = IllegalArgumentException.class)
  public void testExamplesRunnerNoArg() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(SparkSubmitCommandBuilder.RUN_EXAMPLE);
    Map<String, String> env = new HashMap<>();
    buildCommand(sparkSubmitArgs, env);
  }
  // Informational flags are allowed without an example class name.
  @Test
  public void testExamplesRunnerNoMainClass() throws Exception {
    testCLIOpts(SparkSubmitCommandBuilder.RUN_EXAMPLE, parser.HELP, null);
    testCLIOpts(SparkSubmitCommandBuilder.RUN_EXAMPLE, parser.USAGE_ERROR, null);
    testCLIOpts(SparkSubmitCommandBuilder.RUN_EXAMPLE, parser.VERSION, null);
  }
  // Non-informational options still require an example class name.
  @Test
  public void testExamplesRunnerWithMasterNoMainClass() throws Exception {
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage("Missing example class name.");
    List<String> sparkSubmitArgs = Arrays.asList(
      SparkSubmitCommandBuilder.RUN_EXAMPLE,
      parser.MASTER + "=foo"
    );
    Map<String, String> env = new HashMap<>();
    buildCommand(sparkSubmitArgs, env);
  }
  // Example class names are expanded with the examples package prefix and
  // remaining args are passed through to the example.
  @Test
  public void testExamplesRunner() throws Exception {
    List<String> sparkSubmitArgs = Arrays.asList(
      SparkSubmitCommandBuilder.RUN_EXAMPLE,
      parser.MASTER + "=foo",
      parser.DEPLOY_MODE + "=bar",
      "SparkPi",
      "42");
    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(sparkSubmitArgs, env);
    assertEquals("foo", findArgValue(cmd, parser.MASTER));
    assertEquals("bar", findArgValue(cmd, parser.DEPLOY_MODE));
    assertEquals(SparkSubmitCommandBuilder.EXAMPLE_CLASS_PREFIX + "SparkPi",
      findArgValue(cmd, parser.CLASS));
    assertEquals("42", cmd.get(cmd.size() - 1));
  }
  @Test(expected = IllegalArgumentException.class)
  public void testMissingAppResource() {
    new SparkSubmitCommandBuilder().buildSparkSubmitArgs();
  }
  // Shared body for the driver/cluster command tests: builds a full launcher
  // command and verifies driver-only settings (memory, classpath, library
  // path) only appear in client mode, while the common spark-submit options
  // appear in both.
  private void testCmdBuilder(boolean isDriver, boolean useDefaultPropertyFile) throws Exception {
    String deployMode = isDriver ? "client" : "cluster";
    SparkSubmitCommandBuilder launcher =
      newCommandBuilder(Collections.emptyList());
    launcher.childEnv.put(CommandBuilderUtils.ENV_SPARK_HOME,
      System.getProperty("spark.test.home"));
    launcher.master = "yarn";
    launcher.deployMode = deployMode;
    launcher.appResource = "/foo";
    launcher.appName = "MyApp";
    launcher.mainClass = "my.Class";
    launcher.appArgs.add("foo");
    launcher.appArgs.add("bar");
    launcher.conf.put("spark.foo", "foo");
    // either set the property through "--conf" or through default property file
    if (!useDefaultPropertyFile) {
      launcher.setPropertiesFile(dummyPropsFile.getAbsolutePath());
      launcher.conf.put(SparkLauncher.DRIVER_MEMORY, "1g");
      launcher.conf.put(SparkLauncher.DRIVER_EXTRA_CLASSPATH, "/driver");
      launcher.conf.put(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, "-Ddriver");
      launcher.conf.put(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH, "/native");
    } else {
      launcher.childEnv.put("SPARK_CONF_DIR", System.getProperty("spark.test.home")
        + "/launcher/src/test/resources");
    }
    Map<String, String> env = new HashMap<>();
    List<String> cmd = launcher.buildCommand(env);
    // Checks below are different for driver and non-driver mode.
    if (isDriver) {
      assertTrue("Driver -Xmx should be configured.", cmd.contains("-Xmx1g"));
    } else {
      boolean found = false;
      for (String arg : cmd) {
        if (arg.startsWith("-Xmx")) {
          found = true;
          break;
        }
      }
      assertFalse("Memory arguments should not be set.", found);
    }
    String[] cp = findArgValue(cmd, "-cp").split(Pattern.quote(File.pathSeparator));
    if (isDriver) {
      assertTrue("Driver classpath should contain provided entry.", contains("/driver", cp));
    } else {
      assertFalse("Driver classpath should not be in command.", contains("/driver", cp));
    }
    String libPath = env.get(CommandBuilderUtils.getLibPathEnvName());
    if (isDriver) {
      assertNotNull("Native library path should be set.", libPath);
      assertTrue("Native library path should contain provided entry.",
        contains("/native", libPath.split(Pattern.quote(File.pathSeparator))));
    } else {
      assertNull("Native library should not be set.", libPath);
    }
    // Checks below are the same for both driver and non-driver mode.
    if (!useDefaultPropertyFile) {
      assertEquals(dummyPropsFile.getAbsolutePath(), findArgValue(cmd, parser.PROPERTIES_FILE));
    }
    assertEquals("yarn", findArgValue(cmd, parser.MASTER));
    assertEquals(deployMode, findArgValue(cmd, parser.DEPLOY_MODE));
    assertEquals("my.Class", findArgValue(cmd, parser.CLASS));
    assertEquals("MyApp", findArgValue(cmd, parser.NAME));
    boolean appArgsOk = false;
    for (int i = 0; i < cmd.size(); i++) {
      if (cmd.get(i).equals("/foo")) {
        assertEquals("foo", cmd.get(i + 1));
        assertEquals("bar", cmd.get(i + 2));
        assertEquals(cmd.size(), i + 3);
        appArgsOk = true;
        break;
      }
    }
    assertTrue("App resource and args should be added to command.", appArgsOk);
    Map<String, String> conf = parseConf(cmd, parser);
    assertEquals("foo", conf.get("spark.foo"));
  }
  // Linear search for an exact string in an array.
  private boolean contains(String needle, String[] haystack) {
    for (String entry : haystack) {
      if (entry.equals(needle)) {
        return true;
      }
    }
    return false;
  }
  // Collects every "--conf key=value" pair from the command into a map.
  private Map<String, String> parseConf(List<String> cmd, SparkSubmitOptionParser parser) {
    Map<String, String> conf = new HashMap<>();
    for (int i = 0; i < cmd.size(); i++) {
      if (cmd.get(i).equals(parser.CONF)) {
        String[] val = cmd.get(i + 1).split("=", 2);
        conf.put(val[0], val[1]);
        i += 1;
      }
    }
    return conf;
  }
  // Returns the argument immediately following "name", or fails the test.
  private String findArgValue(List<String> cmd, String name) {
    for (int i = 0; i < cmd.size(); i++) {
      if (cmd.get(i).equals(name)) {
        return cmd.get(i + 1);
      }
    }
    fail(String.format("arg '%s' not found", name));
    return null;
  }
  // True if "needle" is one of the entries of "list" split on "sep".
  // NOTE(review): sep is used as a regex by String.split; the separators
  // passed here (path separators) are regex-safe.
  private boolean findInStringList(String list, String sep, String needle) {
    return contains(needle, list.split(sep));
  }
  // Builder pointed at the test SPARK_HOME so scripts/jars resolve.
  private SparkSubmitCommandBuilder newCommandBuilder(List<String> args) {
    SparkSubmitCommandBuilder builder = new SparkSubmitCommandBuilder(args);
    builder.childEnv.put(CommandBuilderUtils.ENV_SPARK_HOME, System.getProperty("spark.test.home"));
    return builder;
  }
  private List<String> buildCommand(List<String> args, Map<String, String> env) throws Exception {
    return newCommandBuilder(args).buildCommand(env);
  }
  // Builds a command from (optional app resource) + opt + (optional params)
  // and asserts that opt survives into the final command.
  private void testCLIOpts(String appResource, String opt, List<String> params) throws Exception {
    List<String> args = new ArrayList<>();
    if (appResource != null) {
      args.add(appResource);
    }
    args.add(opt);
    if (params != null) {
      args.addAll(params);
    }
    Map<String, String> env = new HashMap<>();
    List<String> cmd = buildCommand(args, env);
    assertTrue(opt + " should be contained in the final cmd.",
      cmd.contains(opt));
  }
}
| 9,554 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class InProcessLauncherSuite extends BaseSuite {
  // Arguments passed to the test class to identify the test being run.
  private static final String TEST_SUCCESS = "success";
  private static final String TEST_FAILURE = "failure";
  private static final String TEST_KILL = "kill";
  private static final String TEST_FAILURE_MESSAGE = "d'oh";
  // Written by the in-process app thread (runTest) and read by the test
  // thread, so it must be volatile for the write to be reliably visible.
  private static volatile Throwable lastError;

  @Before
  public void testSetup() {
    lastError = null;
  }

  @Test
  public void testLauncher() throws Exception {
    SparkAppHandle app = startTest(TEST_SUCCESS);
    waitFor(app);
    assertNull(lastError);
    // Because the test doesn't implement the launcher protocol, the final state here will be
    // LOST instead of FINISHED.
    assertEquals(SparkAppHandle.State.LOST, app.getState());
  }

  @Test
  public void testKill() throws Exception {
    SparkAppHandle app = startTest(TEST_KILL);
    app.kill();
    waitFor(app);
    assertNull(lastError);
    assertEquals(SparkAppHandle.State.KILLED, app.getState());
  }

  @Test
  public void testErrorPropagation() throws Exception {
    SparkAppHandle app = startTest(TEST_FAILURE);
    waitFor(app);
    assertEquals(SparkAppHandle.State.FAILED, app.getState());
    assertNotNull(lastError);
    assertEquals(TEST_FAILURE_MESSAGE, lastError.getMessage());
  }

  /** Launches runTest() in-process with the given test name as the last app arg. */
  private SparkAppHandle startTest(String test) throws Exception {
    return new TestInProcessLauncher()
      .addAppArgs(test)
      .setAppResource(SparkLauncher.NO_RESOURCE)
      .startApplication();
  }

  /**
   * In-process "spark-submit" entry point. Verifies the launcher injected its
   * port/secret conf options, then behaves according to the test name in the
   * last argument. Any failure is recorded in lastError for the test thread.
   */
  public static void runTest(String[] args) {
    try {
      assertTrue(args.length != 0);
      // Make sure at least the launcher-provided config options are in the args array.
      final AtomicReference<String> port = new AtomicReference<>();
      final AtomicReference<String> secret = new AtomicReference<>();
      SparkSubmitOptionParser parser = new SparkSubmitOptionParser() {
        @Override
        protected boolean handle(String opt, String value) {
          // Compare by value, not reference: "==" only worked by accident
          // when the parser handed back the interned constant.
          if (CONF.equals(opt)) {
            String[] conf = value.split("=");
            switch(conf[0]) {
              case LauncherProtocol.CONF_LAUNCHER_PORT:
                port.set(conf[1]);
                break;
              case LauncherProtocol.CONF_LAUNCHER_SECRET:
                secret.set(conf[1]);
                break;
              default:
                // no op
            }
          }
          return true;
        }

        @Override
        protected boolean handleUnknown(String opt) {
          return true;
        }

        @Override
        protected void handleExtraArgs(List<String> extra) {
          // no op.
        }
      };
      parser.parse(Arrays.asList(args));
      assertNotNull("Launcher port not found.", port.get());
      assertNotNull("Launcher secret not found.", secret.get());
      String test = args[args.length - 1];
      switch (test) {
        case TEST_SUCCESS:
          break;
        case TEST_FAILURE:
          throw new IllegalStateException(TEST_FAILURE_MESSAGE);
        case TEST_KILL:
          try {
            // Wait for a reasonable amount of time to avoid the test hanging forever on failure,
            // but still allowing for time outs to hopefully not occur on busy machines.
            Thread.sleep(10000);
            fail("Did not get expected interrupt after 10s.");
          } catch (InterruptedException ie) {
            // Expected.
          }
          break;
        default:
          fail("Unknown test " + test);
      }
    } catch (Throwable t) {
      lastError = t;
      throw new RuntimeException(t);
    }
  }

  /** Launcher that runs runTest() instead of the real SparkSubmit main. */
  private static class TestInProcessLauncher extends InProcessLauncher {

    @Override
    Method findSparkSubmit() throws IOException {
      try {
        return InProcessLauncherSuite.class.getMethod("runTest", String[].class);
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
  }
}
| 9,555 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.time.Duration;
import org.junit.After;
import org.slf4j.bridge.SLF4JBridgeHandler;
import static org.junit.Assert.*;
/**
* Handles configuring the JUL -> SLF4J bridge, and provides some utility methods for tests.
*/
class BaseSuite {
  // Route java.util.logging output through SLF4J once for every suite that
  // extends this class.
  static {
    SLF4JBridgeHandler.removeHandlersForRootLogger();
    SLF4JBridgeHandler.install();
  }
  // After each test: if a LauncherServer is still running, close it so state
  // does not leak into the next test, then fail. Note assertNull checks the
  // local captured before close(), so it intentionally fails whenever a
  // server was left running — the close() above is only cleanup before
  // reporting the leak.
  @After
  public void postChecks() {
    LauncherServer server = LauncherServer.getServer();
    if (server != null) {
      // Shut down the server to clean things up for the next test.
      try {
        server.close();
      } catch (Exception e) {
        // Ignore.
      }
    }
    assertNull(server);
  }
  // Blocks until the handle reaches a final state (killing it on timeout)
  // and then until its cleanup has run (handle marked disposed). Polls every
  // 10ms for up to 10s in each phase.
  protected void waitFor(final SparkAppHandle handle) throws Exception {
    try {
      eventually(Duration.ofSeconds(10), Duration.ofMillis(10), () -> {
        assertTrue("Handle is not in final state.", handle.getState().isFinal());
      });
    } finally {
      if (!handle.getState().isFinal()) {
        handle.kill();
      }
    }
    // Wait until the handle has been marked as disposed, to make sure all cleanup tasks
    // have been performed.
    AbstractAppHandle ahandle = (AbstractAppHandle) handle;
    eventually(Duration.ofSeconds(10), Duration.ofMillis(10), () -> {
      assertTrue("Handle is still not marked as disposed.", ahandle.isDisposed());
    });
  }
  /**
   * Call a closure that performs a check every "period" until it succeeds, or the timeout
   * elapses.
   */
  protected void eventually(Duration timeout, Duration period, Runnable check) throws Exception {
    assertTrue("Timeout needs to be larger than period.", timeout.compareTo(period) > 0);
    long deadline = System.nanoTime() + timeout.toNanos();
    int count = 0;
    while (true) {
      try {
        count++;
        check.run();
        return;
      } catch (Throwable t) {
        // Rethrow (with attempt count) only once the deadline has passed;
        // otherwise sleep one period and retry.
        if (System.nanoTime() >= deadline) {
          String msg = String.format("Failed check after %d tries: %s.", count, t.getMessage());
          throw new IllegalStateException(msg, t);
        }
        Thread.sleep(period.toMillis());
      }
    }
  }
}
| 9,556 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.stream.Collectors;
import static java.nio.file.attribute.PosixFilePermission.*;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
 * Tests for child-process based app handles, focusing on how the child's stdout and stderr are
 * redirected: to files, to an in-memory log appender, or a combination of both.
 */
public class ChildProcAppHandleSuite extends BaseSuite {
  // Log messages captured by LogAppender. Shared static state, so each test clears it in
  // cleanupLog() before running.
  private static final List<String> MESSAGES = new ArrayList<>();
  // Shell script used as a fake spark-submit: prints "output" on stdout, "error" on stderr,
  // and exits with the status given by the last command line argument.
  private static final List<String> TEST_SCRIPT = Arrays.asList(
    "#!/bin/sh",
    "echo \"output\"",
    "echo \"error\" 1>&2",
    "while [ -n \"$1\" ]; do EC=$1; shift; done",
    "exit $EC");
  private static File TEST_SCRIPT_PATH;
  /** Deletes the temporary test script created in {@link #setupClass()}. */
  @AfterClass
  public static void cleanupClass() throws Exception {
    if (TEST_SCRIPT_PATH != null) {
      TEST_SCRIPT_PATH.delete();
      TEST_SCRIPT_PATH = null;
    }
  }
  /** Writes the fake spark-submit script to a temp file and makes it executable (POSIX only). */
  @BeforeClass
  public static void setupClass() throws Exception {
    TEST_SCRIPT_PATH = File.createTempFile("output-redir-test", ".sh");
    Files.setPosixFilePermissions(TEST_SCRIPT_PATH.toPath(),
      EnumSet.of(OWNER_READ, OWNER_EXECUTE, OWNER_WRITE));
    Files.write(TEST_SCRIPT_PATH.toPath(), TEST_SCRIPT);
  }
  /** Clears captured log messages so tests do not see each other's output. */
  @Before
  public void cleanupLog() {
    MESSAGES.clear();
  }
  // The launcher should record the requested Redirect for both stderr and stdout.
  @Test
  public void testRedirectsSimple() throws Exception {
    SparkLauncher launcher = new SparkLauncher();
    launcher.redirectError(ProcessBuilder.Redirect.PIPE);
    assertNotNull(launcher.errorStream);
    assertEquals(launcher.errorStream.type(), ProcessBuilder.Redirect.Type.PIPE);
    launcher.redirectOutput(ProcessBuilder.Redirect.PIPE);
    assertNotNull(launcher.outputStream);
    assertEquals(launcher.outputStream.type(), ProcessBuilder.Redirect.Type.PIPE);
  }
  // When a redirect is configured twice, the last call wins.
  @Test
  public void testRedirectLastWins() throws Exception {
    SparkLauncher launcher = new SparkLauncher();
    launcher.redirectError(ProcessBuilder.Redirect.PIPE)
      .redirectError(ProcessBuilder.Redirect.INHERIT);
    assertEquals(launcher.errorStream.type(), ProcessBuilder.Redirect.Type.INHERIT);
    launcher.redirectOutput(ProcessBuilder.Redirect.PIPE)
      .redirectOutput(ProcessBuilder.Redirect.INHERIT);
    assertEquals(launcher.outputStream.type(), ProcessBuilder.Redirect.Type.INHERIT);
  }
  // With no explicit redirection, both the child's stdout and stderr end up in the logger.
  @Test
  public void testRedirectToLog() throws Exception {
    assumeFalse(isWindows());
    SparkAppHandle handle = (ChildProcAppHandle) new TestSparkLauncher()
      .startApplication();
    waitFor(handle);
    assertTrue(MESSAGES.contains("output"));
    assertTrue(MESSAGES.contains("error"));
  }
  // stderr redirected to a file: stdout still goes to the logger, stderr only to the file.
  // NOTE(review): the Files.lines() streams in these tests are never closed; acceptable for a
  // short-lived test JVM.
  @Test
  public void testRedirectErrorToLog() throws Exception {
    assumeFalse(isWindows());
    Path err = Files.createTempFile("stderr", "txt");
    err.toFile().deleteOnExit();
    SparkAppHandle handle = (ChildProcAppHandle) new TestSparkLauncher()
      .redirectError(err.toFile())
      .startApplication();
    waitFor(handle);
    assertTrue(MESSAGES.contains("output"));
    assertEquals(Arrays.asList("error"), Files.lines(err).collect(Collectors.toList()));
  }
  // stdout redirected to a file: stderr still goes to the logger, stdout only to the file.
  @Test
  public void testRedirectOutputToLog() throws Exception {
    assumeFalse(isWindows());
    Path out = Files.createTempFile("stdout", "txt");
    out.toFile().deleteOnExit();
    SparkAppHandle handle = (ChildProcAppHandle) new TestSparkLauncher()
      .redirectOutput(out.toFile())
      .startApplication();
    waitFor(handle);
    assertTrue(MESSAGES.contains("error"));
    assertEquals(Arrays.asList("output"), Files.lines(out).collect(Collectors.toList()));
  }
  // Both streams redirected to files: nothing should reach the logger at all.
  @Test
  public void testNoRedirectToLog() throws Exception {
    assumeFalse(isWindows());
    Path out = Files.createTempFile("stdout", "txt");
    Path err = Files.createTempFile("stderr", "txt");
    out.toFile().deleteOnExit();
    err.toFile().deleteOnExit();
    ChildProcAppHandle handle = (ChildProcAppHandle) new TestSparkLauncher()
      .redirectError(err.toFile())
      .redirectOutput(out.toFile())
      .startApplication();
    waitFor(handle);
    assertTrue(MESSAGES.isEmpty());
    assertEquals(Arrays.asList("error"), Files.lines(err).collect(Collectors.toList()));
    assertEquals(Arrays.asList("output"), Files.lines(out).collect(Collectors.toList()));
  }
  // Combining file/merge redirection with redirectToLog() is an invalid configuration.
  @Test(expected = IllegalArgumentException.class)
  public void testBadLogRedirect() throws Exception {
    File out = Files.createTempFile("stdout", "txt").toFile();
    out.deleteOnExit();
    new SparkLauncher()
      .redirectError()
      .redirectOutput(out)
      .redirectToLog("foo")
      .launch()
      .waitFor();
  }
  // Merging stderr into stdout and also redirecting stderr to a file is contradictory.
  @Test(expected = IllegalArgumentException.class)
  public void testRedirectErrorTwiceFails() throws Exception {
    File err = Files.createTempFile("stderr", "txt").toFile();
    err.deleteOnExit();
    new SparkLauncher()
      .redirectError()
      .redirectError(err)
      .launch()
      .waitFor();
  }
  // Without a launcher-server connection the handle ends up LOST even though output was
  // redirected to a file.
  @Test
  public void testProcMonitorWithOutputRedirection() throws Exception {
    assumeFalse(isWindows());
    File err = Files.createTempFile("out", "txt").toFile();
    err.deleteOnExit();
    SparkAppHandle handle = new TestSparkLauncher()
      .redirectError()
      .redirectOutput(err)
      .startApplication();
    waitFor(handle);
    assertEquals(SparkAppHandle.State.LOST, handle.getState());
  }
  // Same as above, but with log redirection instead of a file.
  @Test
  public void testProcMonitorWithLogRedirection() throws Exception {
    assumeFalse(isWindows());
    SparkAppHandle handle = new TestSparkLauncher()
      .redirectToLog(getClass().getName())
      .startApplication();
    waitFor(handle);
    assertEquals(SparkAppHandle.State.LOST, handle.getState());
  }
  // A non-zero exit code from the child should surface as a FAILED state.
  @Test
  public void testFailedChildProc() throws Exception {
    assumeFalse(isWindows());
    SparkAppHandle handle = new TestSparkLauncher(1)
      .redirectToLog(getClass().getName())
      .startApplication();
    waitFor(handle);
    assertEquals(SparkAppHandle.State.FAILED, handle.getState());
  }
  /**
   * Launcher that runs the fake spark-submit script instead of the real one; the constructor
   * argument becomes the script's exit code.
   */
  private static class TestSparkLauncher extends SparkLauncher {
    TestSparkLauncher() {
      this(0);
    }
    TestSparkLauncher(int ec) {
      setAppResource("outputredirtest");
      addAppArgs(String.valueOf(ec));
    }
    @Override
    String findSparkSubmit() {
      return TEST_SCRIPT_PATH.getAbsolutePath();
    }
  }
  /**
   * A log4j appender used by child apps of this test. It records all messages logged through it in
   * memory so the test can check them.
   */
  public static class LogAppender extends AppenderSkeleton {
    @Override
    protected void append(LoggingEvent event) {
      MESSAGES.add(event.getMessage().toString());
    }
    @Override
    public boolean requiresLayout() {
      return false;
    }
    @Override
    public void close() {
    }
  }
}
| 9,557 |
0 | Create_ds/spark/launcher/src/test/java/org/apache/spark | Create_ds/spark/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitOptionParserSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import static org.mockito.Mockito.*;
/**
 * Tests that SparkSubmitOptionParser dispatches recognized options, switches, unknown arguments
 * and extra arguments to the correct callbacks.
 */
public class SparkSubmitOptionParserSuite extends BaseSuite {
  // Mockito spy of a no-op parser so tests can verify which callbacks were invoked.
  private SparkSubmitOptionParser parser;
  @Before
  public void setUp() {
    parser = spy(new DummyParser());
  }
  // Feeds every known option (through all of its aliases) followed by a value, then every
  // switch; verifies that the canonical name (the first alias) is what reaches handle().
  // The times(count) verifications are cumulative because the same spy instance is reused
  // across loop iterations.
  @Test
  public void testAllOptions() {
    int count = 0;
    for (String[] optNames : parser.opts) {
      for (String optName : optNames) {
        String value = optName + "-value";
        parser.parse(Arrays.asList(optName, value));
        count++;
        verify(parser).handle(eq(optNames[0]), eq(value));
        verify(parser, times(count)).handle(anyString(), anyString());
        verify(parser, times(count)).handleExtraArgs(eq(Collections.emptyList()));
      }
    }
    for (String[] switchNames : parser.switches) {
      int switchCount = 0;
      for (String name : switchNames) {
        parser.parse(Arrays.asList(name));
        count++;
        switchCount++;
        // Switches carry no value, so handle() is called with null.
        verify(parser, times(switchCount)).handle(eq(switchNames[0]), same(null));
        verify(parser, times(count)).handle(anyString(), any(String.class));
        verify(parser, times(count)).handleExtraArgs(eq(Collections.emptyList()));
      }
    }
  }
  // Unrecognized arguments go to handleUnknown(); everything after the first unknown argument
  // is passed to handleExtraArgs().
  @Test
  public void testExtraOptions() {
    List<String> args = Arrays.asList(parser.MASTER, parser.MASTER, "foo", "bar");
    parser.parse(args);
    verify(parser).handle(eq(parser.MASTER), eq(parser.MASTER));
    verify(parser).handleUnknown(eq("foo"));
    verify(parser).handleExtraArgs(eq(Arrays.asList("bar")));
  }
  // An option that requires a value but is given none must be rejected.
  @Test(expected=IllegalArgumentException.class)
  public void testMissingArg() {
    parser.parse(Arrays.asList(parser.MASTER));
  }
  // "--opt=value" must parse the same way as "--opt value".
  @Test
  public void testEqualSeparatedOption() {
    List<String> args = Arrays.asList(parser.MASTER + "=" + parser.MASTER);
    parser.parse(args);
    verify(parser).handle(eq(parser.MASTER), eq(parser.MASTER));
    verify(parser).handleExtraArgs(eq(Collections.emptyList()));
  }
  /** Parser whose callbacks do nothing; the spy above records the invocations. */
  private static class DummyParser extends SparkSubmitOptionParser {
    @Override
    protected boolean handle(String opt, String value) {
      return true;
    }
    @Override
    protected boolean handleUnknown(String opt) {
      return false;
    }
    @Override
    protected void handleExtraArgs(List<String> extra) {
    }
  }
}
| 9,558 |
0 | Create_ds/spark/launcher/src/test/java/org/apache/spark | Create_ds/spark/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
 * Unit tests for the option-string parsing and quoting helpers in CommandBuilderUtils.
 */
public class CommandBuilderUtilsSuite {

  // Valid option strings must be split into the expected argument lists, honoring single
  // quotes, double quotes and backslash escapes.
  @Test
  public void testValidOptionStrings() {
    assertParsedAs("a b c d e", Arrays.asList("a", "b", "c", "d", "e"));
    assertParsedAs("a 'b c' \"d\" e", Arrays.asList("a", "b c", "d", "e"));
    assertParsedAs("a 'b\\\"c' \"'d'\" e", Arrays.asList("a", "b\\\"c", "'d'", "e"));
    assertParsedAs("a 'b\"c' \"\\\"d\\\"\" e", Arrays.asList("a", "b\"c", "\"d\"", "e"));
    assertParsedAs(" a b c \\\\ ", Arrays.asList("a", "b", "c", "\\"));

    // Following tests ported from UtilsSuite.scala.
    assertParsedAs("", new ArrayList<String>());
    assertParsedAs("a", Arrays.asList("a"));
    assertParsedAs("aaa", Arrays.asList("aaa"));
    assertParsedAs("a b c", Arrays.asList("a", "b", "c"));
    assertParsedAs(" a b\t c ", Arrays.asList("a", "b", "c"));
    assertParsedAs("a 'b c'", Arrays.asList("a", "b c"));
    assertParsedAs("a 'b c' d", Arrays.asList("a", "b c", "d"));
    assertParsedAs("'b c'", Arrays.asList("b c"));
    assertParsedAs("a \"b c\"", Arrays.asList("a", "b c"));
    assertParsedAs("a \"b c\" d", Arrays.asList("a", "b c", "d"));
    assertParsedAs("\"b c\"", Arrays.asList("b c"));
    assertParsedAs("a 'b\" c' \"d' e\"", Arrays.asList("a", "b\" c", "d' e"));
    assertParsedAs("a\t'b\nc'\nd", Arrays.asList("a", "b\nc", "d"));
    assertParsedAs("a \"b\\\\c\"", Arrays.asList("a", "b\\c"));
    assertParsedAs("a \"b\\\"c\"", Arrays.asList("a", "b\"c"));
    assertParsedAs("a 'b\\\"c'", Arrays.asList("a", "b\\\"c"));
    assertParsedAs("'a'b", Arrays.asList("ab"));
    assertParsedAs("'a''b'", Arrays.asList("ab"));
    assertParsedAs("\"a\"b", Arrays.asList("ab"));
    assertParsedAs("\"a\"\"b\"", Arrays.asList("ab"));
    assertParsedAs("''", Arrays.asList(""));
    assertParsedAs("\"\"", Arrays.asList(""));
  }

  // Unterminated quotes and trailing escapes are malformed and must be rejected.
  @Test
  public void testInvalidOptionStrings() {
    assertParseFails("\\");
    assertParseFails("\"abcde");
    assertParseFails("'abcde");
  }

  // Windows batch scripts need arguments with spaces or cmd metacharacters wrapped in double
  // quotes, with embedded quotes doubled.
  @Test
  public void testWindowsBatchQuoting() {
    assertEquals("abc", quoteForBatchScript("abc"));
    assertEquals("\"a b c\"", quoteForBatchScript("a b c"));
    assertEquals("\"a \"\"b\"\" c\"", quoteForBatchScript("a \"b\" c"));
    assertEquals("\"a\"\"b\"\"c\"", quoteForBatchScript("a\"b\"c"));
    assertEquals("\"ab=\"\"cd\"\"\"", quoteForBatchScript("ab=\"cd\""));
    assertEquals("\"a,b,c\"", quoteForBatchScript("a,b,c"));
    assertEquals("\"a;b;c\"", quoteForBatchScript("a;b;c"));
    assertEquals("\"a,b,c\\\\\"", quoteForBatchScript("a,b,c\\"));
  }

  // Command-string quoting always wraps in double quotes and backslash-escapes embedded ones.
  @Test
  public void testPythonArgQuoting() {
    assertEquals("\"abc\"", quoteForCommandString("abc"));
    assertEquals("\"a b c\"", quoteForCommandString("a b c"));
    assertEquals("\"a \\\"b\\\" c\"", quoteForCommandString("a \"b\" c"));
  }

  // Both legacy ("1.8.0_66") and post-JEP-223 ("9-ea", "10") version strings must be handled.
  @Test
  public void testJavaMajorVersion() {
    assertEquals(6, javaMajorVersion("1.6.0_50"));
    assertEquals(7, javaMajorVersion("1.7.0_79"));
    assertEquals(8, javaMajorVersion("1.8.0_66"));
    assertEquals(9, javaMajorVersion("9-ea"));
    assertEquals(9, javaMajorVersion("9+100"));
    assertEquals(9, javaMajorVersion("9"));
    assertEquals(9, javaMajorVersion("9.1.0"));
    assertEquals(10, javaMajorVersion("10"));
  }

  /** Asserts that parseOptionString() splits {@code opts} into exactly {@code expected}. */
  private static void assertParsedAs(String opts, List<String> expected) {
    String failureMsg = String.format("test string failed to parse: [[ %s ]]", opts);
    assertEquals(failureMsg, expected, parseOptionString(opts));
  }

  /** Asserts that parseOptionString() rejects {@code opts} with IllegalArgumentException. */
  private static void assertParseFails(String opts) {
    try {
      parseOptionString(opts);
    } catch (IllegalArgumentException expected) {
      return;
    }
    fail("Expected exception for invalid option string.");
  }
}
| 9,559 |
0 | Create_ds/spark/launcher/src/test/java/org/apache/spark | Create_ds/spark/launcher/src/test/java/org/apache/spark/launcher/LauncherServerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.Closeable;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketException;
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.apache.spark.launcher.LauncherProtocol.*;
/**
 * Tests for the launcher server protocol: handle registration, client hand-shaking, connection
 * timeouts, deserialization filtering, and disconnect handling.
 *
 * <p>Fix over the previous version: the {@code finally} blocks dereferenced
 * {@code client.clientThread} without checking for {@code null}. If socket creation or the
 * handshake failed before the client was constructed, the resulting NPE masked the original
 * test failure. The joins are now guarded.
 */
public class LauncherServerSuite extends BaseSuite {

  // Killing the last handle disposes the server, so a fresh server must be created afterwards.
  @Test
  public void testLauncherServerReuse() throws Exception {
    LauncherServer server1 = LauncherServer.getOrCreateServer();
    ChildProcAppHandle handle = new ChildProcAppHandle(server1);
    handle.kill();

    LauncherServer server2 = LauncherServer.getOrCreateServer();
    try {
      assertNotSame(server1, server2);
    } finally {
      server2.unref();
    }
  }

  // Full happy-path handshake: Hello, SetAppId, SetState, and a Stop sent back on handle.stop().
  @Test
  public void testCommunication() throws Exception {
    LauncherServer server = LauncherServer.getOrCreateServer();
    ChildProcAppHandle handle = new ChildProcAppHandle(server);
    String secret = server.registerHandle(handle);

    TestClient client = null;
    try {
      Socket s = new Socket(InetAddress.getLoopbackAddress(), server.getPort());

      // Each state or info change releases a permit, so the test can wait for updates.
      final Semaphore semaphore = new Semaphore(0);
      handle.addListener(new SparkAppHandle.Listener() {
        @Override
        public void stateChanged(SparkAppHandle handle) {
          semaphore.release();
        }
        @Override
        public void infoChanged(SparkAppHandle handle) {
          semaphore.release();
        }
      });

      client = new TestClient(s);
      client.send(new Hello(secret, "1.4.0"));
      assertTrue(semaphore.tryAcquire(30, TimeUnit.SECONDS));

      // Make sure the server matched the client to the handle.
      assertNotNull(handle.getConnection());

      client.send(new SetAppId("app-id"));
      assertTrue(semaphore.tryAcquire(30, TimeUnit.SECONDS));
      assertEquals("app-id", handle.getAppId());

      client.send(new SetState(SparkAppHandle.State.RUNNING));
      assertTrue(semaphore.tryAcquire(1, TimeUnit.SECONDS));
      assertEquals(SparkAppHandle.State.RUNNING, handle.getState());

      handle.stop();
      Message stopMsg = client.inbound.poll(30, TimeUnit.SECONDS);
      assertTrue(stopMsg instanceof Stop);
    } finally {
      close(client);
      handle.kill();
      // The client is still null if connecting failed; guard the join so an NPE here does not
      // mask the original test failure.
      if (client != null) {
        client.clientThread.join();
      }
    }
  }

  // With a zero timeout, the server must drop the connection before the handshake completes.
  @Test
  public void testTimeout() throws Exception {
    LauncherServer server = LauncherServer.getOrCreateServer();
    ChildProcAppHandle handle = new ChildProcAppHandle(server);
    String secret = server.registerHandle(handle);

    TestClient client = null;
    try {
      // LauncherServer will immediately close the server-side socket when the timeout is set
      // to 0.
      SparkLauncher.setConfig(SparkLauncher.CHILD_CONNECTION_TIMEOUT, "0");
      Socket s = new Socket(InetAddress.getLoopbackAddress(), server.getPort());
      client = new TestClient(s);
      waitForError(client, secret);
    } finally {
      SparkLauncher.launcherConfig.remove(SparkLauncher.CHILD_CONNECTION_TIMEOUT);
      handle.kill();
      close(client);
    }
  }

  // If the child VM goes away after connecting, the handle must transition to LOST.
  @Test
  public void testSparkSubmitVmShutsDown() throws Exception {
    LauncherServer server = LauncherServer.getOrCreateServer();
    ChildProcAppHandle handle = new ChildProcAppHandle(server);
    String secret = server.registerHandle(handle);

    TestClient client = null;
    final Semaphore semaphore = new Semaphore(0);
    try {
      Socket s = new Socket(InetAddress.getLoopbackAddress(), server.getPort());
      handle.addListener(new SparkAppHandle.Listener() {
        @Override
        public void stateChanged(SparkAppHandle handle) {
          semaphore.release();
        }
        @Override
        public void infoChanged(SparkAppHandle handle) {
          semaphore.release();
        }
      });
      client = new TestClient(s);
      client.send(new Hello(secret, "1.4.0"));
      assertTrue(semaphore.tryAcquire(30, TimeUnit.SECONDS));

      // Make sure the server matched the client to the handle.
      assertNotNull(handle.getConnection());

      client.close();
      handle.dispose();
      assertTrue(semaphore.tryAcquire(30, TimeUnit.SECONDS));
      assertEquals(SparkAppHandle.State.LOST, handle.getState());
    } finally {
      handle.kill();
      close(client);
      // Guard against client being null if the connection failed early (see testCommunication).
      if (client != null) {
        client.clientThread.join();
      }
    }
  }

  // Deserialization of messages with unexpected fields must be rejected before readObject()
  // side effects can run.
  @Test
  public void testStreamFiltering() throws Exception {
    LauncherServer server = LauncherServer.getOrCreateServer();
    ChildProcAppHandle handle = new ChildProcAppHandle(server);
    String secret = server.registerHandle(handle);

    TestClient client = null;
    try {
      Socket s = new Socket(InetAddress.getLoopbackAddress(), server.getPort());
      client = new TestClient(s);

      try {
        client.send(new EvilPayload());
      } catch (SocketException se) {
        // SPARK-21522: this can happen if the server closes the socket before the full message has
        // been written, so it's expected. It may cause false positives though (socket errors
        // happening for other reasons).
      }

      waitForError(client, secret);
      assertEquals(0, EvilPayload.EVIL_BIT);
    } finally {
      handle.kill();
      close(client);
      // Guard against client being null if the connection failed early (see testCommunication).
      if (client != null) {
        client.clientThread.join();
      }
    }
  }

  // After handle.disconnect(), the server must refuse further messages from the client.
  @Test
  public void testAppHandleDisconnect() throws Exception {
    LauncherServer server = LauncherServer.getOrCreateServer();
    ChildProcAppHandle handle = new ChildProcAppHandle(server);
    String secret = server.registerHandle(handle);

    TestClient client = null;
    try {
      Socket s = new Socket(InetAddress.getLoopbackAddress(), server.getPort());
      client = new TestClient(s);
      client.send(new Hello(secret, "1.4.0"));
      client.send(new SetAppId("someId"));

      // Wait until we know the server has received the messages and matched the handle to the
      // connection before disconnecting.
      eventually(Duration.ofSeconds(1), Duration.ofMillis(10), () -> {
        assertEquals("someId", handle.getAppId());
      });

      handle.disconnect();
      waitForError(client, secret);
    } finally {
      handle.kill();
      close(client);
      // Guard against client being null if the connection failed early (see testCommunication).
      if (client != null) {
        client.clientThread.join();
      }
    }
  }

  /** Closes the given resource, swallowing any error; safe to call with null. */
  private void close(Closeable c) {
    if (c != null) {
      try {
        c.close();
      } catch (Exception e) {
        // no-op.
      }
    }
  }

  /**
   * Try a few times to get a client-side error, since the client-side socket may not reflect the
   * server-side close immediately.
   */
  private void waitForError(TestClient client, String secret) throws Exception {
    final AtomicBoolean helloSent = new AtomicBoolean();
    eventually(Duration.ofSeconds(1), Duration.ofMillis(10), () -> {
      try {
        if (!helloSent.get()) {
          client.send(new Hello(secret, "1.4.0"));
          helloSent.set(true);
        } else {
          client.send(new SetAppId("appId"));
        }
        fail("Expected error but message went through.");
      } catch (IllegalStateException | IOException e) {
        // Expected.
      }
    });
  }

  /** Connection that collects every message the server sends into a queue for inspection. */
  private static class TestClient extends LauncherConnection {

    final BlockingQueue<Message> inbound;
    final Thread clientThread;

    TestClient(Socket s) throws IOException {
      super(s);
      this.inbound = new LinkedBlockingQueue<>();
      this.clientThread = new Thread(this);
      clientThread.setName("TestClient");
      clientThread.setDaemon(true);
      clientThread.start();
    }

    @Override
    protected void handle(Message msg) throws IOException {
      inbound.offer(msg);
    }
  }

  /** Message whose deserialization flips EVIL_BIT; the server's filter must prevent this. */
  private static class EvilPayload extends LauncherProtocol.Message {

    static int EVIL_BIT = 0;

    // This field should cause the launcher server to throw an error and not deserialize the
    // message.
    private List<String> notAllowedField = Arrays.asList("disallowed");

    private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
      stream.defaultReadObject();
      EVIL_BIT = 1;
    }
  }
}
| 9,560 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
* Launcher for Spark applications.
* <p>
* Use this class to start Spark applications programmatically. The class uses a builder pattern
* to allow clients to configure the Spark application and launch it as a child process.
* </p>
*/
public class SparkLauncher extends AbstractLauncher<SparkLauncher> {
/** The Spark master. */
public static final String SPARK_MASTER = "spark.master";
/** The Spark deploy mode. */
public static final String DEPLOY_MODE = "spark.submit.deployMode";
/** Configuration key for the driver memory. */
public static final String DRIVER_MEMORY = "spark.driver.memory";
/** Configuration key for the driver class path. */
public static final String DRIVER_EXTRA_CLASSPATH = "spark.driver.extraClassPath";
/** Configuration key for the driver VM options. */
public static final String DRIVER_EXTRA_JAVA_OPTIONS = "spark.driver.extraJavaOptions";
/** Configuration key for the driver native library path. */
public static final String DRIVER_EXTRA_LIBRARY_PATH = "spark.driver.extraLibraryPath";
/** Configuration key for the executor memory. */
public static final String EXECUTOR_MEMORY = "spark.executor.memory";
/** Configuration key for the executor class path. */
public static final String EXECUTOR_EXTRA_CLASSPATH = "spark.executor.extraClassPath";
/** Configuration key for the executor VM options. */
public static final String EXECUTOR_EXTRA_JAVA_OPTIONS = "spark.executor.extraJavaOptions";
/** Configuration key for the executor native library path. */
public static final String EXECUTOR_EXTRA_LIBRARY_PATH = "spark.executor.extraLibraryPath";
/** Configuration key for the number of executor CPU cores. */
public static final String EXECUTOR_CORES = "spark.executor.cores";
static final String PYSPARK_DRIVER_PYTHON = "spark.pyspark.driver.python";
static final String PYSPARK_PYTHON = "spark.pyspark.python";
static final String SPARKR_R_SHELL = "spark.r.shell.command";
/** Logger name to use when launching a child process. */
public static final String CHILD_PROCESS_LOGGER_NAME = "spark.launcher.childProcLoggerName";
/**
* A special value for the resource that tells Spark to not try to process the app resource as a
* file. This is useful when the class being executed is added to the application using other
* means - for example, by adding jars using the package download feature.
*/
public static final String NO_RESOURCE = "spark-internal";
  /**
   * Maximum time (in ms) to wait for a child process to connect back to the launcher server
   * when using {@link #startApplication(SparkAppHandle.Listener...)}.
   *
   * <p>NOTE(review): the key's value misspells "Connection"; it is deliberately kept as-is
   * because changing the string would break existing user configurations that set this key.
   */
  public static final String CHILD_CONNECTION_TIMEOUT = "spark.launcher.childConectionTimeout";
/** Used internally to create unique logger names. */
private static final AtomicInteger COUNTER = new AtomicInteger();
/** Factory for creating OutputRedirector threads. **/
static final ThreadFactory REDIRECTOR_FACTORY = new NamedThreadFactory("launcher-proc-%d");
static final Map<String, String> launcherConfig = new HashMap<>();
  /**
   * Set a configuration value for the launcher library. These config values do not affect the
   * launched application, but rather the behavior of the launcher library itself when managing
   * applications (for example, {@link #CHILD_CONNECTION_TIMEOUT} is read through this
   * mechanism).
   *
   * @since 1.6.0
   * @param name Config name.
   * @param value Config value.
   */
  public static void setConfig(String name, String value) {
    launcherConfig.put(name, value);
  }
// Visible for testing.
File workingDir;
boolean redirectErrorStream;
ProcessBuilder.Redirect errorStream;
ProcessBuilder.Redirect outputStream;
  /** Creates a launcher that sets no extra environment variables in the child. */
  public SparkLauncher() {
    this(null);
  }
  /**
   * Creates a launcher that will set the given environment variables in the child.
   *
   * @param env Environment variables to set; may be {@code null}, in which case no extra
   *            variables are added to the child's environment.
   */
  public SparkLauncher(Map<String, String> env) {
    if (env != null) {
      this.builder.childEnv.putAll(env);
    }
  }
  /**
   * Set a custom JAVA_HOME for launching the Spark application.
   *
   * <p>This only affects the child process being launched; the current JVM is unaffected.
   *
   * @param javaHome Path to the JAVA_HOME to use.
   * @return This launcher.
   */
  public SparkLauncher setJavaHome(String javaHome) {
    checkNotNull(javaHome, "javaHome");
    builder.javaHome = javaHome;
    return this;
  }
  /**
   * Set a custom Spark installation location for the application.
   *
   * <p>The value is passed to the child through the {@code ENV_SPARK_HOME} environment entry.
   *
   * @param sparkHome Path to the Spark installation to use.
   * @return This launcher.
   */
  public SparkLauncher setSparkHome(String sparkHome) {
    checkNotNull(sparkHome, "sparkHome");
    builder.childEnv.put(ENV_SPARK_HOME, sparkHome);
    return this;
  }
  /**
   * Sets the working directory of spark-submit.
   *
   * @param dir The directory to set as spark-submit's working directory.
   * @return This launcher.
   */
  public SparkLauncher directory(File dir) {
    // NOTE(review): not validated here; presumably a null value keeps the parent process'
    // working directory — confirm in createBuilder().
    workingDir = dir;
    return this;
  }
  /**
   * Specifies that stderr in spark-submit should be redirected to stdout.
   *
   * <p>Combining this with {@link #redirectError(File)} or
   * {@link #redirectError(ProcessBuilder.Redirect)} results in an
   * {@code IllegalArgumentException} when the application is launched.
   *
   * @return This launcher.
   */
  public SparkLauncher redirectError() {
    redirectErrorStream = true;
    return this;
  }
  /**
   * Redirects error output to the specified Redirect.
   *
   * <p>If called multiple times, the last call wins.
   *
   * @param to The method of redirection.
   * @return This launcher.
   */
  public SparkLauncher redirectError(ProcessBuilder.Redirect to) {
    errorStream = to;
    return this;
  }
  /**
   * Redirects standard output to the specified Redirect.
   *
   * <p>If called multiple times, the last call wins.
   *
   * @param to The method of redirection.
   * @return This launcher.
   */
  public SparkLauncher redirectOutput(ProcessBuilder.Redirect to) {
    outputStream = to;
    return this;
  }
  /**
   * Redirects error output to the specified File.
   *
   * <p>Equivalent to {@code redirectError(ProcessBuilder.Redirect.to(errFile))}.
   *
   * @param errFile The file to which stderr is written.
   * @return This launcher.
   */
  public SparkLauncher redirectError(File errFile) {
    errorStream = ProcessBuilder.Redirect.to(errFile);
    return this;
  }
  /**
   * Redirects standard output to the specified File.
   *
   * <p>(Previous javadoc said "error output", a copy-paste mistake: this method redirects the
   * child's stdout, not stderr.)
   *
   * @param outFile The file to which stdout is written.
   * @return This launcher.
   */
  public SparkLauncher redirectOutput(File outFile) {
    outputStream = ProcessBuilder.Redirect.to(outFile);
    return this;
  }
  /**
   * Sets all output to be logged and redirected to a logger with the specified name.
   *
   * <p>Implemented by setting the {@link #CHILD_PROCESS_LOGGER_NAME} configuration entry.
   *
   * @param loggerName The name of the logger to log stdout and stderr.
   * @return This launcher.
   */
  public SparkLauncher redirectToLog(String loggerName) {
    setConf(CHILD_PROCESS_LOGGER_NAME, loggerName);
    return this;
  }
  // The following methods just delegate to the parent class, but they are needed to keep
  // binary compatibility with previous versions of this class (presumably because older
  // releases declared them directly on SparkLauncher, so already-compiled client code refers
  // to these exact signatures — confirm against the pre-AbstractLauncher API). They also let
  // callers chain with a SparkLauncher-typed return value.
  @Override
  public SparkLauncher setPropertiesFile(String path) {
    return super.setPropertiesFile(path);
  }
  @Override
  public SparkLauncher setConf(String key, String value) {
    return super.setConf(key, value);
  }
  @Override
  public SparkLauncher setAppName(String appName) {
    return super.setAppName(appName);
  }
  @Override
  public SparkLauncher setMaster(String master) {
    return super.setMaster(master);
  }
  @Override
  public SparkLauncher setDeployMode(String mode) {
    return super.setDeployMode(mode);
  }
  @Override
  public SparkLauncher setAppResource(String resource) {
    return super.setAppResource(resource);
  }
  @Override
  public SparkLauncher setMainClass(String mainClass) {
    return super.setMainClass(mainClass);
  }
  @Override
  public SparkLauncher addSparkArg(String arg) {
    return super.addSparkArg(arg);
  }
  @Override
  public SparkLauncher addSparkArg(String name, String value) {
    return super.addSparkArg(name, value);
  }
  @Override
  public SparkLauncher addAppArgs(String... args) {
    return super.addAppArgs(args);
  }
  @Override
  public SparkLauncher addJar(String jar) {
    return super.addJar(jar);
  }
  @Override
  public SparkLauncher addFile(String file) {
    return super.addFile(file);
  }
  @Override
  public SparkLauncher addPyFile(String file) {
    return super.addPyFile(file);
  }
  @Override
  public SparkLauncher setVerbose(boolean verbose) {
    return super.setVerbose(verbose);
  }
/**
* Launches a sub-process that will start the configured Spark application.
* <p>
* The {@link #startApplication(SparkAppHandle.Listener...)} method is preferred when launching
* Spark, since it provides better control of the child application.
*
* @return A process handle for the Spark app.
*/
public Process launch() throws IOException {
ProcessBuilder pb = createBuilder();
boolean outputToLog = outputStream == null;
boolean errorToLog = !redirectErrorStream && errorStream == null;
String loggerName = getLoggerName();
if (loggerName != null && outputToLog && errorToLog) {
pb.redirectErrorStream(true);
}
Process childProc = pb.start();
if (loggerName != null) {
InputStream logStream = outputToLog ? childProc.getInputStream() : childProc.getErrorStream();
new OutputRedirector(logStream, loggerName, REDIRECTOR_FACTORY);
}
return childProc;
}
  /**
   * Starts a Spark application.
   *
   * <p>
   * Applications launched by this launcher run as child processes. The child's stdout and stderr
   * are merged and written to a logger (see <code>java.util.logging</code>) only if redirection
   * has not otherwise been configured on this <code>SparkLauncher</code>. The logger's name can be
   * defined by setting {@link #CHILD_PROCESS_LOGGER_NAME} in the app's configuration. If that
   * option is not set, the code will try to derive a name from the application's name or main
   * class / script file. If those cannot be determined, an internal, unique name will be used.
   * In all cases, the logger name will start with "org.apache.spark.launcher.app", to fit more
   * easily into the configuration of commonly-used logging systems.
   *
   * @since 1.6.0
   * @see AbstractLauncher#startApplication(SparkAppHandle.Listener...)
   * @param listeners Listeners to add to the handle before the app is launched.
   * @return A handle for the launched application.
   */
  @Override
  public SparkAppHandle startApplication(SparkAppHandle.Listener... listeners) throws IOException {
    LauncherServer server = LauncherServer.getOrCreateServer();
    ChildProcAppHandle handle = new ChildProcAppHandle(server);
    // Listeners are attached before launch so no early state transition is missed.
    for (SparkAppHandle.Listener l : listeners) {
      handle.addListener(l);
    }
    // Register the handle before launching so the child can authenticate back to the server.
    String secret = server.registerHandle(handle);
    String loggerName = getLoggerName();
    ProcessBuilder pb = createBuilder();
    boolean outputToLog = outputStream == null;
    boolean errorToLog = !redirectErrorStream && errorStream == null;
    // Only setup stderr + stdout to logger redirection if user has not otherwise configured output
    // redirection.
    if (loggerName == null && (outputToLog || errorToLog)) {
      // Derive a logger name: explicit app name, else the main class's simple name, else the
      // app resource's file name, else a process-unique counter as a last resort.
      String appName;
      if (builder.appName != null) {
        appName = builder.appName;
      } else if (builder.mainClass != null) {
        int dot = builder.mainClass.lastIndexOf(".");
        if (dot >= 0 && dot < builder.mainClass.length() - 1) {
          appName = builder.mainClass.substring(dot + 1, builder.mainClass.length());
        } else {
          appName = builder.mainClass;
        }
      } else if (builder.appResource != null) {
        appName = new File(builder.appResource).getName();
      } else {
        appName = String.valueOf(COUNTER.incrementAndGet());
      }
      String loggerPrefix = getClass().getPackage().getName();
      loggerName = String.format("%s.app.%s", loggerPrefix, appName);
    }
    if (outputToLog && errorToLog) {
      // Both streams go to the logger, so merge them into one pipe.
      pb.redirectErrorStream(true);
    }
    // Tell the child process how to connect back to this launcher's server.
    pb.environment().put(LauncherProtocol.ENV_LAUNCHER_PORT, String.valueOf(server.getPort()));
    pb.environment().put(LauncherProtocol.ENV_LAUNCHER_SECRET, secret);
    try {
      Process child = pb.start();
      InputStream logStream = null;
      if (loggerName != null) {
        logStream = outputToLog ? child.getInputStream() : child.getErrorStream();
      }
      handle.setChildProc(child, loggerName, logStream);
    } catch (IOException ioe) {
      // Launching failed; clean up the registered handle before propagating the error.
      handle.kill();
      throw ioe;
    }
    return handle;
  }
private ProcessBuilder createBuilder() throws IOException {
List<String> cmd = new ArrayList<>();
cmd.add(findSparkSubmit());
cmd.addAll(builder.buildSparkSubmitArgs());
// Since the child process is a batch script, let's quote things so that special characters are
// preserved, otherwise the batch interpreter will mess up the arguments. Batch scripts are
// weird.
if (isWindows()) {
List<String> winCmd = new ArrayList<>();
for (String arg : cmd) {
winCmd.add(quoteForBatchScript(arg));
}
cmd = winCmd;
}
ProcessBuilder pb = new ProcessBuilder(cmd.toArray(new String[cmd.size()]));
for (Map.Entry<String, String> e : builder.childEnv.entrySet()) {
pb.environment().put(e.getKey(), e.getValue());
}
if (workingDir != null) {
pb.directory(workingDir);
}
// Only one of redirectError and redirectError(...) can be specified.
// Similarly, if redirectToLog is specified, no other redirections should be specified.
checkState(!redirectErrorStream || errorStream == null,
"Cannot specify both redirectError() and redirectError(...) ");
checkState(getLoggerName() == null ||
((!redirectErrorStream && errorStream == null) || outputStream == null),
"Cannot used redirectToLog() in conjunction with other redirection methods.");
if (redirectErrorStream) {
pb.redirectErrorStream(true);
}
if (errorStream != null) {
pb.redirectError(errorStream);
}
if (outputStream != null) {
pb.redirectOutput(outputStream);
}
return pb;
}
  @Override
  SparkLauncher self() {
    // Returns the concrete launcher type so the parent class's fluent setters chain correctly.
    return this;
  }
// Visible for testing.
String findSparkSubmit() {
String script = isWindows() ? "spark-submit.cmd" : "spark-submit";
return join(File.separator, builder.getSparkHome(), "bin", script);
}
  /**
   * Returns the logger name configured via CHILD_PROCESS_LOGGER_NAME in the effective config,
   * or null if it is not set.
   */
  private String getLoggerName() throws IOException {
    return builder.getEffectiveConfig().get(CHILD_PROCESS_LOGGER_NAME);
  }
}
| 9,561 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
 * Command builder for internal Spark classes.
 * <p>
 * This class handles building the command to launch all internal Spark classes except for
 * SparkSubmit (which is handled by the {@link SparkSubmitCommandBuilder} class).
 */
class SparkClassCommandBuilder extends AbstractCommandBuilder {

  private final String className;
  private final List<String> classArgs;

  SparkClassCommandBuilder(String className, List<String> classArgs) {
    this.className = className;
    this.classArgs = classArgs;
  }

  /**
   * Builds the java command for the configured class: JVM opts from the class-specific
   * environment variables, a max-heap setting from the class-specific memory variable, then
   * the class name and its arguments.
   *
   * @throws IllegalArgumentException If a JVM opts variable tries to set max heap directly.
   */
  @Override
  public List<String> buildCommand(Map<String, String> env)
      throws IOException, IllegalArgumentException {
    List<String> javaOptsKeys = new ArrayList<>();
    String memKey = null;
    String extraClassPath = null;

    // Master, Worker, HistoryServer, ExternalShuffleService, MesosClusterDispatcher use
    // SPARK_DAEMON_JAVA_OPTS (and specific opts) + SPARK_DAEMON_MEMORY.
    switch (className) {
      case "org.apache.spark.deploy.master.Master":
        javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
        javaOptsKeys.add("SPARK_MASTER_OPTS");
        extraClassPath = getenv("SPARK_DAEMON_CLASSPATH");
        memKey = "SPARK_DAEMON_MEMORY";
        break;
      case "org.apache.spark.deploy.worker.Worker":
        javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
        javaOptsKeys.add("SPARK_WORKER_OPTS");
        extraClassPath = getenv("SPARK_DAEMON_CLASSPATH");
        memKey = "SPARK_DAEMON_MEMORY";
        break;
      case "org.apache.spark.deploy.history.HistoryServer":
        javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
        javaOptsKeys.add("SPARK_HISTORY_OPTS");
        extraClassPath = getenv("SPARK_DAEMON_CLASSPATH");
        memKey = "SPARK_DAEMON_MEMORY";
        break;
      // Both executor backends use identical settings, so they share one case body.
      case "org.apache.spark.executor.CoarseGrainedExecutorBackend":
      case "org.apache.spark.executor.MesosExecutorBackend":
        javaOptsKeys.add("SPARK_EXECUTOR_OPTS");
        memKey = "SPARK_EXECUTOR_MEMORY";
        extraClassPath = getenv("SPARK_EXECUTOR_CLASSPATH");
        break;
      case "org.apache.spark.deploy.mesos.MesosClusterDispatcher":
        javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
        extraClassPath = getenv("SPARK_DAEMON_CLASSPATH");
        memKey = "SPARK_DAEMON_MEMORY";
        break;
      case "org.apache.spark.deploy.ExternalShuffleService":
      case "org.apache.spark.deploy.mesos.MesosExternalShuffleService":
        javaOptsKeys.add("SPARK_DAEMON_JAVA_OPTS");
        javaOptsKeys.add("SPARK_SHUFFLE_OPTS");
        extraClassPath = getenv("SPARK_DAEMON_CLASSPATH");
        memKey = "SPARK_DAEMON_MEMORY";
        break;
      default:
        memKey = "SPARK_DRIVER_MEMORY";
        break;
    }

    List<String> cmd = buildJavaCommand(extraClassPath);

    for (String key : javaOptsKeys) {
      String envValue = System.getenv(key);
      if (!isEmpty(envValue) && envValue.contains("Xmx")) {
        // Max heap must come from the corresponding memory configuration, not raw JVM opts.
        String msg = String.format("%s is not allowed to specify max heap(Xmx) memory settings " +
            "(was %s). Use the corresponding configuration instead.", key, envValue);
        throw new IllegalArgumentException(msg);
      }
      addOptionString(cmd, envValue);
    }

    String mem = firstNonEmpty(memKey != null ? System.getenv(memKey) : null, DEFAULT_MEM);
    cmd.add("-Xmx" + mem);
    cmd.add(className);
    cmd.addAll(classArgs);
    return cmd;
  }
}
| 9,562 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/InProcessLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.List;
import java.util.logging.Logger;
/**
* In-process launcher for Spark applications.
* <p>
* Use this class to start Spark applications programmatically. Applications launched using this
* class will run in the same process as the caller.
* <p>
* Because Spark only supports a single active instance of <code>SparkContext</code> per JVM, code
* that uses this class should be careful about which applications are launched. It's recommended
* that this launcher only be used to launch applications in cluster mode.
* <p>
* Also note that, when running applications in client mode, JVM-related configurations (like
* driver memory or configs which modify the driver's class path) do not take effect. Logging
* configuration is also inherited from the parent application.
*
* @since Spark 2.3.0
*/
public class InProcessLauncher extends AbstractLauncher<InProcessLauncher> {

  private static final Logger LOG = Logger.getLogger(InProcessLauncher.class.getName());

  /**
   * Starts a Spark application.
   *
   * @see AbstractLauncher#startApplication(SparkAppHandle.Listener...)
   * @param listeners Listeners to add to the handle before the app is launched.
   * @return A handle for the launched application.
   */
  @Override
  public SparkAppHandle startApplication(SparkAppHandle.Listener... listeners) throws IOException {
    if (builder.isClientMode(builder.getEffectiveConfig())) {
      LOG.warning("It's not recommended to run client-mode applications using InProcessLauncher.");
    }

    Method submitMain = findSparkSubmit();
    LauncherServer server = LauncherServer.getOrCreateServer();
    InProcessAppHandle appHandle = new InProcessAppHandle(server);
    for (SparkAppHandle.Listener listener : listeners) {
      appHandle.addListener(listener);
    }

    // Make the launcher server reachable from the in-process app through its configuration.
    String secret = server.registerHandle(appHandle);
    setConf(LauncherProtocol.CONF_LAUNCHER_PORT, String.valueOf(server.getPort()));
    setConf(LauncherProtocol.CONF_LAUNCHER_SECRET, secret);

    List<String> submitArgs = builder.buildSparkSubmitArgs();
    String[] argv = submitArgs.toArray(new String[0]);

    String appName = CommandBuilderUtils.firstNonEmpty(builder.appName, builder.mainClass,
      "<unknown>");
    appHandle.start(appName, submitMain, argv);
    return appHandle;
  }

  // Visible for testing.
  /** Locates SparkSubmit's static main method via the context (or own) class loader. */
  Method findSparkSubmit() throws IOException {
    ClassLoader loader = Thread.currentThread().getContextClassLoader();
    if (loader == null) {
      loader = getClass().getClassLoader();
    }

    // SPARK-22941: first try the new SparkSubmit interface that has better error handling,
    // but fall back to the old interface in case someone is mixing & matching launcher and
    // Spark versions.
    Class<?> sparkSubmit;
    try {
      sparkSubmit = loader.loadClass("org.apache.spark.deploy.InProcessSparkSubmit");
    } catch (Exception e1) {
      try {
        sparkSubmit = loader.loadClass("org.apache.spark.deploy.SparkSubmit");
      } catch (Exception e2) {
        throw new IOException("Cannot find SparkSubmit; make sure necessary jars are available.",
          e2);
      }
    }

    Method main;
    try {
      main = sparkSubmit.getMethod("main", String[].class);
    } catch (Exception e) {
      throw new IOException("Cannot find SparkSubmit main method.", e);
    }

    CommandBuilderUtils.checkState(Modifier.isStatic(main.getModifiers()),
      "main method is not static.");
    return main;
  }

  @Override
  InProcessLauncher self() {
    return this;
  }

}
| 9,563 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
* Abstract Spark command builder that defines common functionality.
*/
abstract class AbstractCommandBuilder {

  boolean verbose;
  String appName;
  String appResource;
  String deployMode;
  // Explicit java home; when null, buildJavaCommand() falls back to the JAVA_HOME env var and
  // then to the JVM's own java.home system property.
  String javaHome;
  String mainClass;
  String master;
  protected String propertiesFile;
  final List<String> appArgs;
  final List<String> jars;
  final List<String> files;
  final List<String> pyFiles;
  // Extra environment for the child process; getenv() consults this map before System.getenv().
  final Map<String, String> childEnv;
  // Config entries set programmatically; these take precedence over the properties file when
  // the effective config is computed.
  final Map<String, String> conf;

  // The merged configuration for the application. Cached to avoid having to read / parse
  // properties files multiple times.
  private Map<String, String> effectiveConfig;

  AbstractCommandBuilder() {
    this.appArgs = new ArrayList<>();
    this.childEnv = new HashMap<>();
    this.conf = new HashMap<>();
    this.files = new ArrayList<>();
    this.jars = new ArrayList<>();
    this.pyFiles = new ArrayList<>();
  }

  /**
   * Builds the command to execute.
   *
   * @param env A map containing environment variables for the child process. It may already contain
   *            entries defined by the user (such as SPARK_HOME, or those defined by the
   *            SparkLauncher constructor that takes an environment), and may be modified to
   *            include other variables needed by the process to be executed.
   */
  abstract List<String> buildCommand(Map<String, String> env)
      throws IOException, IllegalArgumentException;

  /**
   * Builds a list of arguments to run java.
   *
   * This method finds the java executable to use and appends JVM-specific options for running a
   * class with Spark in the classpath. It also loads options from the "java-opts" file in the
   * configuration directory being used.
   *
   * Callers should still add at least the class to run, as well as any arguments to pass to the
   * class.
   */
  List<String> buildJavaCommand(String extraClassPath) throws IOException {
    List<String> cmd = new ArrayList<>();
    String envJavaHome;

    // Resolution order for the java binary: explicit javaHome, then JAVA_HOME, then java.home.
    if (javaHome != null) {
      cmd.add(join(File.separator, javaHome, "bin", "java"));
    } else if ((envJavaHome = System.getenv("JAVA_HOME")) != null) {
      cmd.add(join(File.separator, envJavaHome, "bin", "java"));
    } else {
      cmd.add(join(File.separator, System.getProperty("java.home"), "bin", "java"));
    }

    // Load extra JAVA_OPTS from conf/java-opts, if it exists. Each line is parsed as a set of
    // shell-style options and appended to the command.
    File javaOpts = new File(join(File.separator, getConfDir(), "java-opts"));
    if (javaOpts.isFile()) {
      try (BufferedReader br = new BufferedReader(new InputStreamReader(
          new FileInputStream(javaOpts), StandardCharsets.UTF_8))) {
        String line;
        while ((line = br.readLine()) != null) {
          addOptionString(cmd, line);
        }
      }
    }

    cmd.add("-cp");
    cmd.add(join(File.pathSeparator, buildClassPath(extraClassPath)));
    return cmd;
  }

  /**
   * Parses {@code options} as a shell-style option string and appends each resulting token to
   * {@code cmd}. No-op for a null or empty string.
   */
  void addOptionString(List<String> cmd, String options) {
    if (!isEmpty(options)) {
      for (String opt : parseOptionString(options)) {
        cmd.add(opt);
      }
    }
  }

  /**
   * Builds the classpath for the application. Returns a list with one classpath entry per element;
   * each entry is formatted in the way expected by <i>java.net.URLClassLoader</i> (more
   * specifically, with trailing slashes for directories).
   */
  List<String> buildClassPath(String appClassPath) throws IOException {
    String sparkHome = getSparkHome();

    // LinkedHashSet: deduplicates entries while preserving the precedence order below
    // (app classpath first, then conf dir, then Spark classes/jars, then Hadoop/YARN dirs).
    Set<String> cp = new LinkedHashSet<>();
    addToClassPath(cp, appClassPath);

    addToClassPath(cp, getConfDir());

    boolean prependClasses = !isEmpty(getenv("SPARK_PREPEND_CLASSES"));
    boolean isTesting = "1".equals(getenv("SPARK_TESTING"));
    if (prependClasses || isTesting) {
      String scala = getScalaVersion();
      // Module directories whose locally-built classes may be placed on the classpath.
      List<String> projects = Arrays.asList(
        "common/kvstore",
        "common/network-common",
        "common/network-shuffle",
        "common/network-yarn",
        "common/sketch",
        "common/tags",
        "common/unsafe",
        "core",
        "examples",
        "graphx",
        "launcher",
        "mllib",
        "repl",
        "resource-managers/mesos",
        "resource-managers/yarn",
        "sql/catalyst",
        "sql/core",
        "sql/hive",
        "sql/hive-thriftserver",
        "streaming"
      );
      if (prependClasses) {
        if (!isTesting) {
          System.err.println(
            "NOTE: SPARK_PREPEND_CLASSES is set, placing locally compiled Spark classes ahead of " +
            "assembly.");
        }
        for (String project : projects) {
          addToClassPath(cp, String.format("%s/%s/target/scala-%s/classes", sparkHome, project,
            scala));
        }
      }
      if (isTesting) {
        for (String project : projects) {
          addToClassPath(cp, String.format("%s/%s/target/scala-%s/test-classes", sparkHome,
            project, scala));
        }
      }

      // Add this path to include jars that are shaded in the final deliverable created during
      // the maven build. These jars are copied to this directory during the build.
      addToClassPath(cp, String.format("%s/core/target/jars/*", sparkHome));
      addToClassPath(cp, String.format("%s/mllib/target/jars/*", sparkHome));
    }

    // Add Spark jars to the classpath. For the testing case, we rely on the test code to set and
    // propagate the test classpath appropriately. For normal invocation, look for the jars
    // directory under SPARK_HOME.
    boolean isTestingSql = "1".equals(getenv("SPARK_SQL_TESTING"));
    String jarsDir = findJarsDir(getSparkHome(), getScalaVersion(), !isTesting && !isTestingSql);
    if (jarsDir != null) {
      addToClassPath(cp, join(File.separator, jarsDir, "*"));
    }

    addToClassPath(cp, getenv("HADOOP_CONF_DIR"));
    addToClassPath(cp, getenv("YARN_CONF_DIR"));
    addToClassPath(cp, getenv("SPARK_DIST_CLASSPATH"));

    return new ArrayList<>(cp);
  }

  /**
   * Adds entries to the classpath.
   *
   * @param cp List to which the new entries are appended.
   * @param entries New classpath entries (separated by File.pathSeparator).
   */
  private void addToClassPath(Set<String> cp, String entries) {
    if (isEmpty(entries)) {
      return;
    }
    String[] split = entries.split(Pattern.quote(File.pathSeparator));
    for (String entry : split) {
      if (!isEmpty(entry)) {
        // URLClassLoader treats entries without a trailing separator as jar files, so mark
        // directories explicitly.
        if (new File(entry).isDirectory() && !entry.endsWith(File.separator)) {
          entry += File.separator;
        }
        cp.add(entry);
      }
    }
  }

  /**
   * Returns the Scala version used to build Spark: the SPARK_SCALA_VERSION env var if set,
   * otherwise inferred from which launcher/target/scala-* build directory exists. Fails if
   * builds for multiple Scala versions are present, or none.
   */
  String getScalaVersion() {
    String scala = getenv("SPARK_SCALA_VERSION");
    if (scala != null) {
      return scala;
    }
    String sparkHome = getSparkHome();
    File scala212 = new File(sparkHome, "launcher/target/scala-2.12");
    File scala211 = new File(sparkHome, "launcher/target/scala-2.11");
    checkState(!scala212.isDirectory() || !scala211.isDirectory(),
      "Presence of build for multiple Scala versions detected.\n" +
      "Either clean one of them or set SPARK_SCALA_VERSION in your environment.");
    if (scala212.isDirectory()) {
      return "2.12";
    } else {
      checkState(scala211.isDirectory(), "Cannot find any build directories.");
      return "2.11";
    }
  }

  /**
   * Returns the Spark home directory from SPARK_HOME (falling back to the spark.test.home
   * system property when SPARK_TESTING is set). Fails if neither is available.
   */
  String getSparkHome() {
    String path = getenv(ENV_SPARK_HOME);
    if (path == null && "1".equals(getenv("SPARK_TESTING"))) {
      path = System.getProperty("spark.test.home");
    }
    checkState(path != null,
      "Spark home not found; set it explicitly or use the SPARK_HOME environment variable.");
    return path;
  }

  /** Looks up an environment variable, preferring entries from the child env map. */
  String getenv(String key) {
    return firstNonEmpty(childEnv.get(key), System.getenv(key));
  }

  /** Sets the properties file path, invalidating the cached effective configuration. */
  void setPropertiesFile(String path) {
    effectiveConfig = null;
    this.propertiesFile = path;
  }

  /**
   * Returns the merged application configuration: programmatic conf entries plus, for keys not
   * already set, entries from the properties file. The result is cached until
   * setPropertiesFile() is called again.
   */
  Map<String, String> getEffectiveConfig() throws IOException {
    if (effectiveConfig == null) {
      effectiveConfig = new HashMap<>(conf);
      Properties p = loadPropertiesFile();
      for (String key : p.stringPropertyNames()) {
        if (!effectiveConfig.containsKey(key)) {
          effectiveConfig.put(key, p.getProperty(key));
        }
      }
    }
    return effectiveConfig;
  }

  /**
   * Loads the configuration file for the application, if it exists. This is either the
   * user-specified properties file, or the spark-defaults.conf file under the Spark configuration
   * directory.
   */
  private Properties loadPropertiesFile() throws IOException {
    Properties props = new Properties();
    File propsFile;
    if (propertiesFile != null) {
      propsFile = new File(propertiesFile);
      checkArgument(propsFile.isFile(), "Invalid properties file '%s'.", propertiesFile);
    } else {
      propsFile = new File(getConfDir(), DEFAULT_PROPERTIES_FILE);
    }

    if (propsFile.isFile()) {
      try (InputStreamReader isr = new InputStreamReader(
          new FileInputStream(propsFile), StandardCharsets.UTF_8)) {
        props.load(isr);
        // Strip surrounding whitespace from all values.
        for (Map.Entry<Object, Object> e : props.entrySet()) {
          e.setValue(e.getValue().toString().trim());
        }
      }
    }

    return props;
  }

  /** Returns the Spark configuration directory: SPARK_CONF_DIR, or SPARK_HOME/conf. */
  private String getConfDir() {
    String confDir = getenv("SPARK_CONF_DIR");
    return confDir != null ? confDir : join(File.separator, getSparkHome(), "conf");
  }

}
| 9,564 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/LauncherProtocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.Serializable;
/**
* Message definitions for the launcher communication protocol. These messages must remain
* backwards-compatible, so that the launcher can talk to older versions of Spark that support
* the protocol.
*/
final class LauncherProtocol {

  /** Environment variable where the server port is stored. */
  static final String ENV_LAUNCHER_PORT = "_SPARK_LAUNCHER_PORT";

  /** Environment variable where the secret for connecting back to the server is stored. */
  static final String ENV_LAUNCHER_SECRET = "_SPARK_LAUNCHER_SECRET";

  /** Spark conf key used to propagate the server port for in-process launches. */
  static final String CONF_LAUNCHER_PORT = "spark.launcher.port";

  /** Spark conf key used to propagate the app secret for in-process launches. */
  static final String CONF_LAUNCHER_SECRET = "spark.launcher.secret";

  /**
   * Base class for all protocol messages; exchanged using plain Java serialization, so field
   * changes must remain backwards-compatible.
   */
  static class Message implements Serializable {

  }

  /**
   * Hello message, sent from client to server.
   */
  static class Hello extends Message {

    /** The secret identifying the client app (previously handed out via the environment/conf). */
    final String secret;

    /** Spark version of the client, for the server's information. */
    final String sparkVersion;

    Hello(String secret, String version) {
      this.secret = secret;
      this.sparkVersion = version;
    }

  }

  /**
   * SetAppId message, sent from client to server.
   */
  static class SetAppId extends Message {

    /** The application ID assigned to the client app. */
    final String appId;

    SetAppId(String appId) {
      this.appId = appId;
    }

  }

  /**
   * SetState message, sent from client to server.
   */
  static class SetState extends Message {

    /** The new state of the client app. */
    final SparkAppHandle.State state;

    SetState(SparkAppHandle.State state) {
      this.state = state;
    }

  }

  /**
   * Stop message, send from server to client to stop the application.
   */
  static class Stop extends Message {

  }

}
| 9,565 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/NamedThreadFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;
/**
 * A {@link ThreadFactory} that creates daemon threads whose names are produced by formatting
 * a pattern (e.g. "worker-%d") with a monotonically increasing id, starting at 1.
 */
class NamedThreadFactory implements ThreadFactory {

  private final String nameFormat;
  private final AtomicLong threadIds;

  NamedThreadFactory(String nameFormat) {
    this.nameFormat = nameFormat;
    this.threadIds = new AtomicLong();
  }

  @Override
  public Thread newThread(Runnable r) {
    long id = threadIds.incrementAndGet();
    Thread thread = new Thread(r, String.format(nameFormat, id));
    thread.setDaemon(true);
    return thread;
  }

}
| 9,566 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.util.logging.Logger;
import static org.apache.spark.launcher.LauncherProtocol.*;
/**
* A server that listens locally for connections from client launched by the library. Each client
* has a secret that it needs to send to the server to identify itself and establish the session.
*
* I/O is currently blocking (one thread per client). Clients have a limited time to connect back
* to the server, otherwise the server will ignore the connection.
*
* === Architecture Overview ===
*
* The launcher server is used when Spark apps are launched as separate processes than the calling
* app. It looks more or less like the following:
*
* ----------------------- -----------------------
* | User App | spark-submit | Spark App |
* | | -------------------> | |
* | ------------| |------------- |
* | | | hello | | |
* | | L. Server |<----------------------| L. Backend | |
* | | | | | |
* | ------------- -----------------------
* | | | ^
* | v | |
* | -------------| |
* | | | <per-app channel> |
* | | App Handle |<------------------------------
* | | |
* -----------------------
*
* The server is started on demand and remains active while there are active or outstanding clients,
* to avoid opening too many ports when multiple clients are launched. Each client is given a unique
* secret, and have a limited amount of time to connect back
* ({@link SparkLauncher#CHILD_CONNECTION_TIMEOUT}), at which point the server will throw away
* that client's state. A client is only allowed to connect back to the server once.
*
* The launcher server listens on the localhost only, so it doesn't need access controls (aside from
* the per-app secret) nor encryption. It thus requires that the launched app has a local process
* that communicates with the server. In cluster mode, this means that the client that launches the
* application must remain alive for the duration of the application (or until the app handle is
* disconnected).
*/
class LauncherServer implements Closeable {
  private static final Logger LOG = Logger.getLogger(LauncherServer.class.getName());
  private static final String THREAD_NAME_FMT = "LauncherServer-%d";
  private static final long DEFAULT_CONNECT_TIMEOUT = 10000L;

  /** For creating secrets used for communication with child processes. */
  private static final SecureRandom RND = new SecureRandom();

  // Process-wide singleton; null when no server is running. Guarded by the class lock,
  // volatile so close() / getServer() see a consistent value.
  private static volatile LauncherServer serverInstance;

  /**
   * Returns the shared server instance, creating and caching one if needed, and takes a
   * reference on it on behalf of the caller (released later via unregister()/unref()).
   *
   * The loop re-checks "running" so that a cached instance that has already been stopped is
   * not handed out; a fresh instance is created once close() has cleared serverInstance.
   * NOTE(review): a stopped-but-still-cached instance would make this loop spin until close()
   * nulls the field; in practice closes go through unref(), which holds the class lock, so
   * that window should not be observable here — confirm.
   */
  static synchronized LauncherServer getOrCreateServer() throws IOException {
    LauncherServer server;
    do {
      server = serverInstance != null ? serverInstance : new LauncherServer();
    } while (!server.running);

    server.ref();
    serverInstance = server;
    return server;
  }

  // For testing.
  static synchronized LauncherServer getServer() {
    return serverInstance;
  }
  // Number of outstanding references (one taken per getOrCreateServer() call); the server
  // closes itself when this drops to zero in unref().
  private final AtomicLong refCount;
  // NOTE(review): appears unused in this class (NamedThreadFactory keeps its own counter).
  private final AtomicLong threadIds;
  // Maps each registered app's secret to its handle until the app is unregistered.
  private final ConcurrentMap<String, AbstractAppHandle> secretToPendingApps;
  // Live client connections; guarded by synchronizing on the list itself.
  private final List<ServerConnection> clients;
  private final ServerSocket server;
  private final Thread serverThread;
  private final ThreadFactory factory;
  // Daemon timer used to enforce the per-client "hello" timeout.
  private final Timer timeoutTimer;

  // False until the constructor finishes, and set back to false by close().
  private volatile boolean running;

  /**
   * Binds a loopback-only server socket on an ephemeral port and starts the accept thread.
   *
   * On failure, close() is invoked for cleanup; since "running" is still false at that point,
   * close() returns immediately without touching partially-initialized fields.
   * NOTE(review): on a bind failure the locally created ServerSocket is not explicitly
   * closed — confirm whether that is intentional.
   */
  private LauncherServer() throws IOException {
    this.refCount = new AtomicLong(0);

    ServerSocket server = new ServerSocket();
    try {
      server.setReuseAddress(true);
      // Loopback + ephemeral port: no external access control needed, port is advertised to
      // children via getPort().
      server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));

      this.clients = new ArrayList<>();
      this.threadIds = new AtomicLong();
      this.factory = new NamedThreadFactory(THREAD_NAME_FMT);
      this.secretToPendingApps = new ConcurrentHashMap<>();
      this.timeoutTimer = new Timer("LauncherServer-TimeoutTimer", true);
      this.server = server;
      this.running = true;

      this.serverThread = factory.newThread(this::acceptConnections);
      serverThread.start();
    } catch (IOException ioe) {
      close();
      throw ioe;
    } catch (Exception e) {
      close();
      throw new IOException(e);
    }
  }
  /**
   * Registers a handle with the server, and returns the secret the child app needs to connect
   * back.
   */
  synchronized String registerHandle(AbstractAppHandle handle) {
    String secret = createSecret();
    // The handle stays in this map until unregister() removes it.
    secretToPendingApps.put(secret, handle);
    return secret;
  }
  /**
   * Stops the server: clears the cached singleton, cancels pending timeouts, closes the
   * listening socket and all client connections, and waits for the accept thread to exit.
   * Safe to call multiple times; only the first call (after a successful start) has any effect.
   */
  @Override
  public void close() throws IOException {
    // Flip "running" first so the accept loop treats the subsequent socket close as a normal
    // shutdown rather than an error.
    synchronized (this) {
      if (!running) {
        return;
      }
      running = false;
    }

    // Drop the cached singleton so a later getOrCreateServer() builds a fresh instance.
    synchronized(LauncherServer.class) {
      serverInstance = null;
    }

    timeoutTimer.cancel();
    server.close();
    // Close clients from a snapshot so the "clients" lock is not held while closing
    // (connections may interact with this list as they shut down — NOTE(review): presumed).
    synchronized (clients) {
      List<ServerConnection> copy = new ArrayList<>(clients);
      clients.clear();
      for (ServerConnection client : copy) {
        client.close();
      }
    }
    if (serverThread != null) {
      try {
        serverThread.join();
      } catch (InterruptedException ie) {
        // no-op
      }
    }
  }
  /** Takes a reference on the server; called by getOrCreateServer() for each new handle. */
  void ref() {
    refCount.incrementAndGet();
  }

  /**
   * Releases a reference, closing the server when the count reaches zero. The class-level lock
   * serializes this with getOrCreateServer(), so the server is not torn down while a new handle
   * is being registered.
   */
  void unref() {
    synchronized(LauncherServer.class) {
      if (refCount.decrementAndGet() == 0) {
        try {
          close();
        } catch (IOException ioe) {
          // no-op.
        }
      }
    }
  }

  /** Returns the local port the server socket is bound to. */
  int getPort() {
    return server.getLocalPort();
  }
/**
 * Removes the client handle from the pending list (in case it's still there), and unrefs
 * the server.
 *
 * @param handle The handle to unregister; matched by equality against pending entries.
 */
void unregister(AbstractAppHandle handle) {
  // ConcurrentHashMap's values() view supports removal: this atomically drops the (at most
  // one) entry whose value is this handle, replacing the manual scan-and-break loop.
  secretToPendingApps.values().remove(handle);
  unref();
}
/**
 * Accept loop run by the server thread: accepts client sockets until the server is closed.
 * Each client gets its own connection thread plus a timeout task that closes the socket if
 * no Hello message arrives in time.
 */
private void acceptConnections() {
  try {
    while (running) {
      final Socket client = server.accept();
      // Fired if the client never identifies itself: log and drop the socket.
      TimerTask timeout = new TimerTask() {
        @Override
        public void run() {
          LOG.warning("Timed out waiting for hello message from client.");
          try {
            client.close();
          } catch (IOException ioe) {
            // no-op.
          }
        }
      };
      ServerConnection clientConnection = new ServerConnection(client, timeout);
      Thread clientThread = factory.newThread(clientConnection);
      clientConnection.setConnectionThread(clientThread);
      synchronized (clients) {
        clients.add(clientConnection);
      }
      long timeoutMs = getConnectionTimeout();
      // 0 is used for testing to avoid issues with clock resolution / thread scheduling,
      // and force an immediate timeout.
      if (timeoutMs > 0) {
        timeoutTimer.schedule(timeout, timeoutMs);
      } else {
        timeout.run();
      }
      clientThread.start();
    }
  } catch (IOException ioe) {
    // accept() throws once the server socket is closed; only log when it wasn't a shutdown.
    if (running) {
      LOG.log(Level.SEVERE, "Error in accept loop.", ioe);
    }
  }
}
/**
 * Returns the configured child-connection timeout in milliseconds, falling back to the
 * default when the launcher configuration does not set one.
 */
private long getConnectionTimeout() {
  String configured = SparkLauncher.launcherConfig.get(SparkLauncher.CHILD_CONNECTION_TIMEOUT);
  if (configured == null) {
    return DEFAULT_CONNECT_TIMEOUT;
  }
  return Long.parseLong(configured);
}
/**
 * Generates a connection secret not yet assigned to any pending application.
 *
 * <p>The secret is 128 random bytes hex-encoded into a 256-character string. The loop
 * retries on the (vanishingly unlikely) collision with an existing pending secret.
 */
private String createSecret() {
  while (true) {
    byte[] secret = new byte[128];
    RND.nextBytes(secret);
    StringBuilder sb = new StringBuilder();
    for (byte b : secret) {
      // Mask to treat the byte as unsigned (0-255) before hex encoding; clearer than the
      // previous Byte.MAX_VALUE arithmetic and the standard Java idiom for this.
      int ival = b & 0xff;
      if (ival < 0x10) {
        // Pad single-digit values so every byte contributes exactly two hex characters.
        sb.append("0");
      }
      sb.append(Integer.toHexString(ival));
    }
    String secretStr = sb.toString();
    if (!secretToPendingApps.containsKey(secretStr)) {
      return secretStr;
    }
  }
}
/**
 * A connection to a single child application. The first message must be a {@link Hello}
 * carrying the secret that matches this connection to a registered handle; later messages
 * update that handle's app id and state.
 */
class ServerConnection extends LauncherConnection {

  private TimerTask timeout;
  private volatile Thread connectionThread;
  private volatile AbstractAppHandle handle;

  ServerConnection(Socket socket, TimerTask timeout) throws IOException {
    super(socket);
    this.timeout = timeout;
  }

  void setConnectionThread(Thread t) {
    this.connectionThread = t;
  }

  @Override
  protected void handle(Message msg) throws IOException {
    try {
      if (msg instanceof Hello) {
        // First message: cancel the hello timeout and bind this connection to the handle
        // registered under the client's secret.
        timeout.cancel();
        timeout = null;
        Hello hello = (Hello) msg;
        AbstractAppHandle handle = secretToPendingApps.remove(hello.secret);
        if (handle != null) {
          handle.setConnection(this);
          handle.setState(SparkAppHandle.State.CONNECTED);
          this.handle = handle;
        } else {
          throw new IllegalArgumentException("Received Hello for unknown client.");
        }
      } else {
        if (handle == null) {
          // Note: the ternary must be parenthesized; '+' binds tighter than '?:', so the
          // unparenthesized form dropped the message prefix and NPE'd on a null msg.
          throw new IllegalArgumentException("Expected hello, got: " +
            (msg != null ? msg.getClass().getName() : null));
        }
        if (msg instanceof SetAppId) {
          SetAppId set = (SetAppId) msg;
          handle.setAppId(set.appId);
        } else if (msg instanceof SetState) {
          handle.setState(((SetState)msg).state);
        } else {
          throw new IllegalArgumentException("Invalid message: " +
            (msg != null ? msg.getClass().getName() : null));
        }
      }
    } catch (Exception e) {
      // Any handling failure tears down the connection and disposes the handle (if bound).
      LOG.log(Level.INFO, "Error handling message from client.", e);
      if (timeout != null) {
        timeout.cancel();
      }
      close();
      if (handle != null) {
        handle.dispose();
      }
    } finally {
      // Drop cancelled tasks so the shared timer does not accumulate garbage.
      timeoutTimer.purge();
    }
  }

  @Override
  public void close() throws IOException {
    if (!isOpen()) {
      return;
    }
    // Remove from the server's client list before closing the underlying socket.
    synchronized (clients) {
      clients.remove(this);
    }
    super.close();
  }

  /**
   * Wait for the remote side to close the connection so that any pending data is processed.
   * This ensures any changes reported by the child application take effect.
   *
   * This method allows a short period for the above to happen (same amount of time as the
   * connection timeout, which is configurable). This should be fine for well-behaved
   * applications, where they close the connection around the same time the app handle detects
   * the app has finished.
   *
   * In case the connection is not closed within the grace period, this method forcefully closes
   * it and any subsequent data that may arrive will be ignored.
   */
  public void waitForClose() throws IOException {
    Thread connThread = this.connectionThread;
    // Guard against self-join: the connection thread itself may trigger a dispose.
    if (Thread.currentThread() != connThread) {
      try {
        connThread.join(getConnectionTimeout());
      } catch (InterruptedException ie) {
        // Ignore.
      }
      if (connThread.isAlive()) {
        LOG.log(Level.WARNING, "Timed out waiting for child connection to close.");
        close();
      }
    }
  }

}
}
| 9,567 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitOptionParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Parser for spark-submit command line options.
* <p>
* This class encapsulates the parsing code for spark-submit command line options, so that there
* is a single list of options that needs to be maintained (well, sort of, but it makes it harder
* to break things).
*/
/**
 * Parser for spark-submit command line options.
 * <p>
 * This class keeps the single canonical list of options that spark-submit understands, so
 * that option handling lives in one place (well, sort of, but it makes it harder to break
 * things).
 */
class SparkSubmitOptionParser {

  // The following constants define the "main" name for the available options. They're defined
  // to avoid copy & paste of the raw strings where they're needed.
  //
  // The fields are not static so that they're exposed to Scala code that uses this class. See
  // SparkSubmitArguments.scala. That is also why this class is not abstract - to allow code to
  // easily use these constants without having to create dummy implementations of this class.
  protected final String CLASS = "--class";
  protected final String CONF = "--conf";
  protected final String DEPLOY_MODE = "--deploy-mode";
  protected final String DRIVER_CLASS_PATH = "--driver-class-path";
  protected final String DRIVER_CORES = "--driver-cores";
  protected final String DRIVER_JAVA_OPTIONS = "--driver-java-options";
  protected final String DRIVER_LIBRARY_PATH = "--driver-library-path";
  protected final String DRIVER_MEMORY = "--driver-memory";
  protected final String EXECUTOR_MEMORY = "--executor-memory";
  protected final String FILES = "--files";
  protected final String JARS = "--jars";
  protected final String KILL_SUBMISSION = "--kill";
  protected final String MASTER = "--master";
  protected final String NAME = "--name";
  protected final String PACKAGES = "--packages";
  protected final String PACKAGES_EXCLUDE = "--exclude-packages";
  protected final String PROPERTIES_FILE = "--properties-file";
  protected final String PROXY_USER = "--proxy-user";
  protected final String PY_FILES = "--py-files";
  protected final String REPOSITORIES = "--repositories";
  protected final String STATUS = "--status";
  protected final String TOTAL_EXECUTOR_CORES = "--total-executor-cores";

  // Options that do not take arguments.
  protected final String HELP = "--help";
  protected final String SUPERVISE = "--supervise";
  protected final String USAGE_ERROR = "--usage-error";
  protected final String VERBOSE = "--verbose";
  protected final String VERSION = "--version";

  // Standalone-only options.

  // YARN-only options.
  protected final String ARCHIVES = "--archives";
  protected final String EXECUTOR_CORES = "--executor-cores";
  protected final String KEYTAB = "--keytab";
  protected final String NUM_EXECUTORS = "--num-executors";
  protected final String PRINCIPAL = "--principal";
  protected final String QUEUE = "--queue";

  /**
   * This is the canonical list of spark-submit options. Each entry in the array contains the
   * different aliases for the same option; the first element of each entry is the "official"
   * name of the option, passed to {@link #handle(String, String)}.
   * <p>
   * Options not listed here nor in the "switch" list below will result in a call to
   * {@link #handleUnknown(String)}.
   * <p>
   * These two arrays are visible for tests.
   */
  final String[][] opts = {
    { ARCHIVES },
    { CLASS },
    { CONF, "-c" },
    { DEPLOY_MODE },
    { DRIVER_CLASS_PATH },
    { DRIVER_CORES },
    { DRIVER_JAVA_OPTIONS },
    { DRIVER_LIBRARY_PATH },
    { DRIVER_MEMORY },
    { EXECUTOR_CORES },
    { EXECUTOR_MEMORY },
    { FILES },
    { JARS },
    { KEYTAB },
    { KILL_SUBMISSION },
    { MASTER },
    { NAME },
    { NUM_EXECUTORS },
    { PACKAGES },
    { PACKAGES_EXCLUDE },
    { PRINCIPAL },
    { PROPERTIES_FILE },
    { PROXY_USER },
    { PY_FILES },
    { QUEUE },
    { REPOSITORIES },
    { STATUS },
    { TOTAL_EXECUTOR_CORES },
  };

  /**
   * List of switches (command line options that do not take parameters) recognized by spark-submit.
   */
  final String[][] switches = {
    { HELP, "-h" },
    { SUPERVISE },
    { USAGE_ERROR },
    { VERBOSE, "-v" },
    { VERSION },
  };

  /**
   * Parse a list of spark-submit command line options.
   * <p>
   * See SparkSubmitArguments.scala for a more formal description of available options.
   *
   * @throws IllegalArgumentException If an error is found during parsing.
   */
  protected final void parse(List<String> args) {
    Pattern eqSeparatedOpt = Pattern.compile("(--[^=]+)=(.+)");

    int idx = 0;
    while (idx < args.size()) {
      String arg = args.get(idx);
      String value = null;

      // Split "--opt=value" style arguments into name and value up front.
      Matcher m = eqSeparatedOpt.matcher(arg);
      if (m.matches()) {
        arg = m.group(1);
        value = m.group(2);
      }

      boolean keepGoing;
      String optName = findCliOption(arg, opts);
      if (optName != null) {
        // Option that takes a value: consume the following argument if none was inlined.
        if (value == null) {
          if (idx == args.size() - 1) {
            throw new IllegalArgumentException(
                String.format("Missing argument for option '%s'.", arg));
          }
          idx++;
          value = args.get(idx);
        }
        keepGoing = handle(optName, value);
      } else {
        String switchName = findCliOption(arg, switches);
        if (switchName != null) {
          // Switch: no value.
          keepGoing = handle(switchName, null);
        } else {
          keepGoing = handleUnknown(arg);
        }
      }

      if (!keepGoing) {
        break;
      }
      idx++;
    }

    // Skip the argument that stopped parsing (if any); the rest are "extra" arguments.
    if (idx < args.size()) {
      idx++;
    }
    handleExtraArgs(args.subList(idx, args.size()));
  }

  /**
   * Callback for when an option with an argument is parsed.
   *
   * @param opt The long name of the cli option (might differ from actual command line).
   * @param value The value. This will be <i>null</i> if the option does not take a value.
   * @return Whether to continue parsing the argument list.
   */
  protected boolean handle(String opt, String value) {
    throw new UnsupportedOperationException();
  }

  /**
   * Callback for when an unrecognized option is parsed.
   *
   * @param opt Unrecognized option from the command line.
   * @return Whether to continue parsing the argument list.
   */
  protected boolean handleUnknown(String opt) {
    throw new UnsupportedOperationException();
  }

  /**
   * Callback for remaining command line arguments after either {@link #handle(String, String)} or
   * {@link #handleUnknown(String)} return "false". This will be called at the end of parsing even
   * when there are no remaining arguments.
   *
   * @param extra List of remaining arguments.
   */
  protected void handleExtraArgs(List<String> extra) {
    throw new UnsupportedOperationException();
  }

  /**
   * Returns the canonical name for the given option, or null when it matches no known alias.
   */
  private String findCliOption(String name, String[][] available) {
    for (String[] aliases : available) {
      for (String alias : aliases) {
        if (alias.equals(name)) {
          return aliases[0];
        }
      }
    }
    return null;
  }

}
| 9,568 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/AbstractAppHandle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicReference;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Base implementation of {@link SparkAppHandle}: tracks the app's state and id, the server
 * connection to the child, and the listeners to notify on changes.
 */
abstract class AbstractAppHandle implements SparkAppHandle {

  private static final Logger LOG = Logger.getLogger(AbstractAppHandle.class.getName());

  private final LauncherServer server;

  // Created eagerly as a final CopyOnWriteArrayList so fireEvent() can iterate without
  // synchronization; the previous lazy, unsynchronized read raced with addListener().
  private final List<Listener> listeners = new CopyOnWriteArrayList<>();
  private final AtomicReference<State> state = new AtomicReference<>(State.UNKNOWN);

  private LauncherServer.ServerConnection connection;
  private volatile String appId;
  private volatile boolean disposed;

  protected AbstractAppHandle(LauncherServer server) {
    this.server = server;
  }

  @Override
  public synchronized void addListener(Listener l) {
    listeners.add(l);
  }

  @Override
  public State getState() {
    return state.get();
  }

  @Override
  public String getAppId() {
    return appId;
  }

  @Override
  public void stop() {
    CommandBuilderUtils.checkState(connection != null, "Application is still not connected.");
    try {
      connection.send(new LauncherProtocol.Stop());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  @Override
  public synchronized void disconnect() {
    if (connection != null && connection.isOpen()) {
      try {
        connection.close();
      } catch (IOException ioe) {
        // no-op.
      }
    }
    dispose();
  }

  void setConnection(LauncherServer.ServerConnection connection) {
    this.connection = connection;
  }

  LauncherConnection getConnection() {
    return connection;
  }

  boolean isDisposed() {
    return disposed;
  }

  /**
   * Mark the handle as disposed, and set it as LOST in case the current state is not final.
   *
   * This method should be called only when there's a reasonable expectation that the communication
   * with the child application is not needed anymore, either because the code managing the handle
   * has said so, or because the child application is finished.
   */
  synchronized void dispose() {
    if (!isDisposed()) {
      // First wait for all data from the connection to be read. Then unregister the handle.
      // Otherwise, unregistering might cause the server to be stopped and all child connections
      // to be closed.
      if (connection != null) {
        try {
          connection.waitForClose();
        } catch (IOException ioe) {
          // no-op.
        }
      }
      server.unregister(this);
      // Set state to LOST if not yet final.
      setState(State.LOST, false);
      this.disposed = true;
    }
  }

  void setState(State s) {
    setState(s, false);
  }

  /**
   * Transitions to the given state. When {@code force} is false, transitions out of a final
   * state are refused (and logged, unless the requested state is LOST).
   */
  void setState(State s, boolean force) {
    if (force) {
      state.set(s);
      fireEvent(false);
      return;
    }

    // CAS loop: only replace the state while the current one is not final.
    State current = state.get();
    while (!current.isFinal()) {
      if (state.compareAndSet(current, s)) {
        fireEvent(false);
        return;
      }
      current = state.get();
    }

    if (s != State.LOST) {
      LOG.log(Level.WARNING, "Backend requested transition from final state {0} to {1}.",
        new Object[] { current, s });
    }
  }

  void setAppId(String appId) {
    this.appId = appId;
    fireEvent(true);
  }

  // Notifies all listeners; infoChanged for app id updates, stateChanged otherwise.
  private void fireEvent(boolean isInfoChanged) {
    for (Listener l : listeners) {
      if (isInfoChanged) {
        l.infoChanged(this);
      } else {
        l.stateChanged(this);
      }
    }
  }

}
| 9,569 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/FilteredObjectInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.InputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectStreamClass;
import java.util.Arrays;
import java.util.List;
/**
* An object input stream that only allows classes used by the launcher protocol to be in the
* serialized stream. See SPARK-20922.
*/
/**
 * An object input stream that only allows classes used by the launcher protocol to be in the
 * serialized stream. See SPARK-20922.
 */
class FilteredObjectInputStream extends ObjectInputStream {

  // Class-name prefixes that may legitimately appear in launcher protocol streams.
  private static final List<String> ALLOWED_PACKAGES = Arrays.asList(
    "org.apache.spark.launcher.",
    "java.lang.");

  FilteredObjectInputStream(InputStream is) throws IOException {
    super(is);
  }

  @Override
  protected Class<?> resolveClass(ObjectStreamClass desc)
      throws IOException, ClassNotFoundException {
    String className = desc.getName();
    for (String allowed : ALLOWED_PACKAGES) {
      if (className.startsWith(allowed)) {
        return super.resolveClass(desc);
      }
    }
    // Anything outside the allow-list is rejected to block deserialization attacks.
    throw new IllegalArgumentException(
      String.format("Unexpected class in stream: %s", className));
  }

}
| 9,570 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/LauncherConnection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.net.Socket;
import java.util.logging.Level;
import java.util.logging.Logger;
import static org.apache.spark.launcher.LauncherProtocol.*;
/**
* Encapsulates a connection between a launcher server and client. This takes care of the
* communication (sending and receiving messages), while processing of messages is left for
* the implementations.
*/
/**
 * Encapsulates a connection between a launcher server and client. This takes care of the
 * communication (sending and receiving messages), while processing of messages is left for
 * the implementations.
 */
abstract class LauncherConnection implements Closeable, Runnable {

  private static final Logger LOG = Logger.getLogger(LauncherConnection.class.getName());

  private final Socket socket;
  // Output stream created at construction time; writes are serialized by send()'s lock.
  private final ObjectOutputStream out;
  private volatile boolean closed;

  LauncherConnection(Socket socket) throws IOException {
    this.socket = socket;
    this.out = new ObjectOutputStream(socket.getOutputStream());
    this.closed = false;
  }

  // Invoked for every message read from the remote side.
  protected abstract void handle(Message msg) throws IOException;

  @Override
  public void run() {
    try {
      // FilteredObjectInputStream restricts which classes may be deserialized (SPARK-20922).
      FilteredObjectInputStream in = new FilteredObjectInputStream(socket.getInputStream());
      while (isOpen()) {
        Message msg = (Message) in.readObject();
        handle(msg);
      }
    } catch (EOFException eof) {
      // Remote side has closed the connection, just cleanup.
      try {
        close();
      } catch (Exception unused) {
        // no-op.
      }
    } catch (Exception e) {
      // Only report errors that happened while the connection was supposed to be open;
      // exceptions triggered by our own close() are expected and ignored.
      if (!closed) {
        LOG.log(Level.WARNING, "Error in inbound message handling.", e);
        try {
          close();
        } catch (Exception unused) {
          // no-op.
        }
      }
    }
  }

  // Synchronized so concurrent senders do not interleave on the shared output stream.
  protected synchronized void send(Message msg) throws IOException {
    try {
      CommandBuilderUtils.checkState(!closed, "Disconnected.");
      out.writeObject(msg);
      out.flush();
    } catch (IOException ioe) {
      // A write failure implies a broken connection: close it, then rethrow for the caller.
      if (!closed) {
        LOG.log(Level.WARNING, "Error when sending message.", ioe);
        try {
          close();
        } catch (Exception unused) {
          // no-op.
        }
      }
      throw ioe;
    }
  }

  @Override
  public synchronized void close() throws IOException {
    if (isOpen()) {
      closed = true;
      // Closing the socket unblocks the reader thread in run().
      socket.close();
    }
  }

  boolean isOpen() {
    return !closed;
  }

}
| 9,571 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/InProcessAppHandle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.lang.reflect.Method;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Handle for an application launched inside the current JVM: the app's main method runs on a
 * dedicated thread rather than in a child process.
 */
class InProcessAppHandle extends AbstractAppHandle {

  private static final String THREAD_NAME_FMT = "spark-app-%d: '%s'";
  private static final Logger LOG = Logger.getLogger(InProcessAppHandle.class.getName());
  private static final AtomicLong THREAD_IDS = new AtomicLong();

  // Avoid really long thread names.
  private static final int MAX_APP_NAME_LEN = 16;

  private Thread app;

  InProcessAppHandle(LauncherServer server) {
    super(server);
  }

  @Override
  public synchronized void kill() {
    if (isDisposed()) {
      return;
    }
    LOG.warning("kill() may leave the underlying app running in in-process mode.");
    setState(State.KILLED);
    disconnect();

    // Interrupt the thread. This is not guaranteed to kill the app, though.
    Thread appThread = app;
    if (appThread != null) {
      appThread.interrupt();
    }
  }

  /**
   * Starts the application's main method on a new thread named after the (possibly
   * truncated) app name. May be called at most once per handle.
   */
  synchronized void start(String appName, Method main, String[] args) {
    CommandBuilderUtils.checkState(app == null, "Handle already started.");

    // Keep only the tail of overly long names, prefixed with "..." to signal truncation.
    String threadLabel = appName.length() <= MAX_APP_NAME_LEN
        ? appName
        : "..." + appName.substring(appName.length() - MAX_APP_NAME_LEN);

    Runnable body = () -> {
      try {
        main.invoke(null, (Object) args);
      } catch (Throwable t) {
        LOG.log(Level.WARNING, "Application failed with exception.", t);
        setState(State.FAILED);
      }
      dispose();
    };

    app = new Thread(body);
    app.setName(String.format(THREAD_NAME_FMT, THREAD_IDS.incrementAndGet(), threadLabel));
    app.start();
  }

}
| 9,572 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/OutputRedirector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ThreadFactory;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Pattern;
/**
* Redirects lines read from a given input stream to a j.u.l.Logger (at INFO level).
*/
/**
 * Redirects lines read from a given input stream to a j.u.l.Logger (at INFO level).
 */
class OutputRedirector {

  // Compiled once instead of per line: String.replaceFirst() recompiles its regex on every
  // call, which is wasteful in the per-line hot loop below.
  private static final Pattern TRAILING_WHITESPACE = Pattern.compile("\\s*$");

  private final BufferedReader reader;
  private final Logger sink;
  private final Thread thread;
  // Handle to notify when the child's output ends; may be null. Presumably used to reap the
  // child process exit status — see ChildProcAppHandle.monitorChild().
  private final ChildProcAppHandle callback;

  private volatile boolean active;

  OutputRedirector(InputStream in, String loggerName, ThreadFactory tf) {
    this(in, loggerName, tf, null);
  }

  OutputRedirector(
      InputStream in,
      String loggerName,
      ThreadFactory tf,
      ChildProcAppHandle callback) {
    this.active = true;
    this.reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
    this.thread = tf.newThread(this::redirect);
    this.sink = Logger.getLogger(loggerName);
    this.callback = callback;
    thread.start();
  }

  // Pumps the child's output to the logger until EOF; always notifies the callback at the end.
  private void redirect() {
    try {
      String line;
      while ((line = reader.readLine()) != null) {
        if (active) {
          // Right-trim each line before logging.
          sink.info(TRAILING_WHITESPACE.matcher(line).replaceFirst(""));
        }
      }
    } catch (IOException e) {
      sink.log(Level.FINE, "Error reading child process output.", e);
    } finally {
      if (callback != null) {
        callback.monitorChild();
      }
    }
  }

  /**
   * This method just stops the output of the process from showing up in the local logs.
   * The child's output will still be read (and, thus, the redirect thread will still be
   * alive) to avoid the child process hanging because of lack of output buffer.
   */
  void stop() {
    active = false;
  }

  boolean isAlive() {
    return thread.isAlive();
  }

}
| 9,573 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/AbstractLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
* Base class for launcher implementations.
*
* @since Spark 2.3.0
*/
public abstract class AbstractLauncher<T extends AbstractLauncher<T>> {
final SparkSubmitCommandBuilder builder;
AbstractLauncher() {
this.builder = new SparkSubmitCommandBuilder();
}
/**
* Set a custom properties file with Spark configuration for the application.
*
* @param path Path to custom properties file to use.
* @return This launcher.
*/
public T setPropertiesFile(String path) {
checkNotNull(path, "path");
builder.setPropertiesFile(path);
return self();
}
/**
* Set a single configuration value for the application.
*
* @param key Configuration key.
* @param value The value to use.
* @return This launcher.
*/
public T setConf(String key, String value) {
checkNotNull(key, "key");
checkNotNull(value, "value");
checkArgument(key.startsWith("spark."), "'key' must start with 'spark.'");
builder.conf.put(key, value);
return self();
}
/**
* Set the application name.
*
* @param appName Application name.
* @return This launcher.
*/
public T setAppName(String appName) {
checkNotNull(appName, "appName");
builder.appName = appName;
return self();
}
/**
* Set the Spark master for the application.
*
* @param master Spark master.
* @return This launcher.
*/
public T setMaster(String master) {
checkNotNull(master, "master");
builder.master = master;
return self();
}
/**
* Set the deploy mode for the application.
*
* @param mode Deploy mode.
* @return This launcher.
*/
public T setDeployMode(String mode) {
checkNotNull(mode, "mode");
builder.deployMode = mode;
return self();
}
/**
* Set the main application resource. This should be the location of a jar file for Scala/Java
* applications, or a python script for PySpark applications.
*
* @param resource Path to the main application resource.
* @return This launcher.
*/
public T setAppResource(String resource) {
checkNotNull(resource, "resource");
builder.appResource = resource;
return self();
}
/**
* Sets the application class name for Java/Scala applications.
*
* @param mainClass Application's main class.
* @return This launcher.
*/
public T setMainClass(String mainClass) {
checkNotNull(mainClass, "mainClass");
builder.mainClass = mainClass;
return self();
}
/**
* Adds a no-value argument to the Spark invocation. If the argument is known, this method
* validates whether the argument is indeed a no-value argument, and throws an exception
* otherwise.
* <p>
* Use this method with caution. It is possible to create an invalid Spark command by passing
* unknown arguments to this method, since those are allowed for forward compatibility.
*
* @since 1.5.0
* @param arg Argument to add.
* @return This launcher.
*/
public T addSparkArg(String arg) {
SparkSubmitOptionParser validator = new ArgumentValidator(false);
validator.parse(Arrays.asList(arg));
builder.userArgs.add(arg);
return self();
}
/**
* Adds an argument with a value to the Spark invocation. If the argument name corresponds to
* a known argument, the code validates that the argument actually expects a value, and throws
* an exception otherwise.
* <p>
* It is safe to add arguments modified by other methods in this class (such as
* {@link #setMaster(String)} - the last invocation will be the one to take effect.
* <p>
* Use this method with caution. It is possible to create an invalid Spark command by passing
* unknown arguments to this method, since those are allowed for forward compatibility.
*
* @since 1.5.0
* @param name Name of argument to add.
* @param value Value of the argument.
* @return This launcher.
*/
public T addSparkArg(String name, String value) {
SparkSubmitOptionParser validator = new ArgumentValidator(true);
if (validator.MASTER.equals(name)) {
setMaster(value);
} else if (validator.PROPERTIES_FILE.equals(name)) {
setPropertiesFile(value);
} else if (validator.CONF.equals(name)) {
String[] vals = value.split("=", 2);
setConf(vals[0], vals[1]);
} else if (validator.CLASS.equals(name)) {
setMainClass(value);
} else if (validator.JARS.equals(name)) {
builder.jars.clear();
for (String jar : value.split(",")) {
addJar(jar);
}
} else if (validator.FILES.equals(name)) {
builder.files.clear();
for (String file : value.split(",")) {
addFile(file);
}
} else if (validator.PY_FILES.equals(name)) {
builder.pyFiles.clear();
for (String file : value.split(",")) {
addPyFile(file);
}
} else {
validator.parse(Arrays.asList(name, value));
builder.userArgs.add(name);
builder.userArgs.add(value);
}
return self();
}
/**
* Adds command line arguments for the application.
*
* @param args Arguments to pass to the application's main class.
* @return This launcher.
*/
public T addAppArgs(String... args) {
for (String arg : args) {
checkNotNull(arg, "arg");
builder.appArgs.add(arg);
}
return self();
}
/**
* Adds a jar file to be submitted with the application.
*
* @param jar Path to the jar file.
* @return This launcher.
*/
public T addJar(String jar) {
checkNotNull(jar, "jar");
builder.jars.add(jar);
return self();
}
/**
* Adds a file to be submitted with the application.
*
* @param file Path to the file.
* @return This launcher.
*/
public T addFile(String file) {
checkNotNull(file, "file");
builder.files.add(file);
return self();
}
/**
* Adds a python file / zip / egg to be submitted with the application.
*
* @param file Path to the file.
* @return This launcher.
*/
public T addPyFile(String file) {
checkNotNull(file, "file");
builder.pyFiles.add(file);
return self();
}
/**
 * Enables verbose reporting for SparkSubmit. When enabled, the {@code --verbose} flag is
 * added to the generated spark-submit command line.
 *
 * @param verbose Whether to enable verbose output.
 * @return This launcher.
 */
public T setVerbose(boolean verbose) {
  builder.verbose = verbose;
  return self();
}
/**
 * Starts a Spark application.
 *
 * <p>
 * This method returns a handle that provides information about the running application and can
 * be used to do basic interaction with it.
 * <p>
 * The returned handle assumes that the application will instantiate a single SparkContext
 * during its lifetime. Once that context reports a final state (one that indicates the
 * SparkContext has stopped), the handle will not perform new state transitions, so anything
 * that happens after that cannot be monitored. If the underlying application is launched as
 * a child process, {@link SparkAppHandle#kill()} can still be used to kill the child process.
 *
 * @since 1.6.0
 * @param listeners Listeners to add to the handle before the app is launched.
 * @return A handle for the launched application.
 * @throws IOException If the application cannot be started.
 */
public abstract SparkAppHandle startApplication(SparkAppHandle.Listener... listeners)
  throws IOException;

// Subclasses return "this" with their own concrete type, so the fluent setters above can be
// chained without casting.
abstract T self();
/**
 * Option parser used only to validate arguments handed to {@code addSparkArg}. It accepts
 * anything it does not recognize, so that options added to future versions of SparkSubmit
 * are not rejected by older launcher libraries.
 */
private static class ArgumentValidator extends SparkSubmitOptionParser {

  // Whether every parsed option is required to carry a value.
  private final boolean requiresValue;

  ArgumentValidator(boolean requiresValue) {
    this.requiresValue = requiresValue;
  }

  @Override
  protected boolean handle(String opt, String value) {
    if (requiresValue && value == null) {
      throw new IllegalArgumentException(String.format("'%s' expects a value.", opt));
    }
    return true;
  }

  @Override
  protected boolean handleUnknown(String opt) {
    // Do not fail on unknown arguments, to support future arguments added to SparkSubmit.
    return true;
  }

  @Override
  protected void handleExtraArgs(List<String> extra) {
    // No op.
  }
}
}
| 9,574 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.File;
import java.io.IOException;
import java.util.*;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
 * Special command builder for handling a CLI invocation of SparkSubmit.
 * <p>
 * This builder adds command line parsing compatible with SparkSubmit. It handles setting
 * driver-side options and special parsing behavior needed for the special-casing certain internal
 * Spark applications.
 * <p>
 * This class has also some special features to aid launching shells (pyspark and sparkR) and also
 * examples.
 */
class SparkSubmitCommandBuilder extends AbstractCommandBuilder {

  /**
   * Name of the app resource used to identify the PySpark shell. The command line parser expects
   * the resource name to be the very first argument to spark-submit in this case.
   *
   * NOTE: this cannot be "pyspark-shell" since that identifies the PySpark shell to SparkSubmit
   * (see java_gateway.py), and can cause this code to enter into an infinite loop.
   */
  static final String PYSPARK_SHELL = "pyspark-shell-main";

  /**
   * This is the actual resource name that identifies the PySpark shell to SparkSubmit.
   */
  static final String PYSPARK_SHELL_RESOURCE = "pyspark-shell";

  /**
   * Name of the app resource used to identify the SparkR shell. The command line parser expects
   * the resource name to be the very first argument to spark-submit in this case.
   *
   * NOTE: this cannot be "sparkr-shell" since that identifies the SparkR shell to SparkSubmit
   * (see sparkR.R), and can cause this code to enter into an infinite loop.
   */
  static final String SPARKR_SHELL = "sparkr-shell-main";

  /**
   * This is the actual resource name that identifies the SparkR shell to SparkSubmit.
   */
  static final String SPARKR_SHELL_RESOURCE = "sparkr-shell";

  /**
   * Name of app resource used to identify examples. When running examples, args[0] should be
   * this name. The app resource will identify the example class to run.
   */
  static final String RUN_EXAMPLE = "run-example";

  /**
   * Prefix for example class names.
   */
  static final String EXAMPLE_CLASS_PREFIX = "org.apache.spark.examples.";

  /**
   * This map must match the class names for available special classes, since this modifies the way
   * command line parsing works. This maps the class name to the resource to use when calling
   * spark-submit.
   */
  private static final Map<String, String> specialClasses = new HashMap<>();
  static {
    specialClasses.put("org.apache.spark.repl.Main", "spark-shell");
    specialClasses.put("org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver",
      SparkLauncher.NO_RESOURCE);
    specialClasses.put("org.apache.spark.sql.hive.thriftserver.HiveThriftServer2",
      SparkLauncher.NO_RESOURCE);
  }

  // Arguments supplied programmatically (e.g. through the launcher's addSparkArg) that did not
  // match a known option; re-parsed in buildSparkSubmitArgs() to detect special commands.
  final List<String> userArgs;
  // spark-submit options (and their values) accumulated while parsing the command line.
  private final List<String> parsedArgs;
  // Special command means no appResource and no mainClass required
  private final boolean isSpecialCommand;
  private final boolean isExample;

  /**
   * Controls whether mixing spark-submit arguments with app arguments is allowed. This is needed
   * to parse the command lines for things like bin/spark-shell, which allows users to mix and
   * match arguments (e.g. "bin/spark-shell SparkShellArg --master foo").
   */
  private boolean allowsMixedArguments;

  /**
   * This constructor is used when creating a user-configurable launcher. It allows the
   * spark-submit argument list to be modified after creation.
   */
  SparkSubmitCommandBuilder() {
    this.isSpecialCommand = false;
    this.isExample = false;
    this.parsedArgs = new ArrayList<>();
    this.userArgs = new ArrayList<>();
  }

  /**
   * This constructor is used when invoking spark-submit; it parses and validates arguments
   * provided by the user on the command line.
   */
  SparkSubmitCommandBuilder(List<String> args) {
    this.allowsMixedArguments = false;
    this.parsedArgs = new ArrayList<>();
    boolean isExample = false;
    List<String> submitArgs = args;
    this.userArgs = Collections.emptyList();

    if (args.size() > 0) {
      // The first argument may be a marker inserted by the pyspark / sparkR / run-example
      // wrapper scripts; it changes how the remainder of the command line is parsed.
      switch (args.get(0)) {
        case PYSPARK_SHELL:
          this.allowsMixedArguments = true;
          appResource = PYSPARK_SHELL;
          submitArgs = args.subList(1, args.size());
          break;

        case SPARKR_SHELL:
          this.allowsMixedArguments = true;
          appResource = SPARKR_SHELL;
          submitArgs = args.subList(1, args.size());
          break;

        case RUN_EXAMPLE:
          isExample = true;
          appResource = SparkLauncher.NO_RESOURCE;
          submitArgs = args.subList(1, args.size());
      }

      this.isExample = isExample;
      OptionParser parser = new OptionParser(true);
      parser.parse(submitArgs);
      this.isSpecialCommand = parser.isSpecialCommand;
    } else {
      this.isExample = isExample;
      // No arguments at all; mark as special so that no appResource / mainClass is required.
      this.isSpecialCommand = true;
    }
  }

  @Override
  public List<String> buildCommand(Map<String, String> env)
      throws IOException, IllegalArgumentException {
    // The shell markers get dedicated command construction; everything else is spark-submit.
    if (PYSPARK_SHELL.equals(appResource) && !isSpecialCommand) {
      return buildPySparkShellCommand(env);
    } else if (SPARKR_SHELL.equals(appResource) && !isSpecialCommand) {
      return buildSparkRCommand(env);
    } else {
      return buildSparkSubmitCommand(env);
    }
  }

  List<String> buildSparkSubmitArgs() {
    List<String> args = new ArrayList<>();
    OptionParser parser = new OptionParser(false);
    final boolean isSpecialCommand;

    // If the user args array is not empty, we need to parse it to detect exactly what
    // the user is trying to run, so that checks below are correct.
    if (!userArgs.isEmpty()) {
      parser.parse(userArgs);
      isSpecialCommand = parser.isSpecialCommand;
    } else {
      isSpecialCommand = this.isSpecialCommand;
    }

    if (!allowsMixedArguments && !isSpecialCommand) {
      checkArgument(appResource != null, "Missing application resource.");
    }

    if (verbose) {
      args.add(parser.VERBOSE);
    }

    if (master != null) {
      args.add(parser.MASTER);
      args.add(master);
    }

    if (deployMode != null) {
      args.add(parser.DEPLOY_MODE);
      args.add(deployMode);
    }

    if (appName != null) {
      args.add(parser.NAME);
      args.add(appName);
    }

    for (Map.Entry<String, String> e : conf.entrySet()) {
      args.add(parser.CONF);
      args.add(String.format("%s=%s", e.getKey(), e.getValue()));
    }

    if (propertiesFile != null) {
      args.add(parser.PROPERTIES_FILE);
      args.add(propertiesFile);
    }

    if (isExample) {
      jars.addAll(findExamplesJars());
    }

    if (!jars.isEmpty()) {
      args.add(parser.JARS);
      args.add(join(",", jars));
    }

    if (!files.isEmpty()) {
      args.add(parser.FILES);
      args.add(join(",", files));
    }

    if (!pyFiles.isEmpty()) {
      args.add(parser.PY_FILES);
      args.add(join(",", pyFiles));
    }

    if (isExample && !isSpecialCommand) {
      checkArgument(mainClass != null, "Missing example class name.");
    }

    if (mainClass != null) {
      args.add(parser.CLASS);
      args.add(mainClass);
    }

    // Options captured verbatim during CLI parsing come next, then the resource, then the
    // application's own arguments last, matching spark-submit's expected ordering.
    args.addAll(parsedArgs);
    if (appResource != null) {
      args.add(appResource);
    }
    args.addAll(appArgs);

    return args;
  }

  private List<String> buildSparkSubmitCommand(Map<String, String> env)
      throws IOException, IllegalArgumentException {
    // Load the properties file and check whether spark-submit will be running the app's driver
    // or just launching a cluster app. When running the driver, the JVM's argument will be
    // modified to cover the driver's configuration.
    Map<String, String> config = getEffectiveConfig();
    boolean isClientMode = isClientMode(config);
    String extraClassPath = isClientMode ? config.get(SparkLauncher.DRIVER_EXTRA_CLASSPATH) : null;

    List<String> cmd = buildJavaCommand(extraClassPath);
    // Take Thrift Server as daemon
    if (isThriftServer(mainClass)) {
      addOptionString(cmd, System.getenv("SPARK_DAEMON_JAVA_OPTS"));
    }
    addOptionString(cmd, System.getenv("SPARK_SUBMIT_OPTS"));

    // We don't want the client to specify Xmx. These have to be set by their corresponding
    // memory flag --driver-memory or configuration entry spark.driver.memory
    String driverExtraJavaOptions = config.get(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS);
    if (!isEmpty(driverExtraJavaOptions) && driverExtraJavaOptions.contains("Xmx")) {
      String msg = String.format("Not allowed to specify max heap(Xmx) memory settings through " +
                   "java options (was %s). Use the corresponding --driver-memory or " +
                   "spark.driver.memory configuration instead.", driverExtraJavaOptions);
      throw new IllegalArgumentException(msg);
    }

    if (isClientMode) {
      // Figuring out where the memory value come from is a little tricky due to precedence.
      // Precedence is observed in the following order:
      // - explicit configuration (setConf()), which also covers --driver-memory cli argument.
      // - properties file.
      // - SPARK_DRIVER_MEMORY env variable
      // - SPARK_MEM env variable
      // - default value (1g)
      // Take Thrift Server as daemon
      String tsMemory =
        isThriftServer(mainClass) ? System.getenv("SPARK_DAEMON_MEMORY") : null;
      String memory = firstNonEmpty(tsMemory, config.get(SparkLauncher.DRIVER_MEMORY),
        System.getenv("SPARK_DRIVER_MEMORY"), System.getenv("SPARK_MEM"), DEFAULT_MEM);
      cmd.add("-Xmx" + memory);
      addOptionString(cmd, driverExtraJavaOptions);
      mergeEnvPathList(env, getLibPathEnvName(),
        config.get(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH));
    }

    cmd.add("org.apache.spark.deploy.SparkSubmit");
    cmd.addAll(buildSparkSubmitArgs());
    return cmd;
  }

  private List<String> buildPySparkShellCommand(Map<String, String> env) throws IOException {
    // For backwards compatibility, if a script is specified in
    // the pyspark command line, then run it using spark-submit.
    if (!appArgs.isEmpty() && appArgs.get(0).endsWith(".py")) {
      System.err.println(
        "Running python applications through 'pyspark' is not supported as of Spark 2.0.\n" +
        "Use ./bin/spark-submit <python file>");
      System.exit(-1);
    }

    checkArgument(appArgs.isEmpty(), "pyspark does not support any application options.");

    // When launching the pyspark shell, the spark-submit arguments should be stored in the
    // PYSPARK_SUBMIT_ARGS env variable.
    appResource = PYSPARK_SHELL_RESOURCE;
    constructEnvVarArgs(env, "PYSPARK_SUBMIT_ARGS");

    // Will pick up the binary executable in the following order
    // 1. conf spark.pyspark.driver.python
    // 2. conf spark.pyspark.python
    // 3. environment variable PYSPARK_DRIVER_PYTHON
    // 4. environment variable PYSPARK_PYTHON
    // 5. python
    List<String> pyargs = new ArrayList<>();
    pyargs.add(firstNonEmpty(conf.get(SparkLauncher.PYSPARK_DRIVER_PYTHON),
      conf.get(SparkLauncher.PYSPARK_PYTHON),
      System.getenv("PYSPARK_DRIVER_PYTHON"),
      System.getenv("PYSPARK_PYTHON"),
      "python"));
    String pyOpts = System.getenv("PYSPARK_DRIVER_PYTHON_OPTS");
    if (conf.containsKey(SparkLauncher.PYSPARK_PYTHON)) {
      // pass conf spark.pyspark.python to python by environment variable.
      env.put("PYSPARK_PYTHON", conf.get(SparkLauncher.PYSPARK_PYTHON));
    }
    if (!isEmpty(pyOpts)) {
      pyargs.addAll(parseOptionString(pyOpts));
    }

    return pyargs;
  }

  private List<String> buildSparkRCommand(Map<String, String> env) throws IOException {
    if (!appArgs.isEmpty() && appArgs.get(0).endsWith(".R")) {
      System.err.println(
        "Running R applications through 'sparkR' is not supported as of Spark 2.0.\n" +
        "Use ./bin/spark-submit <R file>");
      System.exit(-1);
    }

    // When launching the SparkR shell, store the spark-submit arguments in the SPARKR_SUBMIT_ARGS
    // env variable.
    appResource = SPARKR_SHELL_RESOURCE;
    constructEnvVarArgs(env, "SPARKR_SUBMIT_ARGS");

    // Set shell.R as R_PROFILE_USER to load the SparkR package when the shell comes up.
    String sparkHome = System.getenv("SPARK_HOME");
    env.put("R_PROFILE_USER",
      join(File.separator, sparkHome, "R", "lib", "SparkR", "profile", "shell.R"));

    List<String> args = new ArrayList<>();
    args.add(firstNonEmpty(conf.get(SparkLauncher.SPARKR_R_SHELL),
      System.getenv("SPARKR_DRIVER_R"), "R"));
    return args;
  }

  private void constructEnvVarArgs(
      Map<String, String> env,
      String submitArgsEnvVariable) throws IOException {
    mergeEnvPathList(env, getLibPathEnvName(),
      getEffectiveConfig().get(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH));

    // Build a single quoted string from the spark-submit arguments so the shell scripts can
    // pass them along through one environment variable.
    StringBuilder submitArgs = new StringBuilder();
    for (String arg : buildSparkSubmitArgs()) {
      if (submitArgs.length() > 0) {
        submitArgs.append(" ");
      }
      submitArgs.append(quoteForCommandString(arg));
    }
    env.put(submitArgsEnvVariable, submitArgs.toString());
  }

  boolean isClientMode(Map<String, String> userProps) {
    String userMaster = firstNonEmpty(master, userProps.get(SparkLauncher.SPARK_MASTER));
    String userDeployMode = firstNonEmpty(deployMode, userProps.get(SparkLauncher.DEPLOY_MODE));
    // Default master is "local[*]", so assume client mode in that case
    // Client mode applies when: no master set, deploy mode explicitly "client", or deploy mode
    // unset with any master other than "yarn-cluster".
    return userMaster == null ||
      "client".equals(userDeployMode) ||
      (!userMaster.equals("yarn-cluster") && userDeployMode == null);
  }

  /**
   * Return whether the given main class represents a thrift server.
   */
  private boolean isThriftServer(String mainClass) {
    return (mainClass != null &&
      mainClass.equals("org.apache.spark.sql.hive.thriftserver.HiveThriftServer2"));
  }

  private List<String> findExamplesJars() {
    boolean isTesting = "1".equals(getenv("SPARK_TESTING"));
    List<String> examplesJars = new ArrayList<>();
    String sparkHome = getSparkHome();

    File jarsDir;
    // Release distributions keep the examples under examples/jars; source builds keep them
    // under the scala-version-specific target directory.
    if (new File(sparkHome, "RELEASE").isFile()) {
      jarsDir = new File(sparkHome, "examples/jars");
    } else {
      jarsDir = new File(sparkHome,
        String.format("examples/target/scala-%s/jars", getScalaVersion()));
    }

    boolean foundDir = jarsDir.isDirectory();
    checkState(isTesting || foundDir, "Examples jars directory '%s' does not exist.",
        jarsDir.getAbsolutePath());

    if (foundDir) {
      for (File f: jarsDir.listFiles()) {
        examplesJars.add(f.getAbsolutePath());
      }
    }
    return examplesJars;
  }

  /**
   * Parses spark-submit style command lines, recording recognized options into the builder's
   * state and collecting everything else into parsedArgs / appArgs.
   */
  private class OptionParser extends SparkSubmitOptionParser {

    boolean isSpecialCommand = false;
    private final boolean errorOnUnknownArgs;

    OptionParser(boolean errorOnUnknownArgs) {
      this.errorOnUnknownArgs = errorOnUnknownArgs;
    }

    @Override
    protected boolean handle(String opt, String value) {
      switch (opt) {
        case MASTER:
          master = value;
          break;
        case DEPLOY_MODE:
          deployMode = value;
          break;
        case PROPERTIES_FILE:
          propertiesFile = value;
          break;
        case DRIVER_MEMORY:
          conf.put(SparkLauncher.DRIVER_MEMORY, value);
          break;
        case DRIVER_JAVA_OPTIONS:
          conf.put(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, value);
          break;
        case DRIVER_LIBRARY_PATH:
          conf.put(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH, value);
          break;
        case DRIVER_CLASS_PATH:
          conf.put(SparkLauncher.DRIVER_EXTRA_CLASSPATH, value);
          break;
        case CONF:
          String[] setConf = value.split("=", 2);
          checkArgument(setConf.length == 2, "Invalid argument to %s: %s", CONF, value);
          conf.put(setConf[0], setConf[1]);
          break;
        case CLASS:
          // The special classes require some special command line handling, since they allow
          // mixing spark-submit arguments with arguments that should be propagated to the shell
          // itself. Note that for this to work, the "--class" argument must come before any
          // non-spark-submit arguments.
          mainClass = value;
          if (specialClasses.containsKey(value)) {
            allowsMixedArguments = true;
            appResource = specialClasses.get(value);
          }
          break;
        case KILL_SUBMISSION:
        case STATUS:
          isSpecialCommand = true;
          parsedArgs.add(opt);
          parsedArgs.add(value);
          break;
        case HELP:
        case USAGE_ERROR:
        case VERSION:
          isSpecialCommand = true;
          parsedArgs.add(opt);
          break;
        default:
          parsedArgs.add(opt);
          if (value != null) {
            parsedArgs.add(value);
          }
          break;
      }
      return true;
    }

    @Override
    protected boolean handleUnknown(String opt) {
      // When mixing arguments, add unrecognized parameters directly to the user arguments list. In
      // normal mode, any unrecognized parameter triggers the end of command line parsing, and the
      // parameter itself will be interpreted by SparkSubmit as the application resource. The
      // remaining params will be appended to the list of SparkSubmit arguments.
      if (allowsMixedArguments) {
        appArgs.add(opt);
        return true;
      } else if (isExample) {
        String className = opt;
        if (!className.startsWith(EXAMPLE_CLASS_PREFIX)) {
          className = EXAMPLE_CLASS_PREFIX + className;
        }
        mainClass = className;
        appResource = SparkLauncher.NO_RESOURCE;
        return false;
      } else if (errorOnUnknownArgs) {
        checkArgument(!opt.startsWith("-"), "Unrecognized option: %s", opt);
        checkState(appResource == null, "Found unrecognized argument but resource is already set.");
        appResource = opt;
        return false;
      }
      return true;
    }

    @Override
    protected void handleExtraArgs(List<String> extra) {
      appArgs.addAll(extra);
    }

  }

}
| 9,575 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/Main.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
 * Command line interface for the Spark launcher. Used internally by Spark scripts.
 */
class Main {

  /**
   * Usage: Main [class] [class args]
   * <p>
   * This CLI works in two different modes:
   * <ul>
   *   <li>"spark-submit": if <i>class</i> is "org.apache.spark.deploy.SparkSubmit", the
   *   {@link SparkLauncher} class is used to launch a Spark application.</li>
   *   <li>"spark-class": if another class is provided, an internal Spark class is run.</li>
   * </ul>
   *
   * This class works in tandem with the "bin/spark-class" script on Unix-like systems, and
   * "bin/spark-class2.cmd" batch script on Windows to execute the final command.
   * <p>
   * On Unix-like systems, the output is a list of command arguments, separated by the NULL
   * character. On Windows, the output is a command line suitable for direct execution from the
   * script.
   */
  public static void main(String[] argsArray) throws Exception {
    checkArgument(argsArray.length > 0, "Not enough arguments: missing class name.");

    List<String> args = new ArrayList<>(Arrays.asList(argsArray));
    String className = args.remove(0);

    // SPARK_PRINT_LAUNCH_COMMAND makes the final command visible on stderr, for debugging.
    boolean printLaunchCommand = !isEmpty(System.getenv("SPARK_PRINT_LAUNCH_COMMAND"));
    Map<String, String> env = new HashMap<>();
    List<String> cmd;
    if (className.equals("org.apache.spark.deploy.SparkSubmit")) {
      try {
        AbstractCommandBuilder builder = new SparkSubmitCommandBuilder(args);
        cmd = buildCommand(builder, env, printLaunchCommand);
      } catch (IllegalArgumentException e) {
        // The arguments did not parse; report the error and build a "--usage-error" command
        // instead, so that SparkSubmit prints an appropriate usage message (class-specific if
        // the intended class could be recovered from the arguments).
        printLaunchCommand = false;
        System.err.println("Error: " + e.getMessage());
        System.err.println();

        MainClassOptionParser parser = new MainClassOptionParser();
        try {
          parser.parse(args);
        } catch (Exception ignored) {
          // Ignore parsing exceptions.
        }

        List<String> help = new ArrayList<>();
        if (parser.className != null) {
          help.add(parser.CLASS);
          help.add(parser.className);
        }
        help.add(parser.USAGE_ERROR);
        AbstractCommandBuilder builder = new SparkSubmitCommandBuilder(help);
        cmd = buildCommand(builder, env, printLaunchCommand);
      }
    } else {
      AbstractCommandBuilder builder = new SparkClassCommandBuilder(className, args);
      cmd = buildCommand(builder, env, printLaunchCommand);
    }

    if (isWindows()) {
      System.out.println(prepareWindowsCommand(cmd, env));
    } else {
      // In bash, use NULL as the arg separator since it cannot be used in an argument.
      List<String> bashCmd = prepareBashCommand(cmd, env);
      for (String c : bashCmd) {
        System.out.print(c);
        System.out.print('\0');
      }
    }
  }

  /**
   * Prepare spark commands with the appropriate command builder.
   * If printLaunchCommand is set then the commands will be printed to the stderr.
   */
  private static List<String> buildCommand(
      AbstractCommandBuilder builder,
      Map<String, String> env,
      boolean printLaunchCommand) throws IOException, IllegalArgumentException {
    List<String> cmd = builder.buildCommand(env);
    if (printLaunchCommand) {
      System.err.println("Spark Command: " + join(" ", cmd));
      System.err.println("========================================");
    }
    return cmd;
  }

  /**
   * Prepare a command line for execution from a Windows batch script.
   *
   * The method quotes all arguments so that spaces are handled as expected. Quotes within arguments
   * are "double quoted" (which is batch for escaping a quote). This page has more details about
   * quoting and other batch script fun stuff: http://ss64.com/nt/syntax-esc.html
   */
  private static String prepareWindowsCommand(List<String> cmd, Map<String, String> childEnv) {
    StringBuilder cmdline = new StringBuilder();
    // Environment variables are set inline ("set K=V && ...") since batch has no equivalent
    // of the Unix "env" prefix.
    for (Map.Entry<String, String> e : childEnv.entrySet()) {
      cmdline.append(String.format("set %s=%s", e.getKey(), e.getValue()));
      cmdline.append(" && ");
    }
    for (String arg : cmd) {
      cmdline.append(quoteForBatchScript(arg));
      cmdline.append(" ");
    }
    return cmdline.toString();
  }

  /**
   * Prepare the command for execution from a bash script. The final command will have commands to
   * set up any needed environment variables needed by the child process.
   */
  private static List<String> prepareBashCommand(List<String> cmd, Map<String, String> childEnv) {
    if (childEnv.isEmpty()) {
      return cmd;
    }

    // Prefix with "env K=V ..." so the child process sees the extra environment variables.
    List<String> newCmd = new ArrayList<>();
    newCmd.add("env");

    for (Map.Entry<String, String> e : childEnv.entrySet()) {
      newCmd.add(String.format("%s=%s", e.getKey(), e.getValue()));
    }
    newCmd.addAll(cmd);
    return newCmd;
  }

  /**
   * A parser used when command line parsing fails for spark-submit. It's used as a best-effort
   * at trying to identify the class the user wanted to invoke, since that may require special
   * usage strings (handled by SparkSubmitArguments).
   */
  private static class MainClassOptionParser extends SparkSubmitOptionParser {

    // The value of "--class", if one was found; null otherwise.
    String className;

    @Override
    protected boolean handle(String opt, String value) {
      if (CLASS.equals(opt)) {
        className = value;
      }
      // NOTE(review): returning false here appears to stop further parsing once an option is
      // handled; only --class is of interest to this parser.
      return false;
    }

    @Override
    protected boolean handleUnknown(String opt) {
      return false;
    }

    @Override
    protected void handleExtraArgs(List<String> extra) {

    }

  }

}
| 9,576 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Helper methods for command builders. All members are static; the class is not instantiable.
 */
class CommandBuilderUtils {

  static final String DEFAULT_MEM = "1g";
  static final String DEFAULT_PROPERTIES_FILE = "spark-defaults.conf";
  static final String ENV_SPARK_HOME = "SPARK_HOME";

  // Static-only utility class; prevent instantiation.
  private CommandBuilderUtils() {
    throw new AssertionError("No instances.");
  }

  /** The set of known JVM vendors. */
  enum JavaVendor {
    Oracle, IBM, OpenJDK, Unknown
  }

  /** Returns whether the given string is null or empty. */
  static boolean isEmpty(String s) {
    return s == null || s.isEmpty();
  }

  /** Joins a list of strings using the given separator, skipping null elements. */
  static String join(String sep, String... elements) {
    StringBuilder sb = new StringBuilder();
    for (String e : elements) {
      if (e != null) {
        if (sb.length() > 0) {
          sb.append(sep);
        }
        sb.append(e);
      }
    }
    return sb.toString();
  }

  /** Joins a list of strings using the given separator, skipping null elements. */
  static String join(String sep, Iterable<String> elements) {
    StringBuilder sb = new StringBuilder();
    for (String e : elements) {
      if (e != null) {
        if (sb.length() > 0) {
          sb.append(sep);
        }
        sb.append(e);
      }
    }
    return sb.toString();
  }

  /**
   * Returns the first non-empty value mapped to the given key in the given maps, or null otherwise.
   */
  static String firstNonEmptyValue(String key, Map<?, ?>... maps) {
    for (Map<?, ?> map : maps) {
      String value = (String) map.get(key);
      if (!isEmpty(value)) {
        return value;
      }
    }
    return null;
  }

  /** Returns the first non-empty, non-null string in the given list, or null otherwise. */
  static String firstNonEmpty(String... candidates) {
    for (String s : candidates) {
      if (!isEmpty(s)) {
        return s;
      }
    }
    return null;
  }

  /** Returns the name of the env variable that holds the native library path. */
  static String getLibPathEnvName() {
    if (isWindows()) {
      return "PATH";
    }

    String os = System.getProperty("os.name");
    if (os.startsWith("Mac OS X")) {
      return "DYLD_LIBRARY_PATH";
    } else {
      return "LD_LIBRARY_PATH";
    }
  }

  /** Returns whether the OS is Windows. */
  static boolean isWindows() {
    String os = System.getProperty("os.name");
    return os.startsWith("Windows");
  }

  /** Returns an enum value indicating whose JVM is being used. */
  static JavaVendor getJavaVendor() {
    String vendorString = System.getProperty("java.vendor");
    if (vendorString.contains("Oracle")) {
      return JavaVendor.Oracle;
    }
    if (vendorString.contains("IBM")) {
      return JavaVendor.IBM;
    }
    if (vendorString.contains("OpenJDK")) {
      return JavaVendor.OpenJDK;
    }
    return JavaVendor.Unknown;
  }

  /**
   * Updates the user environment, appending the given pathList to the existing value of the given
   * environment variable (or setting it if it hasn't yet been set).
   */
  static void mergeEnvPathList(Map<String, String> userEnv, String envKey, String pathList) {
    if (!isEmpty(pathList)) {
      String current = firstNonEmpty(userEnv.get(envKey), System.getenv(envKey));
      userEnv.put(envKey, join(File.pathSeparator, current, pathList));
    }
  }

  /**
   * Parse a string as if it were a list of arguments, following bash semantics.
   * For example:
   *
   * Input: "\"ab cd\" efgh 'i \" j'"
   * Output: [ "ab cd", "efgh", "i \" j" ]
   */
  static List<String> parseOptionString(String s) {
    List<String> opts = new ArrayList<>();
    StringBuilder opt = new StringBuilder();
    boolean inOpt = false;
    boolean inSingleQuote = false;
    boolean inDoubleQuote = false;
    boolean escapeNext = false;

    // This is needed to detect when a quoted empty string is used as an argument ("" or '').
    boolean hasData = false;

    for (int i = 0; i < s.length(); i++) {
      int c = s.codePointAt(i);
      if (escapeNext) {
        opt.appendCodePoint(c);
        escapeNext = false;
      } else if (inOpt) {
        switch (c) {
        case '\\':
          if (inSingleQuote) {
            opt.appendCodePoint(c);
          } else {
            escapeNext = true;
          }
          break;
        case '\'':
          if (inDoubleQuote) {
            opt.appendCodePoint(c);
          } else {
            inSingleQuote = !inSingleQuote;
          }
          break;
        case '"':
          if (inSingleQuote) {
            opt.appendCodePoint(c);
          } else {
            inDoubleQuote = !inDoubleQuote;
          }
          break;
        default:
          if (!Character.isWhitespace(c) || inSingleQuote || inDoubleQuote) {
            opt.appendCodePoint(c);
          } else {
            // Unquoted whitespace terminates the current option.
            opts.add(opt.toString());
            opt.setLength(0);
            inOpt = false;
            hasData = false;
          }
        }
      } else {
        switch (c) {
        case '\'':
          inSingleQuote = true;
          inOpt = true;
          hasData = true;
          break;
        case '"':
          inDoubleQuote = true;
          inOpt = true;
          hasData = true;
          break;
        case '\\':
          escapeNext = true;
          inOpt = true;
          hasData = true;
          break;
        default:
          if (!Character.isWhitespace(c)) {
            inOpt = true;
            hasData = true;
            opt.appendCodePoint(c);
          }
        }
      }
    }

    checkArgument(!inSingleQuote && !inDoubleQuote && !escapeNext, "Invalid option string: %s", s);
    if (hasData) {
      opts.add(opt.toString());
    }
    return opts;
  }

  /** Throws IllegalArgumentException if the given object is null. */
  static void checkNotNull(Object o, String arg) {
    if (o == null) {
      throw new IllegalArgumentException(String.format("'%s' must not be null.", arg));
    }
  }

  /** Throws IllegalArgumentException with the given message if the check is false. */
  static void checkArgument(boolean check, String msg, Object... args) {
    if (!check) {
      throw new IllegalArgumentException(String.format(msg, args));
    }
  }

  /** Throws IllegalStateException with the given message if the check is false. */
  static void checkState(boolean check, String msg, Object... args) {
    if (!check) {
      throw new IllegalStateException(String.format(msg, args));
    }
  }

  /**
   * Quote a command argument for a command to be run by a Windows batch script, if the argument
   * needs quoting. Arguments only seem to need quotes in batch scripts if they have certain
   * special characters, some of which need extra (and different) escaping.
   *
   * For example:
   *    original single argument: ab="cde fgh"
   *    quoted: "ab=""cde fgh"""
   */
  static String quoteForBatchScript(String arg) {

    boolean needsQuotes = false;
    for (int i = 0; i < arg.length(); i++) {
      int c = arg.codePointAt(i);
      if (Character.isWhitespace(c) || c == '"' || c == '=' || c == ',' || c == ';') {
        needsQuotes = true;
        break;
      }
    }
    if (!needsQuotes) {
      return arg;
    }
    StringBuilder quoted = new StringBuilder();
    quoted.append("\"");
    for (int i = 0; i < arg.length(); i++) {
      int cp = arg.codePointAt(i);
      switch (cp) {
      case '"':
        // Batch escapes a quote by doubling it.
        quoted.append('"');
        break;
      default:
        break;
      }
      quoted.appendCodePoint(cp);
    }
    if (arg.codePointAt(arg.length() - 1) == '\\') {
      // A trailing backslash would escape the closing quote; double it.
      quoted.append("\\");
    }
    quoted.append("\"");
    return quoted.toString();
  }

  /**
   * Quotes a string so that it can be used in a command string.
   * Basically, just add simple escapes. E.g.:
   *    original single argument : ab "cd" ef
   *    after: "ab \"cd\" ef"
   *
   * This can be parsed back into a single argument by python's "shlex.split()" function.
   */
  static String quoteForCommandString(String s) {
    StringBuilder quoted = new StringBuilder().append('"');
    for (int i = 0; i < s.length(); i++) {
      int cp = s.codePointAt(i);
      if (cp == '"' || cp == '\\') {
        quoted.appendCodePoint('\\');
      }
      quoted.appendCodePoint(cp);
    }
    return quoted.append('"').toString();
  }

  /**
   * Get the major version of the java version string supplied. This method
   * accepts any JEP-223-compliant strings (9-ea, 9+100), as well as legacy
   * version strings such as 1.7.0_79
   */
  static int javaMajorVersion(String javaVersion) {
    String[] version = javaVersion.split("[+.\\-]+");
    int major = Integer.parseInt(version[0]);
    // if major > 1, we're using the JEP-223 version string, e.g., 9-ea, 9+120
    // otherwise the second number is the major version
    if (major > 1) {
      return major;
    } else {
      return Integer.parseInt(version[1]);
    }
  }

  /**
   * Find the location of the Spark jars dir, depending on whether we're looking at a build
   * or a distribution directory.
   */
  static String findJarsDir(String sparkHome, String scalaVersion, boolean failIfNotFound) {
    // TODO: change to the correct directory once the assembly build is changed.
    File libdir = new File(sparkHome, "jars");
    if (!libdir.isDirectory()) {
      libdir = new File(sparkHome, String.format("assembly/target/scala-%s/jars", scalaVersion));
      if (!libdir.isDirectory()) {
        checkState(!failIfNotFound,
          "Library directory '%s' does not exist; make sure Spark is built.",
          libdir.getAbsolutePath());
        return null;
      }
    }
    return libdir.getAbsolutePath();
  }

}
| 9,577 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/ChildProcAppHandle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.io.InputStream;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Handle implementation for monitoring apps started as a child process.
 */
class ChildProcAppHandle extends AbstractAppHandle {
  private static final Logger LOG = Logger.getLogger(ChildProcAppHandle.class.getName());
  // The child process being monitored; set once by setChildProc() and cleared by kill().
  // Volatile because monitorChild() reads it from a separate thread without holding the lock.
  private volatile Process childProc;
  // Forwards the child's output to a java.util.logging logger; null when no log stream was
  // provided to setChildProc().
  private OutputRedirector redirector;
  ChildProcAppHandle(LauncherServer server) {
    super(server);
  }
  @Override
  public synchronized void disconnect() {
    try {
      super.disconnect();
    } finally {
      // Stop forwarding the child's output even if the parent disconnect throws.
      if (redirector != null) {
        redirector.stop();
      }
    }
  }
  @Override
  public synchronized void kill() {
    if (!isDisposed()) {
      setState(State.KILLED);
      disconnect();
      if (childProc != null) {
        // Only force-destroy if still running; either way, drop the reference so
        // monitorChild() sees the handle as disposed of.
        if (childProc.isAlive()) {
          childProc.destroyForcibly();
        }
        childProc = null;
      }
    }
  }
  /**
   * Associates the child process with this handle. If a log stream is given, an
   * OutputRedirector forwards it to the named logger (and tracks process exit); otherwise a
   * dedicated thread is spawned that just waits for the child to finish.
   *
   * @param childProc The launched child process.
   * @param loggerName Name of the logger to receive the child's output.
   * @param logStream The child's output stream, or null to disable redirection.
   */
  void setChildProc(Process childProc, String loggerName, InputStream logStream) {
    this.childProc = childProc;
    if (logStream != null) {
      this.redirector = new OutputRedirector(logStream, loggerName,
        SparkLauncher.REDIRECTOR_FACTORY, this);
    } else {
      // If there is no log redirection, spawn a thread that will wait for the child process
      // to finish.
      SparkLauncher.REDIRECTOR_FACTORY.newThread(this::monitorChild).start();
    }
  }
  /**
   * Wait for the child process to exit and update the handle's state if necessary, according to
   * the exit code.
   */
  void monitorChild() {
    Process proc = childProc;
    if (proc == null) {
      // Process may have already been disposed of, e.g. by calling kill().
      return;
    }
    // Loop because waitFor() may be interrupted before the process actually exits.
    while (proc.isAlive()) {
      try {
        proc.waitFor();
      } catch (Exception e) {
        LOG.log(Level.WARNING, "Exception waiting for child process to exit.", e);
      }
    }
    synchronized (this) {
      if (isDisposed()) {
        return;
      }
      int ec;
      try {
        ec = proc.exitValue();
      } catch (Exception e) {
        LOG.log(Level.WARNING, "Exception getting child process exit code, assuming failure.", e);
        ec = 1;
      }
      if (ec != 0) {
        State currState = getState();
        // Override state with failure if the current state is not final, or is success.
        if (!currState.isFinal() || currState == State.FINISHED) {
          setState(State.FAILED, true);
        }
      }
      dispose();
    }
  }
}
| 9,578 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Library for launching Spark applications programmatically.
*
* <p>
* There are two ways to start applications with this library: as a child process, using
* {@link org.apache.spark.launcher.SparkLauncher}, or in-process, using
* {@link org.apache.spark.launcher.InProcessLauncher}.
* </p>
*
* <p>
* The {@link org.apache.spark.launcher.AbstractLauncher#startApplication(
* org.apache.spark.launcher.SparkAppHandle.Listener...)} method can be used to start Spark and
* provide a handle to monitor and control the running application:
* </p>
*
* <pre>
* {@code
* import org.apache.spark.launcher.SparkAppHandle;
* import org.apache.spark.launcher.SparkLauncher;
*
* public class MyLauncher {
* public static void main(String[] args) throws Exception {
* SparkAppHandle handle = new SparkLauncher()
* .setAppResource("/my/app.jar")
* .setMainClass("my.spark.app.Main")
* .setMaster("local")
* .setConf(SparkLauncher.DRIVER_MEMORY, "2g")
* .startApplication();
* // Use handle API to monitor / control application.
* }
* }
* }
* </pre>
*
* <p>
* Launching applications as a child process requires a full Spark installation. The installation
* directory can be provided to the launcher explicitly in the launcher's configuration, or by
* setting the <i>SPARK_HOME</i> environment variable.
* </p>
*
* <p>
* Launching applications in-process is only recommended in cluster mode, since Spark cannot run
* multiple client-mode applications concurrently in the same process. The in-process launcher
* requires the necessary Spark dependencies (such as spark-core and cluster manager-specific
* modules) to be present in the caller thread's class loader.
* </p>
*
* <p>
* It's also possible to launch a raw child process, without the extra monitoring, using the
* {@link org.apache.spark.launcher.SparkLauncher#launch()} method:
* </p>
*
* <pre>
* {@code
* import org.apache.spark.launcher.SparkLauncher;
*
* public class MyLauncher {
* public static void main(String[] args) throws Exception {
* Process spark = new SparkLauncher()
* .setAppResource("/my/app.jar")
* .setMainClass("my.spark.app.Main")
* .setMaster("local")
* .setConf(SparkLauncher.DRIVER_MEMORY, "2g")
* .launch();
* spark.waitFor();
* }
* }
* }
* </pre>
*
* <p>This method requires the calling code to manually manage the child process, including its
* output streams (to avoid possible deadlocks). It's recommended that
* {@link org.apache.spark.launcher.SparkLauncher#startApplication(
* org.apache.spark.launcher.SparkAppHandle.Listener...)} be used instead.</p>
*/
package org.apache.spark.launcher;
| 9,579 |
0 | Create_ds/spark/launcher/src/main/java/org/apache/spark | Create_ds/spark/launcher/src/main/java/org/apache/spark/launcher/SparkAppHandle.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
/**
 * A handle to a running Spark application.
 * <p>
 * Provides runtime information about the underlying Spark application, and actions to control it.
 *
 * @since 1.6.0
 */
public interface SparkAppHandle {
  /**
   * Represents the application's state. A state can be "final", in which case it will not change
   * after it's reached, and means the application is not running anymore.
   *
   * @since 1.6.0
   */
  enum State {
    /** The application has not reported back yet. */
    UNKNOWN(false),
    /** The application has connected to the handle. */
    CONNECTED(false),
    /** The application has been submitted to the cluster. */
    SUBMITTED(false),
    /** The application is running. */
    RUNNING(false),
    /** The application finished with a successful status. */
    FINISHED(true),
    /** The application finished with a failed status. */
    FAILED(true),
    /** The application was killed. */
    KILLED(true),
    /** The Spark Submit JVM exited with an unknown status. */
    LOST(true);
    private final boolean isFinal;
    State(boolean isFinal) {
      this.isFinal = isFinal;
    }
    /**
     * Whether this state is a final state, meaning the application is not running anymore
     * once it's reached.
     */
    public boolean isFinal() {
      return isFinal;
    }
  }
  /**
   * Adds a listener to be notified of changes to the handle's information. Listeners will be called
   * from the thread processing updates from the application, so they should avoid blocking or
   * long-running operations.
   *
   * @param l Listener to add.
   */
  void addListener(Listener l);
  /** Returns the current application state. */
  State getState();
  /** Returns the application ID, or <code>null</code> if not yet known. */
  String getAppId();
  /**
   * Asks the application to stop. This is best-effort, since the application may fail to receive
   * or act on the command. Callers should watch for a state transition that indicates the
   * application has really stopped.
   */
  void stop();
  /**
   * Tries to kill the underlying application. Implies {@link #disconnect()}. This will not send
   * a {@link #stop()} message to the application, so it's recommended that users first try to
   * stop the application cleanly and only resort to this method if that fails.
   */
  void kill();
  /**
   * Disconnects the handle from the application, without stopping it. After this method is called,
   * the handle will not be able to communicate with the application anymore.
   */
  void disconnect();
  /**
   * Listener for updates to a handle's state. The callbacks do not receive information about
   * what exactly has changed, just that an update has occurred.
   *
   * @since 1.6.0
   */
  public interface Listener {
    /**
     * Callback for changes in the handle's state.
     *
     * @param handle The updated handle.
     * @see SparkAppHandle#getState()
     */
    void stateChanged(SparkAppHandle handle);
    /**
     * Callback for changes in any information that is not the handle's state.
     *
     * @param handle The updated handle.
     */
    void infoChanged(SparkAppHandle handle);
  }
}
| 9,580 |
0 | Create_ds/spark/core/src/test/java/test/org/apache | Create_ds/spark/core/src/test/java/test/org/apache/spark/Java8RDDAPISuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.org.apache.spark;
import java.io.File;
import java.io.Serializable;
import java.util.*;
import scala.Tuple2;
import com.google.common.collect.Iterables;
import com.google.common.io.Files;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.*;
import org.apache.spark.util.Utils;
/**
* Most of these tests replicate org.apache.spark.JavaAPISuite using java 8
* lambda syntax.
*/
public class Java8RDDAPISuite implements Serializable {
private static int foreachCalls = 0;
private transient JavaSparkContext sc;
@Before
public void setUp() {
sc = new JavaSparkContext("local", "JavaAPISuite");
}
@After
public void tearDown() {
sc.stop();
sc = null;
}
@Test
public void foreachWithAnonymousClass() {
foreachCalls = 0;
JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello", "World"));
rdd.foreach(s -> foreachCalls++);
Assert.assertEquals(2, foreachCalls);
}
@Test
public void foreach() {
foreachCalls = 0;
JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello", "World"));
rdd.foreach(x -> foreachCalls++);
Assert.assertEquals(2, foreachCalls);
}
@Test
public void groupBy() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
Function<Integer, Boolean> isOdd = x -> x % 2 == 0;
JavaPairRDD<Boolean, Iterable<Integer>> oddsAndEvens = rdd.groupBy(isOdd);
Assert.assertEquals(2, oddsAndEvens.count());
Assert.assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0))); // Evens
Assert.assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
oddsAndEvens = rdd.groupBy(isOdd, 1);
Assert.assertEquals(2, oddsAndEvens.count());
Assert.assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0))); // Evens
Assert.assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
}
@Test
public void leftOuterJoin() {
JavaPairRDD<Integer, Integer> rdd1 = sc.parallelizePairs(Arrays.asList(
new Tuple2<>(1, 1),
new Tuple2<>(1, 2),
new Tuple2<>(2, 1),
new Tuple2<>(3, 1)
));
JavaPairRDD<Integer, Character> rdd2 = sc.parallelizePairs(Arrays.asList(
new Tuple2<>(1, 'x'),
new Tuple2<>(2, 'y'),
new Tuple2<>(2, 'z'),
new Tuple2<>(4, 'w')
));
List<Tuple2<Integer, Tuple2<Integer, Optional<Character>>>> joined =
rdd1.leftOuterJoin(rdd2).collect();
Assert.assertEquals(5, joined.size());
Tuple2<Integer, Tuple2<Integer, Optional<Character>>> firstUnmatched =
rdd1.leftOuterJoin(rdd2).filter(tup -> !tup._2()._2().isPresent()).first();
Assert.assertEquals(3, firstUnmatched._1().intValue());
}
@Test
public void foldReduce() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
Function2<Integer, Integer, Integer> add = (a, b) -> a + b;
int sum = rdd.fold(0, add);
Assert.assertEquals(33, sum);
sum = rdd.reduce(add);
Assert.assertEquals(33, sum);
}
@Test
public void foldByKey() {
List<Tuple2<Integer, Integer>> pairs = Arrays.asList(
new Tuple2<>(2, 1),
new Tuple2<>(2, 1),
new Tuple2<>(1, 1),
new Tuple2<>(3, 2),
new Tuple2<>(3, 1)
);
JavaPairRDD<Integer, Integer> rdd = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> sums = rdd.foldByKey(0, (a, b) -> a + b);
Assert.assertEquals(1, sums.lookup(1).get(0).intValue());
Assert.assertEquals(2, sums.lookup(2).get(0).intValue());
Assert.assertEquals(3, sums.lookup(3).get(0).intValue());
}
@Test
public void reduceByKey() {
List<Tuple2<Integer, Integer>> pairs = Arrays.asList(
new Tuple2<>(2, 1),
new Tuple2<>(2, 1),
new Tuple2<>(1, 1),
new Tuple2<>(3, 2),
new Tuple2<>(3, 1)
);
JavaPairRDD<Integer, Integer> rdd = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> counts = rdd.reduceByKey((a, b) -> a + b);
Assert.assertEquals(1, counts.lookup(1).get(0).intValue());
Assert.assertEquals(2, counts.lookup(2).get(0).intValue());
Assert.assertEquals(3, counts.lookup(3).get(0).intValue());
Map<Integer, Integer> localCounts = counts.collectAsMap();
Assert.assertEquals(1, localCounts.get(1).intValue());
Assert.assertEquals(2, localCounts.get(2).intValue());
Assert.assertEquals(3, localCounts.get(3).intValue());
localCounts = rdd.reduceByKeyLocally((a, b) -> a + b);
Assert.assertEquals(1, localCounts.get(1).intValue());
Assert.assertEquals(2, localCounts.get(2).intValue());
Assert.assertEquals(3, localCounts.get(3).intValue());
}
@Test
public void map() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
JavaDoubleRDD doubles = rdd.mapToDouble(x -> 1.0 * x).cache();
doubles.collect();
JavaPairRDD<Integer, Integer> pairs = rdd.mapToPair(x -> new Tuple2<>(x, x))
.cache();
pairs.collect();
JavaRDD<String> strings = rdd.map(Object::toString).cache();
strings.collect();
}
@Test
public void flatMap() {
JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello World!",
"The quick brown fox jumps over the lazy dog."));
JavaRDD<String> words = rdd.flatMap(x -> Arrays.asList(x.split(" ")).iterator());
Assert.assertEquals("Hello", words.first());
Assert.assertEquals(11, words.count());
JavaPairRDD<String, String> pairs = rdd.flatMapToPair(s -> {
List<Tuple2<String, String>> pairs2 = new LinkedList<>();
for (String word : s.split(" ")) {
pairs2.add(new Tuple2<>(word, word));
}
return pairs2.iterator();
});
Assert.assertEquals(new Tuple2<>("Hello", "Hello"), pairs.first());
Assert.assertEquals(11, pairs.count());
JavaDoubleRDD doubles = rdd.flatMapToDouble(s -> {
List<Double> lengths = new LinkedList<>();
for (String word : s.split(" ")) {
lengths.add((double) word.length());
}
return lengths.iterator();
});
Assert.assertEquals(5.0, doubles.first(), 0.01);
Assert.assertEquals(11, pairs.count());
}
@Test
public void mapsFromPairsToPairs() {
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> pairRDD = sc.parallelizePairs(pairs);
// Regression test for SPARK-668:
JavaPairRDD<String, Integer> swapped =
pairRDD.flatMapToPair(x -> Collections.singletonList(x.swap()).iterator());
swapped.collect();
// There was never a bug here, but it's worth testing:
pairRDD.map(Tuple2::swap).collect();
}
@Test
public void mapPartitions() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4), 2);
JavaRDD<Integer> partitionSums = rdd.mapPartitions(iter -> {
int sum = 0;
while (iter.hasNext()) {
sum += iter.next();
}
return Collections.singletonList(sum).iterator();
});
Assert.assertEquals("[3, 7]", partitionSums.collect().toString());
}
@Test
public void sequenceFile() {
File tempDir = Files.createTempDir();
tempDir.deleteOnExit();
String outputDir = new File(tempDir, "output").getAbsolutePath();
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
rdd.mapToPair(pair -> new Tuple2<>(new IntWritable(pair._1()), new Text(pair._2())))
.saveAsHadoopFile(outputDir, IntWritable.class, Text.class, SequenceFileOutputFormat.class);
// Try reading the output back as an object file
JavaPairRDD<Integer, String> readRDD = sc.sequenceFile(outputDir, IntWritable.class, Text.class)
.mapToPair(pair -> new Tuple2<>(pair._1().get(), pair._2().toString()));
Assert.assertEquals(pairs, readRDD.collect());
Utils.deleteRecursively(tempDir);
}
@Test
public void zip() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
JavaDoubleRDD doubles = rdd.mapToDouble(x -> 1.0 * x);
JavaPairRDD<Integer, Double> zipped = rdd.zip(doubles);
zipped.count();
}
@Test
public void zipPartitions() {
JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6), 2);
JavaRDD<String> rdd2 = sc.parallelize(Arrays.asList("1", "2", "3", "4"), 2);
FlatMapFunction2<Iterator<Integer>, Iterator<String>, Integer> sizesFn =
(Iterator<Integer> i, Iterator<String> s) -> {
int sizeI = 0;
while (i.hasNext()) {
sizeI += 1;
i.next();
}
int sizeS = 0;
while (s.hasNext()) {
sizeS += 1;
s.next();
}
return Arrays.asList(sizeI, sizeS).iterator();
};
JavaRDD<Integer> sizes = rdd1.zipPartitions(rdd2, sizesFn);
Assert.assertEquals("[3, 2, 3, 2]", sizes.collect().toString());
}
@Test
public void keyBy() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2));
List<Tuple2<String, Integer>> s = rdd.keyBy(Object::toString).collect();
Assert.assertEquals(new Tuple2<>("1", 1), s.get(0));
Assert.assertEquals(new Tuple2<>("2", 2), s.get(1));
}
@Test
public void mapOnPairRDD() {
JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4));
JavaPairRDD<Integer, Integer> rdd2 =
rdd1.mapToPair(i -> new Tuple2<>(i, i % 2));
JavaPairRDD<Integer, Integer> rdd3 =
rdd2.mapToPair(in -> new Tuple2<>(in._2(), in._1()));
Assert.assertEquals(Arrays.asList(
new Tuple2<>(1, 1),
new Tuple2<>(0, 2),
new Tuple2<>(1, 3),
new Tuple2<>(0, 4)), rdd3.collect());
}
@Test
public void collectPartitions() {
JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7), 3);
JavaPairRDD<Integer, Integer> rdd2 =
rdd1.mapToPair(i -> new Tuple2<>(i, i % 2));
List<Integer>[] parts = rdd1.collectPartitions(new int[]{0});
Assert.assertEquals(Arrays.asList(1, 2), parts[0]);
parts = rdd1.collectPartitions(new int[]{1, 2});
Assert.assertEquals(Arrays.asList(3, 4), parts[0]);
Assert.assertEquals(Arrays.asList(5, 6, 7), parts[1]);
Assert.assertEquals(Arrays.asList(new Tuple2<>(1, 1), new Tuple2<>(2, 0)),
rdd2.collectPartitions(new int[]{0})[0]);
List<Tuple2<Integer, Integer>>[] parts2 = rdd2.collectPartitions(new int[]{1, 2});
Assert.assertEquals(Arrays.asList(new Tuple2<>(3, 1), new Tuple2<>(4, 0)), parts2[0]);
Assert.assertEquals(Arrays.asList(new Tuple2<>(5, 1), new Tuple2<>(6, 0), new Tuple2<>(7, 1)),
parts2[1]);
}
@Test
public void collectAsMapWithIntArrayValues() {
// Regression test for SPARK-1040
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1));
JavaPairRDD<Integer, int[]> pairRDD =
rdd.mapToPair(x -> new Tuple2<>(x, new int[]{x}));
pairRDD.collect(); // Works fine
pairRDD.collectAsMap(); // Used to crash with ClassCastException
}
}
| 9,581 |
0 | Create_ds/spark/core/src/test/java/test/org/apache | Create_ds/spark/core/src/test/java/test/org/apache/spark/JavaSparkContextSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.org.apache.spark;
import java.io.*;
import scala.collection.immutable.List;
import scala.collection.immutable.List$;
import scala.collection.immutable.Map;
import scala.collection.immutable.Map$;
import org.junit.Test;
import org.apache.spark.api.java.*;
import org.apache.spark.*;
/**
 * Java apps can use both Java-friendly JavaSparkContext and Scala SparkContext.
 */
public class JavaSparkContextSuite implements Serializable {
  @Test
  public void javaSparkContext() {
    // Exercise each public JavaSparkContext constructor; only construction is under test,
    // so every context is stopped immediately.
    String[] noJars = new String[] {};
    java.util.Map<String, String> env = new java.util.HashMap<>();
    new JavaSparkContext(new SparkConf().setMaster("local").setAppName("name")).stop();
    new JavaSparkContext("local", "name", new SparkConf()).stop();
    new JavaSparkContext("local", "name").stop();
    new JavaSparkContext("local", "name", "sparkHome", "jarFile").stop();
    new JavaSparkContext("local", "name", "sparkHome", noJars).stop();
    new JavaSparkContext("local", "name", "sparkHome", noJars, env).stop();
  }
  @Test
  public void scalaSparkContext() {
    // Same exercise for the Scala SparkContext constructors, using empty Scala collections.
    List<String> noJars = List$.MODULE$.empty();
    Map<String, String> env = Map$.MODULE$.empty();
    new SparkContext(new SparkConf().setMaster("local").setAppName("name")).stop();
    new SparkContext("local", "name", new SparkConf()).stop();
    new SparkContext("local", "name").stop();
    new SparkContext("local", "name", "sparkHome").stop();
    new SparkContext("local", "name", "sparkHome", noJars).stop();
    new SparkContext("local", "name", "sparkHome", noJars, env).stop();
  }
}
| 9,582 |
0 | Create_ds/spark/core/src/test/java/test/org/apache | Create_ds/spark/core/src/test/java/test/org/apache/spark/JavaAPISuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.org.apache.spark;
import java.io.*;
import java.nio.channels.FileChannel;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import org.apache.spark.Accumulator;
import org.apache.spark.AccumulatorParam;
import org.apache.spark.Partitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.TaskContext$;
import scala.Tuple2;
import scala.Tuple3;
import scala.Tuple4;
import scala.collection.JavaConverters;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.base.Throwables;
import com.google.common.io.Files;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.junit.After;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaFutureAction;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.*;
import org.apache.spark.input.PortableDataStream;
import org.apache.spark.partial.BoundedDouble;
import org.apache.spark.partial.PartialResult;
import org.apache.spark.rdd.RDD;
import org.apache.spark.serializer.KryoSerializer;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.util.LongAccumulator;
import org.apache.spark.util.StatCounter;
// The test suite itself is Serializable so that anonymous Function implementations can be
// serialized, as an alternative to converting these anonymous classes to static inner classes;
// see http://stackoverflow.com/questions/758570/.
public class JavaAPISuite implements Serializable {
private transient JavaSparkContext sc;
private transient File tempDir;
@Before
public void setUp() {
sc = new JavaSparkContext("local", "JavaAPISuite");
tempDir = Files.createTempDir();
tempDir.deleteOnExit();
}
  @After
  public void tearDown() {
    sc.stop();
    sc = null;  // drop the reference so no test can accidentally reuse a stopped context
  }
@SuppressWarnings("unchecked")
@Test
public void sparkContextUnion() {
// Union of non-specialized JavaRDDs
List<String> strings = Arrays.asList("Hello", "World");
JavaRDD<String> s1 = sc.parallelize(strings);
JavaRDD<String> s2 = sc.parallelize(strings);
// Varargs
JavaRDD<String> sUnion = sc.union(s1, s2);
assertEquals(4, sUnion.count());
// List
List<JavaRDD<String>> list = new ArrayList<>();
list.add(s2);
sUnion = sc.union(s1, list);
assertEquals(4, sUnion.count());
// Union of JavaDoubleRDDs
List<Double> doubles = Arrays.asList(1.0, 2.0);
JavaDoubleRDD d1 = sc.parallelizeDoubles(doubles);
JavaDoubleRDD d2 = sc.parallelizeDoubles(doubles);
JavaDoubleRDD dUnion = sc.union(d1, d2);
assertEquals(4, dUnion.count());
// Union of JavaPairRDDs
List<Tuple2<Integer, Integer>> pairs = new ArrayList<>();
pairs.add(new Tuple2<>(1, 2));
pairs.add(new Tuple2<>(3, 4));
JavaPairRDD<Integer, Integer> p1 = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> p2 = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> pUnion = sc.union(p1, p2);
assertEquals(4, pUnion.count());
}
@SuppressWarnings("unchecked")
@Test
public void intersection() {
List<Integer> ints1 = Arrays.asList(1, 10, 2, 3, 4, 5);
List<Integer> ints2 = Arrays.asList(1, 6, 2, 3, 7, 8);
JavaRDD<Integer> s1 = sc.parallelize(ints1);
JavaRDD<Integer> s2 = sc.parallelize(ints2);
JavaRDD<Integer> intersections = s1.intersection(s2);
assertEquals(3, intersections.count());
JavaRDD<Integer> empty = sc.emptyRDD();
JavaRDD<Integer> emptyIntersection = empty.intersection(s2);
assertEquals(0, emptyIntersection.count());
List<Double> doubles = Arrays.asList(1.0, 2.0);
JavaDoubleRDD d1 = sc.parallelizeDoubles(doubles);
JavaDoubleRDD d2 = sc.parallelizeDoubles(doubles);
JavaDoubleRDD dIntersection = d1.intersection(d2);
assertEquals(2, dIntersection.count());
List<Tuple2<Integer, Integer>> pairs = new ArrayList<>();
pairs.add(new Tuple2<>(1, 2));
pairs.add(new Tuple2<>(3, 4));
JavaPairRDD<Integer, Integer> p1 = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> p2 = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> pIntersection = p1.intersection(p2);
assertEquals(2, pIntersection.count());
}
@Test
public void sample() {
List<Integer> ints = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
JavaRDD<Integer> rdd = sc.parallelize(ints);
// the seeds here are "magic" to make this work out nicely
JavaRDD<Integer> sample20 = rdd.sample(true, 0.2, 8);
assertEquals(2, sample20.count());
JavaRDD<Integer> sample20WithoutReplacement = rdd.sample(false, 0.2, 2);
assertEquals(2, sample20WithoutReplacement.count());
}
// Verifies randomSplit(): weights {0.4, 0.6, 1.0} are normalized to ~20%/30%/50%
// of the 1000 elements; the split is random, so only approximate sizes are checked.
@Test
public void randomSplit() {
List<Integer> ints = new ArrayList<>(1000);
for (int i = 0; i < 1000; i++) {
ints.add(i);
}
JavaRDD<Integer> rdd = sc.parallelize(ints);
JavaRDD<Integer>[] splits = rdd.randomSplit(new double[] { 0.4, 0.6, 1.0 }, 31);
// the splits aren't perfect -- not enough data for them to be -- just check they're about right
assertEquals(3, splits.length);
long s0 = splits[0].count();
long s1 = splits[1].count();
long s2 = splits[2].count();
assertTrue(s0 + " not within expected range", s0 > 150 && s0 < 250);
// Fixed: the upper bound previously re-tested s0 instead of s1.
assertTrue(s1 + " not within expected range", s1 > 250 && s1 < 350);
assertTrue(s2 + " not within expected range", s2 > 430 && s2 < 570);
}
// Verifies sortByKey() with the default (ascending natural) ordering and with a
// custom comparator; reverseOrder + ascending=false yields ascending output again.
@Test
public void sortByKey() {
List<Tuple2<Integer, Integer>> pairs = new ArrayList<>();
pairs.add(new Tuple2<>(0, 4));
pairs.add(new Tuple2<>(3, 2));
pairs.add(new Tuple2<>(-1, 1));
JavaPairRDD<Integer, Integer> rdd = sc.parallelizePairs(pairs);
// Default comparator
JavaPairRDD<Integer, Integer> sortedRDD = rdd.sortByKey();
assertEquals(new Tuple2<>(-1, 1), sortedRDD.first());
List<Tuple2<Integer, Integer>> sortedPairs = sortedRDD.collect();
assertEquals(new Tuple2<>(0, 4), sortedPairs.get(1));
assertEquals(new Tuple2<>(3, 2), sortedPairs.get(2));
// Custom comparator: a reversed comparator sorted descending=false is ascending overall.
sortedRDD = rdd.sortByKey(Collections.reverseOrder(), false);
assertEquals(new Tuple2<>(-1, 1), sortedRDD.first());
sortedPairs = sortedRDD.collect();
assertEquals(new Tuple2<>(0, 4), sortedPairs.get(1));
assertEquals(new Tuple2<>(3, 2), sortedPairs.get(2));
}
// Verifies repartitionAndSortWithinPartitions(): keys are routed by a custom
// mod-2 partitioner and each resulting partition is sorted by key.
@SuppressWarnings("unchecked")
@Test
public void repartitionAndSortWithinPartitions() {
List<Tuple2<Integer, Integer>> pairs = new ArrayList<>();
pairs.add(new Tuple2<>(0, 5));
pairs.add(new Tuple2<>(3, 8));
pairs.add(new Tuple2<>(2, 6));
pairs.add(new Tuple2<>(0, 8));
pairs.add(new Tuple2<>(3, 8));
pairs.add(new Tuple2<>(1, 3));
JavaPairRDD<Integer, Integer> rdd = sc.parallelizePairs(pairs);
// Route even keys to partition 0 and odd keys to partition 1.
Partitioner partitioner = new Partitioner() {
@Override
public int numPartitions() {
return 2;
}
@Override
public int getPartition(Object key) {
return (Integer) key % 2;
}
};
JavaPairRDD<Integer, Integer> repartitioned =
rdd.repartitionAndSortWithinPartitions(partitioner);
// The custom partitioner must be retained by the result.
assertTrue(repartitioned.partitioner().isPresent());
assertEquals(repartitioned.partitioner().get(), partitioner);
// glom() exposes each partition as a list so per-partition order can be asserted.
List<List<Tuple2<Integer, Integer>>> partitions = repartitioned.glom().collect();
assertEquals(partitions.get(0),
Arrays.asList(new Tuple2<>(0, 5), new Tuple2<>(0, 8), new Tuple2<>(2, 6)));
assertEquals(partitions.get(1),
Arrays.asList(new Tuple2<>(1, 3), new Tuple2<>(3, 8), new Tuple2<>(3, 8)));
}
// Verifies that sc.emptyRDD() produces an RDD with zero elements.
@Test
public void emptyRDD() {
JavaRDD<String> rdd = sc.emptyRDD();
assertEquals("Empty RDD shouldn't have any values", 0, rdd.count());
}
// Verifies sortBy() using two different key-extraction functions (first and second
// tuple component) over the same RDD of pairs.
@Test
public void sortBy() {
List<Tuple2<Integer, Integer>> pairs = new ArrayList<>();
pairs.add(new Tuple2<>(0, 4));
pairs.add(new Tuple2<>(3, 2));
pairs.add(new Tuple2<>(-1, 1));
JavaRDD<Tuple2<Integer, Integer>> rdd = sc.parallelize(pairs);
// compare on first value
JavaRDD<Tuple2<Integer, Integer>> sortedRDD = rdd.sortBy(Tuple2::_1, true, 2);
assertEquals(new Tuple2<>(-1, 1), sortedRDD.first());
List<Tuple2<Integer, Integer>> sortedPairs = sortedRDD.collect();
assertEquals(new Tuple2<>(0, 4), sortedPairs.get(1));
assertEquals(new Tuple2<>(3, 2), sortedPairs.get(2));
// compare on second value
sortedRDD = rdd.sortBy(Tuple2::_2, true, 2);
assertEquals(new Tuple2<>(-1, 1), sortedRDD.first());
sortedPairs = sortedRDD.collect();
assertEquals(new Tuple2<>(3, 2), sortedPairs.get(1));
assertEquals(new Tuple2<>(0, 4), sortedPairs.get(2));
}
// Verifies foreach() visits every element exactly once, counted via a LongAccumulator.
@Test
public void foreach() {
LongAccumulator accum = sc.sc().longAccumulator();
JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello", "World"));
rdd.foreach(s -> accum.add(1));
assertEquals(2, accum.value().intValue());
}
// Verifies foreachPartition(): iterating every partition's iterator covers all elements.
@Test
public void foreachPartition() {
LongAccumulator accum = sc.sc().longAccumulator();
JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello", "World"));
rdd.foreachPartition(iter -> {
while (iter.hasNext()) {
iter.next();
accum.add(1);
}
});
assertEquals(2, accum.value().intValue());
}
// Verifies toLocalIterator() streams the RDD's elements to the driver in order.
@Test
public void toLocalIterator() {
List<Integer> correct = Arrays.asList(1, 2, 3, 4);
JavaRDD<Integer> rdd = sc.parallelize(correct);
List<Integer> result = Lists.newArrayList(rdd.toLocalIterator());
assertEquals(correct, result);
}
// Verifies zipWithUniqueId(): the assigned ids are all distinct (their exact values
// depend on partitioning, so only uniqueness is asserted).
@Test
public void zipWithUniqueId() {
List<Integer> dataArray = Arrays.asList(1, 2, 3, 4);
JavaPairRDD<Integer, Long> zip = sc.parallelize(dataArray).zipWithUniqueId();
JavaRDD<Long> indexes = zip.values();
assertEquals(4, new HashSet<>(indexes.collect()).size());
}
// Verifies zipWithIndex(): indices are consecutive longs starting at 0 in RDD order.
@Test
public void zipWithIndex() {
List<Integer> dataArray = Arrays.asList(1, 2, 3, 4);
JavaPairRDD<Integer, Long> zip = sc.parallelize(dataArray).zipWithIndex();
JavaRDD<Long> indexes = zip.values();
List<Long> correctIndexes = Arrays.asList(0L, 1L, 2L, 3L);
assertEquals(correctIndexes, indexes.collect());
}
// Verifies lookup(): returns all values for a key, both on a raw pair RDD and after
// a groupByKey (where the single grouped value is an Iterable of size 2).
@SuppressWarnings("unchecked")
@Test
public void lookup() {
JavaPairRDD<String, String> categories = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Apples", "Fruit"),
new Tuple2<>("Oranges", "Fruit"),
new Tuple2<>("Oranges", "Citrus")
));
assertEquals(2, categories.lookup("Oranges").size());
assertEquals(2, Iterables.size(categories.groupByKey().lookup("Oranges").get(0)));
}
// Verifies groupBy() with and without an explicit partition count: the predicate
// splits {1, 1, 2, 3, 5, 8, 13} into 2 even and 5 odd elements.
@Test
public void groupBy() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
// Renamed from the misleading "isOdd": x % 2 == 0 is true for EVEN numbers,
// which is what the lookup(true)/lookup(false) assertions below rely on.
Function<Integer, Boolean> isEven = x -> x % 2 == 0;
JavaPairRDD<Boolean, Iterable<Integer>> oddsAndEvens = rdd.groupBy(isEven);
assertEquals(2, oddsAndEvens.count());
assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0))); // Evens
assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
// Same grouping, forced into a single partition.
oddsAndEvens = rdd.groupBy(isEven, 1);
assertEquals(2, oddsAndEvens.count());
assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0))); // Evens
assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
}
// Regression test for SPARK-4459: groupBy() must work when called on a JavaPairRDD.
@Test
public void groupByOnPairRDD() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
// Renamed from the misleading "areOdd": the predicate is true when BOTH tuple
// components are even, matching the Evens/Odds assertions below.
Function<Tuple2<Integer, Integer>, Boolean> areBothEven =
x -> (x._1() % 2 == 0) && (x._2() % 2 == 0);
// zip(rdd) pairs each element with itself, producing a JavaPairRDD.
JavaPairRDD<Integer, Integer> pairRDD = rdd.zip(rdd);
JavaPairRDD<Boolean, Iterable<Tuple2<Integer, Integer>>> oddsAndEvens =
pairRDD.groupBy(areBothEven);
assertEquals(2, oddsAndEvens.count());
assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0))); // Evens
assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
// Same grouping, forced into a single partition.
oddsAndEvens = pairRDD.groupBy(areBothEven, 1);
assertEquals(2, oddsAndEvens.count());
assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0))); // Evens
assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
}
// Regression test for SPARK-4459: keyBy() must work when called on a JavaPairRDD.
// Keys each (x, x) pair by the string of its component sum.
@SuppressWarnings("unchecked")
@Test
public void keyByOnPairRDD() {
// Regression test for SPARK-4459
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
Function<Tuple2<Integer, Integer>, String> sumToString = x -> String.valueOf(x._1() + x._2());
JavaPairRDD<Integer, Integer> pairRDD = rdd.zip(rdd);
JavaPairRDD<String, Tuple2<Integer, Integer>> keyed = pairRDD.keyBy(sumToString);
assertEquals(7, keyed.count());
// Key "2" corresponds to the pair (1, 1).
assertEquals(1, (long) keyed.lookup("2").get(0)._1());
}
// Verifies two-way cogroup(): for each key, the result holds one Iterable of values
// from each input RDD.
@SuppressWarnings("unchecked")
@Test
public void cogroup() {
JavaPairRDD<String, String> categories = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Apples", "Fruit"),
new Tuple2<>("Oranges", "Fruit"),
new Tuple2<>("Oranges", "Citrus")
));
JavaPairRDD<String, Integer> prices = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Oranges", 2),
new Tuple2<>("Apples", 3)
));
JavaPairRDD<String, Tuple2<Iterable<String>, Iterable<Integer>>> cogrouped =
categories.cogroup(prices);
assertEquals("[Fruit, Citrus]", Iterables.toString(cogrouped.lookup("Oranges").get(0)._1()));
assertEquals("[2]", Iterables.toString(cogrouped.lookup("Oranges").get(0)._2()));
// Also ensure the full cogrouped RDD can be materialized without error.
cogrouped.collect();
}
// Verifies three-way cogroup(): the result is keyed Tuple3s of Iterables, one per input.
@SuppressWarnings("unchecked")
@Test
public void cogroup3() {
JavaPairRDD<String, String> categories = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Apples", "Fruit"),
new Tuple2<>("Oranges", "Fruit"),
new Tuple2<>("Oranges", "Citrus")
));
JavaPairRDD<String, Integer> prices = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Oranges", 2),
new Tuple2<>("Apples", 3)
));
JavaPairRDD<String, Integer> quantities = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Oranges", 21),
new Tuple2<>("Apples", 42)
));
JavaPairRDD<String, Tuple3<Iterable<String>, Iterable<Integer>, Iterable<Integer>>> cogrouped =
categories.cogroup(prices, quantities);
assertEquals("[Fruit, Citrus]", Iterables.toString(cogrouped.lookup("Oranges").get(0)._1()));
assertEquals("[2]", Iterables.toString(cogrouped.lookup("Oranges").get(0)._2()));
assertEquals("[42]", Iterables.toString(cogrouped.lookup("Apples").get(0)._3()));
// Also ensure the full cogrouped RDD can be materialized without error.
cogrouped.collect();
}
// Verifies four-way cogroup(): the result is keyed Tuple4s of Iterables, one per input.
@SuppressWarnings("unchecked")
@Test
public void cogroup4() {
JavaPairRDD<String, String> categories = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Apples", "Fruit"),
new Tuple2<>("Oranges", "Fruit"),
new Tuple2<>("Oranges", "Citrus")
));
JavaPairRDD<String, Integer> prices = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Oranges", 2),
new Tuple2<>("Apples", 3)
));
JavaPairRDD<String, Integer> quantities = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Oranges", 21),
new Tuple2<>("Apples", 42)
));
JavaPairRDD<String, String> countries = sc.parallelizePairs(Arrays.asList(
new Tuple2<>("Oranges", "BR"),
new Tuple2<>("Apples", "US")
));
JavaPairRDD<String, Tuple4<Iterable<String>, Iterable<Integer>, Iterable<Integer>,
Iterable<String>>> cogrouped = categories.cogroup(prices, quantities, countries);
assertEquals("[Fruit, Citrus]", Iterables.toString(cogrouped.lookup("Oranges").get(0)._1()));
assertEquals("[2]", Iterables.toString(cogrouped.lookup("Oranges").get(0)._2()));
assertEquals("[42]", Iterables.toString(cogrouped.lookup("Apples").get(0)._3()));
assertEquals("[BR]", Iterables.toString(cogrouped.lookup("Oranges").get(0)._4()));
// Also ensure the full cogrouped RDD can be materialized without error.
cogrouped.collect();
}
// Verifies leftOuterJoin(): every left-side record appears in the output, with the
// right side wrapped in an Optional that is empty for unmatched keys (key 3 here).
@SuppressWarnings("unchecked")
@Test
public void leftOuterJoin() {
JavaPairRDD<Integer, Integer> rdd1 = sc.parallelizePairs(Arrays.asList(
new Tuple2<>(1, 1),
new Tuple2<>(1, 2),
new Tuple2<>(2, 1),
new Tuple2<>(3, 1)
));
JavaPairRDD<Integer, Character> rdd2 = sc.parallelizePairs(Arrays.asList(
new Tuple2<>(1, 'x'),
new Tuple2<>(2, 'y'),
new Tuple2<>(2, 'z'),
new Tuple2<>(4, 'w')
));
// 2 matches for key 1, 2 for key 2, plus the unmatched key 3 = 5 rows.
List<Tuple2<Integer,Tuple2<Integer,Optional<Character>>>> joined =
rdd1.leftOuterJoin(rdd2).collect();
assertEquals(5, joined.size());
Tuple2<Integer,Tuple2<Integer,Optional<Character>>> firstUnmatched =
rdd1.leftOuterJoin(rdd2).filter(tup -> !tup._2()._2().isPresent()).first();
assertEquals(3, firstUnmatched._1().intValue());
}
// Verifies that fold() with a zero element and reduce() agree for integer addition.
@Test
public void foldReduce() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
Function2<Integer, Integer, Integer> add = (a, b) -> a + b;
int sum = rdd.fold(0, add);
assertEquals(33, sum);
sum = rdd.reduce(add);
assertEquals(33, sum);
}
// Verifies treeReduce() returns the same sum (-5) for every tree depth from 1 to 10.
@Test
public void treeReduce() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(-5, -4, -3, -2, -1, 1, 2, 3, 4), 10);
Function2<Integer, Integer, Integer> add = (a, b) -> a + b;
for (int depth = 1; depth <= 10; depth++) {
int sum = rdd.treeReduce(add, depth);
assertEquals(-5, sum);
}
}
// Verifies treeAggregate() returns the same sum (-5) for every tree depth from 1 to 10.
@Test
public void treeAggregate() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(-5, -4, -3, -2, -1, 1, 2, 3, 4), 10);
Function2<Integer, Integer, Integer> add = (a, b) -> a + b;
for (int depth = 1; depth <= 10; depth++) {
int sum = rdd.treeAggregate(0, add, add, depth);
assertEquals(-5, sum);
}
}
// Verifies aggregateByKey(): values are collected into a per-key HashSet, so
// duplicate values for a key (the two (1, 1) pairs) collapse to a single element.
@SuppressWarnings("unchecked")
@Test
public void aggregateByKey() {
JavaPairRDD<Integer, Integer> pairs = sc.parallelizePairs(
Arrays.asList(
new Tuple2<>(1, 1),
new Tuple2<>(1, 1),
new Tuple2<>(3, 2),
new Tuple2<>(5, 1),
new Tuple2<>(5, 3)), 2);
Map<Integer, HashSet<Integer>> sets = pairs.aggregateByKey(new HashSet<Integer>(),
// Sequence function: add a value into the partition-local set.
(a, b) -> {
a.add(b);
return a;
},
// Combiner function: merge two partition-local sets.
(a, b) -> {
a.addAll(b);
return a;
}).collectAsMap();
assertEquals(3, sets.size());
assertEquals(new HashSet<>(Arrays.asList(1)), sets.get(1));
assertEquals(new HashSet<>(Arrays.asList(2)), sets.get(3));
assertEquals(new HashSet<>(Arrays.asList(1, 3)), sets.get(5));
}
// Verifies foldByKey(): per-key sums with a zero element of 0.
@SuppressWarnings("unchecked")
@Test
public void foldByKey() {
List<Tuple2<Integer, Integer>> pairs = Arrays.asList(
new Tuple2<>(2, 1),
new Tuple2<>(2, 1),
new Tuple2<>(1, 1),
new Tuple2<>(3, 2),
new Tuple2<>(3, 1)
);
JavaPairRDD<Integer, Integer> rdd = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> sums = rdd.foldByKey(0, (a, b) -> a + b);
assertEquals(1, sums.lookup(1).get(0).intValue());
assertEquals(2, sums.lookup(2).get(0).intValue());
assertEquals(3, sums.lookup(3).get(0).intValue());
}
// Verifies per-key sums via reduceByKey(), the same result through collectAsMap(),
// and the driver-side shortcut reduceByKeyLocally().
@SuppressWarnings("unchecked")
@Test
public void reduceByKey() {
List<Tuple2<Integer, Integer>> pairs = Arrays.asList(
new Tuple2<>(2, 1),
new Tuple2<>(2, 1),
new Tuple2<>(1, 1),
new Tuple2<>(3, 2),
new Tuple2<>(3, 1)
);
JavaPairRDD<Integer, Integer> rdd = sc.parallelizePairs(pairs);
JavaPairRDD<Integer, Integer> counts = rdd.reduceByKey((a, b) -> a + b);
assertEquals(1, counts.lookup(1).get(0).intValue());
assertEquals(2, counts.lookup(2).get(0).intValue());
assertEquals(3, counts.lookup(3).get(0).intValue());
Map<Integer, Integer> localCounts = counts.collectAsMap();
assertEquals(1, localCounts.get(1).intValue());
assertEquals(2, localCounts.get(2).intValue());
assertEquals(3, localCounts.get(3).intValue());
// reduceByKeyLocally() performs the reduction and returns a Map directly.
localCounts = rdd.reduceByKeyLocally((a, b) -> a + b);
assertEquals(1, localCounts.get(1).intValue());
assertEquals(2, localCounts.get(2).intValue());
assertEquals(3, localCounts.get(3).intValue());
}
// Verifies countByValue() and the approximate variant countByValueApprox(); with a
// generous timeout the final approximate means match the exact counts.
@Test
public void approximateResults() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
Map<Integer, Long> countsByValue = rdd.countByValue();
assertEquals(2, countsByValue.get(1).longValue());
assertEquals(1, countsByValue.get(13).longValue());
PartialResult<Map<Integer, BoundedDouble>> approx = rdd.countByValueApprox(1);
Map<Integer, BoundedDouble> finalValue = approx.getFinalValue();
assertEquals(2.0, finalValue.get(1).mean(), 0.01);
assertEquals(1.0, finalValue.get(13).mean(), 0.01);
}
// Smoke test for first(), take() and takeSample(); only first() has a value assertion,
// the other two just need to complete without error.
@Test
public void take() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
assertEquals(1, rdd.first().intValue());
rdd.take(2);
rdd.takeSample(false, 2, 42);
}
// Verifies isEmpty() for empty sources, a non-empty RDD, and filters that do or
// do not remove every element.
@Test
public void isEmpty() {
assertTrue(sc.emptyRDD().isEmpty());
assertTrue(sc.parallelize(new ArrayList<Integer>()).isEmpty());
assertFalse(sc.parallelize(Arrays.asList(1)).isEmpty());
assertTrue(sc.parallelize(Arrays.asList(1, 2, 3), 3).filter(i -> i < 0).isEmpty());
assertFalse(sc.parallelize(Arrays.asList(1, 2, 3)).filter(i -> i > 1).isEmpty());
}
// Verifies cartesian(): the first element of the cross product pairs the first
// string with the first double.
@Test
public void cartesian() {
JavaDoubleRDD doubleRDD = sc.parallelizeDoubles(Arrays.asList(1.0, 1.0, 2.0, 3.0, 5.0, 8.0));
JavaRDD<String> stringRDD = sc.parallelize(Arrays.asList("Hello", "World"));
JavaPairRDD<String, Double> cartesian = stringRDD.cartesian(doubleRDD);
assertEquals(new Tuple2<>("Hello", 1.0), cartesian.first());
}
// Exercises the JavaDoubleRDD API surface: distinct, filter, union, caching, sum,
// stats(), mean/variance/stdev (population and sample flavors), first and take.
@Test
public void javaDoubleRDD() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 1.0, 2.0, 3.0, 5.0, 8.0));
JavaDoubleRDD distinct = rdd.distinct();
assertEquals(5, distinct.count());
JavaDoubleRDD filter = rdd.filter(x -> x > 2.0);
assertEquals(3, filter.count());
JavaDoubleRDD union = rdd.union(rdd);
assertEquals(12, union.count());
union = union.cache();
assertEquals(12, union.count());
assertEquals(20, rdd.sum(), 0.01);
StatCounter stats = rdd.stats();
assertEquals(20, stats.sum(), 0.01);
// Check the mean via both the RDD and the StatCounter computed above.
// (Fixed: the second assertion previously just duplicated rdd.mean().)
assertEquals(20/6.0, rdd.mean(), 0.01);
assertEquals(20/6.0, stats.mean(), 0.01);
assertEquals(6.22222, rdd.variance(), 0.01);
assertEquals(rdd.variance(), rdd.popVariance(), 1e-14);
assertEquals(7.46667, rdd.sampleVariance(), 0.01);
assertEquals(2.49444, rdd.stdev(), 0.01);
assertEquals(rdd.stdev(), rdd.popStdev(), 1e-14);
assertEquals(2.73252, rdd.sampleStdev(), 0.01);
// Smoke-test first() and take() on a double RDD.
rdd.first();
rdd.take(5);
}
// Verifies histogram() with generated buckets, with caller-provided buckets, and
// (SPARK-5744) on an empty double RDD.
@Test
public void javaDoubleRDDHistoGram() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0));
// Test using generated buckets
Tuple2<double[], long[]> results = rdd.histogram(2);
double[] expected_buckets = {1.0, 2.5, 4.0};
long[] expected_counts = {2, 2};
assertArrayEquals(expected_buckets, results._1(), 0.1);
assertArrayEquals(expected_counts, results._2());
// Test with provided buckets
long[] histogram = rdd.histogram(expected_buckets);
assertArrayEquals(expected_counts, histogram);
// SPARK-5744: histogram over an empty RDD must return zero counts, not fail.
assertArrayEquals(
new long[] {0},
sc.parallelizeDoubles(new ArrayList<>(0), 1).histogram(new double[]{0.0, 1.0}));
}
// Natural-order comparator for Double. Serializable so it can be shipped inside
// Spark closures to executors.
private static class DoubleComparator implements Comparator<Double>, Serializable {
@Override
public int compare(Double o1, Double o2) {
return o1.compareTo(o2);
}
}
// Verifies max() with an explicit comparator on a JavaDoubleRDD.
@Test
public void max() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0));
double max = rdd.max(new DoubleComparator());
assertEquals(4.0, max, 0.001);
}
// Verifies min() with an explicit comparator on a JavaDoubleRDD.
@Test
public void min() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0));
// Renamed the result variable from the misleading "max".
double min = rdd.min(new DoubleComparator());
assertEquals(1.0, min, 0.001);
}
// Verifies max() with the natural ordering on a JavaDoubleRDD.
@Test
public void naturalMax() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0));
double max = rdd.max();
assertEquals(4.0, max, 0.0);
}
// Verifies min() with the natural ordering on a JavaDoubleRDD.
@Test
public void naturalMin() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0));
// Renamed the result variable from the misleading "max".
double min = rdd.min();
assertEquals(1.0, min, 0.0);
}
// Verifies takeOrdered() with an explicit comparator and with the natural ordering.
@Test
public void takeOrdered() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0));
assertEquals(Arrays.asList(1.0, 2.0), rdd.takeOrdered(2, new DoubleComparator()));
assertEquals(Arrays.asList(1.0, 2.0), rdd.takeOrdered(2));
}
// Verifies top(): the two largest elements, in descending order.
@Test
public void top() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
List<Integer> top2 = rdd.top(2);
assertEquals(Arrays.asList(4, 3), top2);
}
// Integer addition as a named Function2, reused by the reduce/fold/aggregate tests below.
private static class AddInts implements Function2<Integer, Integer, Integer> {
@Override
public Integer call(Integer a, Integer b) {
return a + b;
}
}
// Verifies reduce() with a named Function2 (AddInts) rather than a lambda.
@Test
public void reduce() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
int sum = rdd.reduce(new AddInts());
assertEquals(10, sum);
}
// Verifies reduce() on a JavaDoubleRDD using a lambda.
@Test
public void reduceOnJavaDoubleRDD() {
JavaDoubleRDD rdd = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0));
double sum = rdd.reduce((v1, v2) -> v1 + v2);
assertEquals(10.0, sum, 0.001);
}
// Verifies fold() with a zero element and the named AddInts function.
@Test
public void fold() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
int sum = rdd.fold(0, new AddInts());
assertEquals(10, sum);
}
// Verifies aggregate() using the same function for the sequence and combine steps.
@Test
public void aggregate() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
int sum = rdd.aggregate(0, new AddInts(), new AddInts());
assertEquals(10, sum);
}
// Smoke test for the three map flavors (mapToDouble, mapToPair, map) plus cache();
// each result is collected to force evaluation.
@Test
public void map() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
JavaDoubleRDD doubles = rdd.mapToDouble(Integer::doubleValue).cache();
doubles.collect();
JavaPairRDD<Integer, Integer> pairs = rdd.mapToPair(x -> new Tuple2<>(x, x)).cache();
pairs.collect();
JavaRDD<String> strings = rdd.map(Object::toString).cache();
strings.collect();
}
// Verifies the three flatMap flavors over two sentences totaling 11 words.
@Test
public void flatMap() {
JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello World!",
"The quick brown fox jumps over the lazy dog."));
// flatMap: one output element per word.
JavaRDD<String> words = rdd.flatMap(x -> Arrays.asList(x.split(" ")).iterator());
assertEquals("Hello", words.first());
assertEquals(11, words.count());
// flatMapToPair: one (word, word) pair per word.
JavaPairRDD<String, String> pairsRDD = rdd.flatMapToPair(s -> {
List<Tuple2<String, String>> pairs = new LinkedList<>();
for (String word : s.split(" ")) {
pairs.add(new Tuple2<>(word, word));
}
return pairs.iterator();
}
);
assertEquals(new Tuple2<>("Hello", "Hello"), pairsRDD.first());
assertEquals(11, pairsRDD.count());
// flatMapToDouble: one word-length per word.
JavaDoubleRDD doubles = rdd.flatMapToDouble(s -> {
List<Double> lengths = new LinkedList<>();
for (String word : s.split(" ")) {
lengths.add((double) word.length());
}
return lengths.iterator();
});
assertEquals(5.0, doubles.first(), 0.01);
// Fixed: previously re-asserted pairsRDD.count(); the intent is to check the RDD
// produced by flatMapToDouble.
assertEquals(11, doubles.count());
}
// Regression test for SPARK-668: flatMapToPair (and mapToPair) on a JavaPairRDD
// must compile and run when the output pair type differs from the input's.
@SuppressWarnings("unchecked")
@Test
public void mapsFromPairsToPairs() {
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> pairRDD = sc.parallelizePairs(pairs);
// Regression test for SPARK-668:
JavaPairRDD<String, Integer> swapped = pairRDD.flatMapToPair(
item -> Collections.singletonList(item.swap()).iterator());
swapped.collect();
// There was never a bug here, but it's worth testing:
pairRDD.mapToPair(Tuple2::swap).collect();
}
// Verifies mapPartitions(): each of the 2 partitions is collapsed to its sum
// ({1,2} -> 3 and {3,4} -> 7).
@Test
public void mapPartitions() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4), 2);
JavaRDD<Integer> partitionSums = rdd.mapPartitions(iter -> {
int sum = 0;
while (iter.hasNext()) {
sum += iter.next();
}
return Collections.singletonList(sum).iterator();
});
assertEquals("[3, 7]", partitionSums.collect().toString());
}
// Verifies mapPartitionsWithIndex(): same per-partition sums as mapPartitions;
// the partition index argument is accepted but unused here.
@Test
public void mapPartitionsWithIndex() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4), 2);
JavaRDD<Integer> partitionSums = rdd.mapPartitionsWithIndex((index, iter) -> {
int sum = 0;
while (iter.hasNext()) {
sum += iter.next();
}
return Collections.singletonList(sum).iterator();
}, false);
assertEquals("[3, 7]", partitionSums.collect().toString());
}
// Verifies getNumPartitions() reports the explicitly requested partition counts
// for plain, double, and pair RDDs.
@Test
public void getNumPartitions(){
JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8), 3);
JavaDoubleRDD rdd2 = sc.parallelizeDoubles(Arrays.asList(1.0, 2.0, 3.0, 4.0), 2);
JavaPairRDD<String, Integer> rdd3 = sc.parallelizePairs(
Arrays.asList(
new Tuple2<>("a", 1),
new Tuple2<>("aa", 2),
new Tuple2<>("aaa", 3)
),
2);
assertEquals(3, rdd1.getNumPartitions());
assertEquals(2, rdd2.getNumPartitions());
assertEquals(2, rdd3.getNumPartitions());
}
// Verifies repartition() in both directions, checking that no partition ends up empty.
@Test
public void repartition() {
// Growing number of partitions (2 -> 4).
// NOTE(review): the original comment said "Shrinking"; the two labels were swapped.
JavaRDD<Integer> in1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8), 2);
JavaRDD<Integer> repartitioned1 = in1.repartition(4);
List<List<Integer>> result1 = repartitioned1.glom().collect();
assertEquals(4, result1.size());
for (List<Integer> l : result1) {
assertFalse(l.isEmpty());
}
// Shrinking number of partitions (4 -> 2).
JavaRDD<Integer> in2 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8), 4);
JavaRDD<Integer> repartitioned2 = in2.repartition(2);
List<List<Integer>> result2 = repartitioned2.glom().collect();
assertEquals(2, result2.size());
for (List<Integer> l: result2) {
assertFalse(l.isEmpty());
}
}
// Verifies persist(DISK_ONLY) on double, pair, and plain RDDs: results must be
// readable after persisting.
@SuppressWarnings("unchecked")
@Test
public void persist() {
JavaDoubleRDD doubleRDD = sc.parallelizeDoubles(Arrays.asList(1.0, 1.0, 2.0, 3.0, 5.0, 8.0));
doubleRDD = doubleRDD.persist(StorageLevel.DISK_ONLY());
assertEquals(20, doubleRDD.sum(), 0.1);
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> pairRDD = sc.parallelizePairs(pairs);
pairRDD = pairRDD.persist(StorageLevel.DISK_ONLY());
assertEquals("a", pairRDD.first()._2());
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
rdd = rdd.persist(StorageLevel.DISK_ONLY());
assertEquals(1, rdd.first().intValue());
}
// Verifies iterator(): computing partition 0 under an empty TaskContext yields
// the first element.
@Test
public void iterator() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 2);
TaskContext context = TaskContext$.MODULE$.empty();
assertEquals(1, rdd.iterator(rdd.partitions().get(0), context).next().intValue());
}
// Verifies glom(): the first of the 2 partitions contains [1, 2].
@Test
public void glom() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4), 2);
assertEquals("[1, 2]", rdd.glom().first().toString());
}
// File input / output tests are largely adapted from FileSuite:
// Round-trips an RDD through saveAsTextFile(): checks the raw part file contents
// and re-reads the directory via sc.textFile().
@Test
public void textFiles() throws IOException {
String outputDir = new File(tempDir, "output").getAbsolutePath();
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
rdd.saveAsTextFile(outputDir);
// Read the plain text file and check it's OK
File outputFile = new File(outputDir, "part-00000");
String content = Files.toString(outputFile, StandardCharsets.UTF_8);
assertEquals("1\n2\n3\n4\n", content);
// Also try reading it in as a text file RDD
List<String> expected = Arrays.asList("1", "2", "3", "4");
JavaRDD<String> readRDD = sc.textFile(outputDir);
assertEquals(expected, readRDD.collect());
}
// Verifies wholeTextFiles(): two part files are read back as (path, full-content)
// pairs, compared through a path->content map.
@Test
public void wholeTextFiles() throws Exception {
byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
byte[] content2 = "spark is also easy to use.\n".getBytes(StandardCharsets.UTF_8);
String tempDirName = tempDir.getAbsolutePath();
String path1 = new Path(tempDirName, "part-00000").toUri().getPath();
String path2 = new Path(tempDirName, "part-00001").toUri().getPath();
Files.write(content1, new File(path1));
Files.write(content2, new File(path2));
Map<String, String> container = new HashMap<>();
container.put(path1, new Text(content1).toString());
container.put(path2, new Text(content2).toString());
JavaPairRDD<String, String> readRDD = sc.wholeTextFiles(tempDirName, 3);
List<Tuple2<String, String>> result = readRDD.collect();
for (Tuple2<String, String> res : result) {
// Note that the paths from `wholeTextFiles` are in URI format on Windows,
// for example, file:/C:/a/b/c.
assertEquals(res._2(), container.get(new Path(res._1()).toUri().getPath()));
}
}
// Round-trips an RDD through saveAsTextFile() with DefaultCodec compression;
// sc.textFile() must transparently decompress on read.
@Test
public void textFilesCompressed() throws IOException {
String outputDir = new File(tempDir, "output").getAbsolutePath();
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
rdd.saveAsTextFile(outputDir, DefaultCodec.class);
// Try reading it in as a text file RDD
List<String> expected = Arrays.asList("1", "2", "3", "4");
JavaRDD<String> readRDD = sc.textFile(outputDir);
assertEquals(expected, readRDD.collect());
}
// Round-trips pairs through a Hadoop SequenceFile: writes as (IntWritable, Text)
// and reads back via sc.sequenceFile(), unwrapping to (Integer, String).
@SuppressWarnings("unchecked")
@Test
public void sequenceFile() {
String outputDir = new File(tempDir, "output").getAbsolutePath();
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
rdd.mapToPair(pair -> new Tuple2<>(new IntWritable(pair._1()), new Text(pair._2())))
.saveAsHadoopFile(outputDir, IntWritable.class, Text.class, SequenceFileOutputFormat.class);
// Try reading the output back as an object file
JavaPairRDD<Integer, String> readRDD = sc.sequenceFile(outputDir, IntWritable.class,
Text.class).mapToPair(pair -> new Tuple2<>(pair._1().get(), pair._2().toString()));
assertEquals(pairs, readRDD.collect());
}
// Verifies sc.binaryFiles(): a binary part file is read back as a
// (path, PortableDataStream) pair whose bytes match what was written.
@Test
public void binaryFiles() throws Exception {
// Reusing the wholeText files example
byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
String tempDirName = tempDir.getAbsolutePath();
File file1 = new File(tempDirName + "/part-00000");
// try-with-resources: the stream and channel are closed even if write() throws
// (the original leaked them on failure).
try (FileOutputStream fos1 = new FileOutputStream(file1);
FileChannel channel1 = fos1.getChannel()) {
channel1.write(ByteBuffer.wrap(content1));
}
JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName, 3);
List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
for (Tuple2<String, PortableDataStream> res : result) {
assertArrayEquals(content1, res._2().toArray());
}
}
// Verifies sc.binaryFiles() combined with cache(): the streams are fully read once
// via foreach, then the cached RDD is collected and its bytes re-checked.
@Test
public void binaryFilesCaching() throws Exception {
// Reusing the wholeText files example
byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
String tempDirName = tempDir.getAbsolutePath();
File file1 = new File(tempDirName + "/part-00000");
// try-with-resources: the stream and channel are closed even if write() throws
// (the original leaked them on failure).
try (FileOutputStream fos1 = new FileOutputStream(file1);
FileChannel channel1 = fos1.getChannel()) {
channel1.write(ByteBuffer.wrap(content1));
}
JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName).cache();
readRDD.foreach(pair -> pair._2().toArray()); // force the file to read
List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
for (Tuple2<String, PortableDataStream> res : result) {
assertArrayEquals(content1, res._2().toArray());
}
}
// Verifies sc.binaryRecords(): a file of 10 fixed-size records is split back into
// exactly 10 byte[] records, each matching the original content.
@Test
public void binaryRecords() throws Exception {
// Reusing the wholeText files example
byte[] content1 = "spark isn't always easy to use.\n".getBytes(StandardCharsets.UTF_8);
int numOfCopies = 10;
String tempDirName = tempDir.getAbsolutePath();
File file1 = new File(tempDirName + "/part-00000");
// try-with-resources: the stream and channel are closed even if a write() throws
// (the original leaked them on failure).
try (FileOutputStream fos1 = new FileOutputStream(file1);
FileChannel channel1 = fos1.getChannel()) {
for (int i = 0; i < numOfCopies; i++) {
channel1.write(ByteBuffer.wrap(content1));
}
}
JavaRDD<byte[]> readRDD = sc.binaryRecords(tempDirName, content1.length);
assertEquals(numOfCopies, readRDD.count());
List<byte[]> result = readRDD.collect();
for (byte[] res : result) {
assertArrayEquals(content1, res);
}
}
// Writes pairs with the new-API saveAsNewAPIHadoopFile() and reads them back with
// the old-API sc.sequenceFile(), checking string equality of the pairs.
@SuppressWarnings("unchecked")
@Test
public void writeWithNewAPIHadoopFile() {
String outputDir = new File(tempDir, "output").getAbsolutePath();
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
rdd.mapToPair(pair -> new Tuple2<>(new IntWritable(pair._1()), new Text(pair._2())))
.saveAsNewAPIHadoopFile(outputDir, IntWritable.class, Text.class,
org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat.class);
JavaPairRDD<IntWritable, Text> output =
sc.sequenceFile(outputDir, IntWritable.class, Text.class);
assertEquals(pairs.toString(), output.map(Tuple2::toString).collect().toString());
}
// Writes pairs with the old-API saveAsHadoopFile() and reads them back with the
// new-API sc.newAPIHadoopFile(), checking string equality of the pairs.
@SuppressWarnings("unchecked")
@Test
public void readWithNewAPIHadoopFile() throws IOException {
String outputDir = new File(tempDir, "output").getAbsolutePath();
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
rdd.mapToPair(pair -> new Tuple2<>(new IntWritable(pair._1()), new Text(pair._2())))
.saveAsHadoopFile(outputDir, IntWritable.class, Text.class, SequenceFileOutputFormat.class);
JavaPairRDD<IntWritable, Text> output = sc.newAPIHadoopFile(outputDir,
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat.class,
IntWritable.class, Text.class, Job.getInstance().getConfiguration());
assertEquals(pairs.toString(), output.map(Tuple2::toString).collect().toString());
}
// Round-trips an RDD of ints through saveAsObjectFile() / sc.objectFile().
@Test
public void objectFilesOfInts() {
String outputDir = new File(tempDir, "output").getAbsolutePath();
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
rdd.saveAsObjectFile(outputDir);
// Try reading the output back as an object file
List<Integer> expected = Arrays.asList(1, 2, 3, 4);
JavaRDD<Integer> readRDD = sc.objectFile(outputDir);
assertEquals(expected, readRDD.collect());
}
// Round-trips an RDD of Tuple2s through saveAsObjectFile() / sc.objectFile(),
// relying on Java serialization of the tuple type.
@SuppressWarnings("unchecked")
@Test
public void objectFilesOfComplexTypes() {
String outputDir = new File(tempDir, "output").getAbsolutePath();
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
rdd.saveAsObjectFile(outputDir);
// Try reading the output back as an object file
JavaRDD<Tuple2<Integer, String>> readRDD = sc.objectFile(outputDir);
assertEquals(pairs, readRDD.collect());
}
// Round-trips pairs through the old Hadoop API: saveAsHadoopFile() then
// sc.hadoopFile() with SequenceFileInputFormat.
@SuppressWarnings("unchecked")
@Test
public void hadoopFile() {
String outputDir = new File(tempDir, "output").getAbsolutePath();
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
rdd.mapToPair(pair -> new Tuple2<>(new IntWritable(pair._1()), new Text(pair._2())))
.saveAsHadoopFile(outputDir, IntWritable.class, Text.class, SequenceFileOutputFormat.class);
JavaPairRDD<IntWritable, Text> output = sc.hadoopFile(outputDir,
SequenceFileInputFormat.class, IntWritable.class, Text.class);
assertEquals(pairs.toString(), output.map(Tuple2::toString).collect().toString());
}
// Same round-trip as hadoopFile(), but writing with DefaultCodec compression;
// reading must transparently decompress.
@SuppressWarnings("unchecked")
@Test
public void hadoopFileCompressed() {
String outputDir = new File(tempDir, "output_compressed").getAbsolutePath();
List<Tuple2<Integer, String>> pairs = Arrays.asList(
new Tuple2<>(1, "a"),
new Tuple2<>(2, "aa"),
new Tuple2<>(3, "aaa")
);
JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
rdd.mapToPair(pair -> new Tuple2<>(new IntWritable(pair._1()), new Text(pair._2())))
.saveAsHadoopFile(outputDir, IntWritable.class, Text.class,
SequenceFileOutputFormat.class, DefaultCodec.class);
JavaPairRDD<IntWritable, Text> output = sc.hadoopFile(outputDir,
SequenceFileInputFormat.class, IntWritable.class, Text.class);
assertEquals(pairs.toString(), output.map(Tuple2::toString).collect().toString());
}
@Test
public void zip() {
  // Zipping an RDD with a same-length RDD derived from it must succeed;
  // count() forces evaluation, and the test passes if no exception is thrown.
  JavaRDD<Integer> ints = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
  JavaDoubleRDD doubles = ints.mapToDouble(Integer::doubleValue);
  ints.zip(doubles).count();
}
@Test
public void zipPartitions() {
  // Two 2-partition RDDs of sizes 6 and 4: partition sizes pair up as
  // (3, 2) and (3, 2), hence the flattened expectation below.
  JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6), 2);
  JavaRDD<String> rdd2 = sc.parallelize(Arrays.asList("1", "2", "3", "4"), 2);
  FlatMapFunction2<Iterator<Integer>, Iterator<String>, Integer> partitionSizes =
      (ints, strings) -> Arrays.asList(Iterators.size(ints), Iterators.size(strings)).iterator();
  assertEquals("[3, 2, 3, 2]", rdd1.zipPartitions(rdd2, partitionSizes).collect().toString());
}
@SuppressWarnings("deprecation")
@Test
public void accumulators() {
  // Exercises the legacy (deprecated) Accumulator API from Java: the built-in
  // int and double accumulators plus a hand-rolled AccumulatorParam.
  JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));

  // Starts at 10; adding 1..5 yields 25.
  Accumulator<Integer> intAccum = sc.intAccumulator(10);
  rdd.foreach(intAccum::add);
  assertEquals((Integer) 25, intAccum.value());

  Accumulator<Double> doubleAccum = sc.doubleAccumulator(10.0);
  rdd.foreach(x -> doubleAccum.add((double) x));
  assertEquals((Double) 25.0, doubleAccum.value());

  // Try a custom accumulator type
  AccumulatorParam<Float> floatAccumulatorParam = new AccumulatorParam<Float>() {
    @Override
    public Float addInPlace(Float r, Float t) {
      return r + t;
    }

    @Override
    public Float addAccumulator(Float r, Float t) {
      return r + t;
    }

    @Override
    public Float zero(Float initialValue) {
      return 0.0f;
    }
  };

  Accumulator<Float> floatAccum = sc.accumulator(10.0f, floatAccumulatorParam);
  rdd.foreach(x -> floatAccum.add((float) x));
  assertEquals((Float) 25.0f, floatAccum.value());

  // Test the setValue method
  floatAccum.setValue(5.0f);
  assertEquals((Float) 5.0f, floatAccum.value());
}
@Test
public void keyBy() {
  // keyBy should pair each element with its derived key, preserving order.
  JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2));
  List<Tuple2<String, Integer>> keyed = rdd.keyBy(Object::toString).collect();
  assertEquals(new Tuple2<>("1", 1), keyed.get(0));
  assertEquals(new Tuple2<>("2", 2), keyed.get(1));
}
@Test
public void checkpointAndComputation() {
  // Checkpointing is lazy: the flag flips only after an action runs the DAG.
  sc.setCheckpointDir(tempDir.getAbsolutePath());
  JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
  assertFalse(rdd.isCheckpointed());
  rdd.checkpoint();
  rdd.count(); // Forces the DAG to cause a checkpoint
  assertTrue(rdd.isCheckpointed());
  // Data must survive the checkpoint unchanged.
  assertEquals(Arrays.asList(1, 2, 3, 4, 5), rdd.collect());
}
@Test
public void checkpointAndRestore() {
  // After checkpointing, the data must be recoverable directly from the
  // checkpoint file via SparkContext.checkpointFile().
  sc.setCheckpointDir(tempDir.getAbsolutePath());
  JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
  assertFalse(rdd.isCheckpointed());
  rdd.checkpoint();
  rdd.count(); // Forces the DAG to cause a checkpoint
  assertTrue(rdd.isCheckpointed());
  assertTrue(rdd.getCheckpointFile().isPresent());
  JavaRDD<Integer> recovered = sc.checkpointFile(rdd.getCheckpointFile().get());
  assertEquals(Arrays.asList(1, 2, 3, 4, 5), recovered.collect());
}
@Test
public void combineByKey() {
  JavaRDD<Integer> originalRDD = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6));
  // Key each value by its remainder mod 3; combine = sum per key.
  Function<Integer, Integer> keyFunction = v1 -> v1 % 3;
  Function<Integer, Integer> createCombinerFunction = v1 -> v1;
  Function2<Integer, Integer, Integer> mergeValueFunction = (v1, v2) -> v1 + v2;
  JavaPairRDD<Integer, Integer> combinedRDD = originalRDD.keyBy(keyFunction)
    .combineByKey(createCombinerFunction, mergeValueFunction, mergeValueFunction);
  Map<Integer, Integer> results = combinedRDD.collectAsMap();
  // Per-key sums: key 0 -> 3+6=9, key 1 -> 1+4=5, key 2 -> 2+5=7.
  ImmutableMap<Integer, Integer> expected = ImmutableMap.of(0, 9, 1, 5, 2, 7);
  assertEquals(expected, results);
  // Build a default partitioner the same way Spark would (Scala interop:
  // an empty Seq of other RDDs converted from a Java collection).
  Partitioner defaultPartitioner = Partitioner.defaultPartitioner(
    combinedRDD.rdd(),
    JavaConverters.collectionAsScalaIterableConverter(
      Collections.<RDD<?>>emptyList()).asScala().toSeq());
  // Repeat via the full overload (explicit partitioner, map-side combine
  // disabled, Kryo serializer); the result must be identical.
  combinedRDD = originalRDD.keyBy(keyFunction)
    .combineByKey(
     createCombinerFunction,
     mergeValueFunction,
     mergeValueFunction,
     defaultPartitioner,
     false,
     new KryoSerializer(new SparkConf()));
  results = combinedRDD.collectAsMap();
  assertEquals(expected, results);
}
@SuppressWarnings("unchecked")
@Test
public void mapOnPairRDD() {
  // Key each value by its parity, then swap keys and values.
  JavaRDD<Integer> ints = sc.parallelize(Arrays.asList(1, 2, 3, 4));
  JavaPairRDD<Integer, Integer> byParity = ints.mapToPair(i -> new Tuple2<>(i, i % 2));
  JavaPairRDD<Integer, Integer> swapped =
      byParity.mapToPair(pair -> new Tuple2<>(pair._2(), pair._1()));
  List<Tuple2<Integer, Integer>> expected = Arrays.asList(
      new Tuple2<>(1, 1),
      new Tuple2<>(0, 2),
      new Tuple2<>(1, 3),
      new Tuple2<>(0, 4));
  assertEquals(expected, swapped.collect());
}
@SuppressWarnings("unchecked")
@Test
public void collectPartitions() {
  // Seven elements over three partitions split as [1,2], [3,4], [5,6,7].
  JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7), 3);
  JavaPairRDD<Integer, Integer> rdd2 = rdd1.mapToPair(i -> new Tuple2<>(i, i % 2));

  // Fetch a single partition of the plain RDD.
  assertEquals(Arrays.asList(1, 2), rdd1.collectPartitions(new int[] {0})[0]);

  // Fetch the remaining two partitions in one call.
  List<Integer>[] parts = rdd1.collectPartitions(new int[] {1, 2});
  assertEquals(Arrays.asList(3, 4), parts[0]);
  assertEquals(Arrays.asList(5, 6, 7), parts[1]);

  // Same exercise on the pair RDD.
  assertEquals(
      Arrays.asList(new Tuple2<>(1, 1), new Tuple2<>(2, 0)),
      rdd2.collectPartitions(new int[] {0})[0]);
  List<Tuple2<Integer, Integer>>[] pairParts = rdd2.collectPartitions(new int[] {1, 2});
  assertEquals(Arrays.asList(new Tuple2<>(3, 1), new Tuple2<>(4, 0)), pairParts[0]);
  assertEquals(
      Arrays.asList(new Tuple2<>(5, 1), new Tuple2<>(6, 0), new Tuple2<>(7, 1)),
      pairParts[1]);
}
@Test
public void countApproxDistinct() {
  // 100k elements covering only `distinctCount` distinct values; the estimate
  // at relative accuracy 0.05 should land within 10% of the truth.
  int distinctCount = 100;
  List<Integer> arrayData = new ArrayList<>();
  for (int i = 0; i < 100000; i++) {
    arrayData.add(i % distinctCount);
  }
  JavaRDD<Integer> simpleRdd = sc.parallelize(arrayData, 10);
  double relativeError =
      Math.abs((simpleRdd.countApproxDistinct(0.05) - distinctCount) / (distinctCount * 1.0));
  assertTrue(relativeError <= 0.1);
}
@Test
public void countApproxDistinctByKey() {
  // Key i carries exactly i distinct values (0..i-1), for i in [10, 100).
  List<Tuple2<Integer, Integer>> arrayData = new ArrayList<>();
  for (int i = 10; i < 100; i++) {
    for (int j = 0; j < i; j++) {
      arrayData.add(new Tuple2<>(i, j));
    }
  }
  JavaPairRDD<Integer, Integer> pairRdd = sc.parallelizePairs(arrayData);
  // With a very tight relative standard deviation, every per-key estimate
  // should fall within 10% of the key's true distinct count (which equals
  // the key itself by construction).
  for (Tuple2<Integer, Long> entry : pairRdd.countApproxDistinctByKey(0.001, 8).collect()) {
    double exact = entry._1();
    long estimated = entry._2();
    assertTrue(Math.abs((estimated - exact) / exact) < 0.1);
  }
}
@Test
public void collectAsMapWithIntArrayValues() {
  // Regression test for SPARK-1040: collectAsMap used to throw a
  // ClassCastException when values were primitive arrays.
  JavaPairRDD<Integer, int[]> pairRDD = sc.parallelize(Arrays.asList(1))
      .mapToPair(x -> new Tuple2<>(x, new int[]{x}));
  pairRDD.collect(); // Works fine
  pairRDD.collectAsMap(); // Used to crash with ClassCastException
}
@SuppressWarnings("unchecked")
@Test
public void collectAsMapAndSerialize() throws Exception {
  // The map returned by collectAsMap() must itself be Java-serializable.
  JavaPairRDD<String, Integer> rdd =
      sc.parallelizePairs(Arrays.asList(new Tuple2<>("foo", 1)));
  Map<String, Integer> map = rdd.collectAsMap();
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  // Close (and therefore flush) the object stream before reading the buffer;
  // the original left both object streams unclosed.
  try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
    out.writeObject(map);
  }
  Map<String, Integer> deserializedMap;
  try (ObjectInputStream in =
      new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
    deserializedMap = (Map<String, Integer>) in.readObject();
  }
  assertEquals(1, deserializedMap.get("foo").intValue());
}
@Test
@SuppressWarnings("unchecked")
public void sampleByKey() {
  // Keys are parities (0/1); sample half the even-keyed rows, all odd ones.
  JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8), 3);
  JavaPairRDD<Integer, Integer> rdd2 = rdd1.mapToPair(i -> new Tuple2<>(i % 2, 1));
  Map<Integer, Double> fractions = new HashMap<>();
  fractions.put(0, 0.5);
  fractions.put(1, 1.0);
  // Sampling with replacement: both keys must still be represented.
  Map<Integer, Long> wrCounts = rdd2.sampleByKey(true, fractions, 1L).countByKey();
  assertEquals(2, wrCounts.size());
  assertTrue(wrCounts.get(0) > 0);
  assertTrue(wrCounts.get(1) > 0);
  // Sampling without replacement: likewise.
  Map<Integer, Long> worCounts = rdd2.sampleByKey(false, fractions, 1L).countByKey();
  assertEquals(2, worCounts.size());
  assertTrue(worCounts.get(0) > 0);
  assertTrue(worCounts.get(1) > 0);
}
@Test
@SuppressWarnings("unchecked")
public void sampleByKeyExact() {
  // Keys are parities (0/1); there are 4 even and 4 odd elements, so with
  // exact sampling the per-key counts are deterministic: 2 and 4.
  JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8), 3);
  JavaPairRDD<Integer, Integer> rdd2 = rdd1.mapToPair(i -> new Tuple2<>(i % 2, 1));
  Map<Integer, Double> fractions = new HashMap<>();
  fractions.put(0, 0.5);
  fractions.put(1, 1.0);
  JavaPairRDD<Integer, Integer> wrExact = rdd2.sampleByKeyExact(true, fractions, 1L);
  Map<Integer, Long> wrExactCounts = wrExact.countByKey();
  assertEquals(2, wrExactCounts.size());
  // assertEquals reports the actual value on failure, unlike the original
  // assertTrue(x == 2), and avoids relying on boxed-Long unboxing in `==`.
  assertEquals(2L, wrExactCounts.get(0).longValue());
  assertEquals(4L, wrExactCounts.get(1).longValue());
  JavaPairRDD<Integer, Integer> worExact = rdd2.sampleByKeyExact(false, fractions, 1L);
  Map<Integer, Long> worExactCounts = worExact.countByKey();
  assertEquals(2, worExactCounts.size());
  assertEquals(2L, worExactCounts.get(0).longValue());
  assertEquals(4L, worExactCounts.get(1).longValue());
}
/** Minimal serializable payload used to test collecting RDDs of user-defined classes. */
private static class SomeCustomClass implements Serializable {
  SomeCustomClass() {
    // Intentionally left blank
  }
}
@Test
public void collectUnderlyingScalaRDD() {
  // retag() supplies the ClassTag the Scala-side collect() needs to build a
  // properly-typed array for a Java-defined element class.
  List<SomeCustomClass> data = new ArrayList<>();
  for (int i = 0; i < 100; i++) {
    data.add(new SomeCustomClass());
  }
  SomeCustomClass[] collected =
      (SomeCustomClass[]) sc.parallelize(data).rdd().retag(SomeCustomClass.class).collect();
  assertEquals(data.size(), collected.length);
}
/** Map function that always throws, used to test async-action error propagation. */
private static final class BuggyMapFunction<T> implements Function<T, T> {
  @Override
  public T call(T x) {
    throw new IllegalStateException("Custom exception!");
  }
}
@Test
public void collectAsync() throws Exception {
  List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
  JavaFutureAction<List<Integer>> future = sc.parallelize(data, 1).collectAsync();
  // get() blocks until the job finishes; afterwards check the terminal state.
  assertEquals(data, future.get());
  assertFalse(future.isCancelled());
  assertTrue(future.isDone());
  assertEquals(1, future.jobIds().size());
}
@Test
public void takeAsync() throws Exception {
  List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
  JavaFutureAction<List<Integer>> future = sc.parallelize(data, 1).takeAsync(1);
  // takeAsync(1) over a single ordered partition yields just the first element.
  List<Integer> result = future.get();
  assertEquals(1, result.size());
  assertEquals((Integer) 1, result.get(0));
  assertFalse(future.isCancelled());
  assertTrue(future.isDone());
  assertEquals(1, future.jobIds().size());
}
@Test
public void foreachAsync() throws Exception {
  List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
  // A no-op foreach: we only care that the async action completes cleanly.
  JavaFutureAction<Void> future = sc.parallelize(data, 1).foreachAsync(element -> {});
  future.get();
  assertFalse(future.isCancelled());
  assertTrue(future.isDone());
  assertEquals(1, future.jobIds().size());
}
@Test
public void countAsync() throws Exception {
  List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
  JavaFutureAction<Long> future = sc.parallelize(data, 1).countAsync();
  long count = future.get();
  assertEquals(data.size(), count);
  // The future must report normal completion and exactly one submitted job.
  assertFalse(future.isCancelled());
  assertTrue(future.isDone());
  assertEquals(1, future.jobIds().size());
}
@Test
public void testAsyncActionCancellation() throws Exception {
  List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
  JavaFutureAction<Void> future = sc.parallelize(data, 1).foreachAsync(element -> {
    Thread.sleep(10000); // To ensure that the job won't finish before it's cancelled.
  });
  future.cancel(true);
  assertTrue(future.isCancelled());
  assertTrue(future.isDone());
  // get() on a cancelled future must fail fast with CancellationException.
  try {
    future.get(2000, TimeUnit.MILLISECONDS);
    fail("Expected future.get() for cancelled job to throw CancellationException");
  } catch (CancellationException ignored) {
    // pass
  }
}
@Test
public void testAsyncActionErrorWrapping() throws Exception {
  // A failure inside the job must surface through future.get() as an
  // ExecutionException whose stack trace carries the original message.
  List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
  JavaRDD<Integer> rdd = sc.parallelize(data, 1);
  JavaFutureAction<Long> future = rdd.map(new BuggyMapFunction<>()).countAsync();
  try {
    future.get(2, TimeUnit.SECONDS);
    // Fixed typo in the failure message: "ExcecutionException".
    fail("Expected future.get() for failed job to throw ExecutionException");
  } catch (ExecutionException ee) {
    assertTrue(Throwables.getStackTraceAsString(ee).contains("Custom exception!"));
  }
  assertTrue(future.isDone());
}
// Marker classes used only by testRegisterKryoClasses below.
static class Class1 {}
static class Class2 {}
@Test
public void testRegisterKryoClasses() {
  // registerKryoClasses should join the fully-qualified class names into the
  // spark.kryo.classesToRegister configuration entry, comma-separated.
  SparkConf conf = new SparkConf();
  conf.registerKryoClasses(new Class<?>[]{ Class1.class, Class2.class });
  String expected = Class1.class.getName() + "," + Class2.class.getName();
  assertEquals(expected, conf.get("spark.kryo.classesToRegister"));
}
@Test
public void testGetPersistentRDDs() {
  // Nothing is cached in a fresh context.
  assertTrue(sc.getPersistentRDDs().isEmpty());
  sc.parallelize(Arrays.asList("a", "b")).setName("RDD1").cache();
  sc.parallelize(Arrays.asList("c", "d")).setName("RDD2").cache();
  java.util.Map<Integer, JavaRDD<?>> cachedRddsMap = sc.getPersistentRDDs();
  assertEquals(2, cachedRddsMap.size());
  // The two RDDs created above receive ids 0 and 1 in this fresh context.
  assertEquals("RDD1", cachedRddsMap.get(0).name());
  assertEquals("RDD2", cachedRddsMap.get(1).name());
}
}
| 9,583 |
0 | Create_ds/spark/core/src/test/java/test/org/apache | Create_ds/spark/core/src/test/java/test/org/apache/spark/JavaTaskContextCompileCheck.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.org.apache.spark;
import org.apache.spark.TaskContext;
import org.apache.spark.util.TaskCompletionListener;
import org.apache.spark.util.TaskFailureListener;
/**
 * Something to make sure that TaskContext can be used in Java.
 */
public class JavaTaskContextCompileCheck {

  public static void test() {
    TaskContext tc = TaskContext.get();
    // Exercise the public accessors; this is a compile-time API check, so the
    // return values are deliberately ignored.
    tc.isCompleted();
    tc.isInterrupted();
    tc.addTaskCompletionListener(new JavaTaskCompletionListenerImpl());
    tc.addTaskFailureListener(new JavaTaskFailureListenerImpl());
    tc.attemptNumber();
    tc.partitionId();
    tc.stageId();
    tc.stageAttemptNumber();
    tc.taskAttemptId();
  }

  /**
   * A simple implementation of TaskCompletionListener that makes sure TaskCompletionListener and
   * TaskContext is Java friendly.
   */
  static class JavaTaskCompletionListenerImpl implements TaskCompletionListener {
    @Override
    public void onTaskCompletion(TaskContext context) {
      context.isCompleted();
      context.isInterrupted();
      context.stageId();
      context.stageAttemptNumber();
      context.partitionId();
      // Registering a listener from inside a listener callback must also compile.
      context.addTaskCompletionListener(this);
    }
  }

  /**
   * A simple implementation of TaskFailureListener that makes sure TaskFailureListener and
   * TaskContext is Java friendly.
   */
  static class JavaTaskFailureListenerImpl implements TaskFailureListener {
    @Override
    public void onTaskFailure(TaskContext context, Throwable error) {
      // Intentionally empty: only the signature matters for this compile check.
    }
  }
}
| 9,584 |
0 | Create_ds/spark/core/src/test/java/org/apache | Create_ds/spark/core/src/test/java/org/apache/spark/JavaJdbcRDDSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.rdd.JdbcRDD;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests that JdbcRDD can be created and used from Java against an embedded
 * Derby database populated with 100 rows of DATA = 2*i for i in 1..100.
 */
public class JavaJdbcRDDSuite implements Serializable {
  private transient JavaSparkContext sc;

  @Before
  public void setUp() throws ClassNotFoundException, SQLException {
    sc = new JavaSparkContext("local", "JavaAPISuite");
    Class.forName("org.apache.derby.jdbc.EmbeddedDriver");
    // try-with-resources closes the statements and the connection even when
    // table creation or population throws (the original leaked both
    // statements on error and only closed the connection).
    try (Connection connection =
        DriverManager.getConnection("jdbc:derby:target/JavaJdbcRDDSuiteDb;create=true")) {
      try {
        try (Statement create = connection.createStatement()) {
          create.execute(
            "CREATE TABLE FOO(" +
            "ID INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1, INCREMENT BY 1)," +
            "DATA INTEGER)");
        }
        try (PreparedStatement insert =
            connection.prepareStatement("INSERT INTO FOO(DATA) VALUES(?)")) {
          for (int i = 1; i <= 100; i++) {
            insert.setInt(1, i * 2);
            insert.executeUpdate();
          }
        }
      } catch (SQLException e) {
        // If table doesn't exist... (Derby SQLState X0Y32: object already
        // exists, i.e. the table survives from a previous run with the same data)
        if (e.getSQLState().compareTo("X0Y32") != 0) {
          throw e;
        }
      }
    }
  }

  @After
  public void tearDown() throws SQLException {
    try {
      DriverManager.getConnection("jdbc:derby:target/JavaJdbcRDDSuiteDb;shutdown=true");
    } catch (SQLException e) {
      // Throw if not normal single database shutdown
      // https://db.apache.org/derby/docs/10.2/ref/rrefexcept71493.html
      if (e.getSQLState().compareTo("08006") != 0) {
        throw e;
      }
    }
    sc.stop();
    sc = null;
  }

  @Test
  public void testJavaJdbcRDD() throws Exception {
    // Sum of DATA is 2 * (1 + 2 + ... + 100) = 10100.
    JavaRDD<Integer> rdd = JdbcRDD.create(
      sc,
      () -> DriverManager.getConnection("jdbc:derby:target/JavaJdbcRDDSuiteDb"),
      "SELECT DATA FROM FOO WHERE ? <= ID AND ID <= ?",
      1, 100, 1,
      r -> r.getInt(1)
    ).cache();
    Assert.assertEquals(100, rdd.count());
    Assert.assertEquals(Integer.valueOf(10100), rdd.reduce(Integer::sum));
  }
}
| 9,585 |
0 | Create_ds/spark/core/src/test/java/org/apache | Create_ds/spark/core/src/test/java/org/apache/spark/ExecutorPluginSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark;
import org.apache.spark.api.java.JavaSparkContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Verifies that executor plugins listed in spark.executor.plugins are loaded,
 * initialized, and shut down. The test plugins below record their lifecycle
 * calls in static counters on this class.
 */
public class ExecutorPluginSuite {
  private static final String EXECUTOR_PLUGIN_CONF_NAME = "spark.executor.plugins";
  private static final String testBadPluginName = TestBadShutdownPlugin.class.getName();
  private static final String testPluginName = TestExecutorPlugin.class.getName();
  private static final String testSecondPluginName = TestSecondPlugin.class.getName();

  // Static value modified by testing plugins to ensure plugins loaded correctly.
  public static int numSuccessfulPlugins = 0;

  // Static value modified by testing plugins to verify plugins shut down properly.
  public static int numSuccessfulTerminations = 0;

  private JavaSparkContext sc;

  @Before
  public void setUp() {
    // Reset the shared counters so every test starts from a clean slate.
    sc = null;
    numSuccessfulPlugins = 0;
    numSuccessfulTerminations = 0;
  }

  @After
  public void tearDown() {
    if (sc != null) {
      sc.stop();
      sc = null;
    }
  }

  // Builds a local-mode SparkConf carrying the given comma-separated plugin list.
  private SparkConf initializeSparkConf(String pluginNames) {
    return new SparkConf()
      .setMaster("local")
      .setAppName("test")
      .set(EXECUTOR_PLUGIN_CONF_NAME, pluginNames);
  }

  @Test
  public void testPluginClassDoesNotExist() {
    SparkConf conf = initializeSparkConf("nonexistent.plugin");
    try {
      sc = new JavaSparkContext(conf);
      fail("No exception thrown for nonexistent plugin");
    } catch (Exception e) {
      // We cannot catch ClassNotFoundException directly because Java doesn't think it'll be thrown
      assertTrue(e.toString().startsWith("java.lang.ClassNotFoundException"));
    }
  }

  @Test
  public void testAddPlugin() throws InterruptedException {
    // Load the sample TestExecutorPlugin, which will change the value of numSuccessfulPlugins
    SparkConf conf = initializeSparkConf(testPluginName);
    sc = new JavaSparkContext(conf);
    assertEquals(1, numSuccessfulPlugins);
    sc.stop();
    sc = null;
    assertEquals(1, numSuccessfulTerminations);
  }

  @Test
  public void testAddMultiplePlugins() throws InterruptedException {
    // Load two plugins and verify they both execute.
    SparkConf conf = initializeSparkConf(testPluginName + "," + testSecondPluginName);
    sc = new JavaSparkContext(conf);
    assertEquals(2, numSuccessfulPlugins);
    sc.stop();
    sc = null;
    assertEquals(2, numSuccessfulTerminations);
  }

  @Test
  public void testPluginShutdownWithException() {
    // Verify an exception in one plugin shutdown does not affect the others
    String pluginNames = testPluginName + "," + testBadPluginName + "," + testPluginName;
    SparkConf conf = initializeSparkConf(pluginNames);
    sc = new JavaSparkContext(conf);
    assertEquals(3, numSuccessfulPlugins);
    sc.stop();
    sc = null;
    // Only the two good plugins terminate cleanly; the bad one threw.
    assertEquals(2, numSuccessfulTerminations);
  }

  // Well-behaved plugin: counts both init and shutdown.
  public static class TestExecutorPlugin implements ExecutorPlugin {
    public void init() {
      ExecutorPluginSuite.numSuccessfulPlugins++;
    }

    public void shutdown() {
      ExecutorPluginSuite.numSuccessfulTerminations++;
    }
  }

  // Second well-behaved plugin, used to test loading multiple plugins.
  public static class TestSecondPlugin implements ExecutorPlugin {
    public void init() {
      ExecutorPluginSuite.numSuccessfulPlugins++;
    }

    public void shutdown() {
      ExecutorPluginSuite.numSuccessfulTerminations++;
    }
  }

  // Plugin whose shutdown throws, to verify other plugins still shut down.
  public static class TestBadShutdownPlugin implements ExecutorPlugin {
    public void init() {
      ExecutorPluginSuite.numSuccessfulPlugins++;
    }

    public void shutdown() {
      throw new RuntimeException("This plugin will fail to cleanly shut down");
    }
  }
}
| 9,586 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark | Create_ds/spark/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.launcher;
import java.time.Duration;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
import static org.mockito.Mockito.*;
import org.apache.spark.SparkContext;
import org.apache.spark.SparkContext$;
import org.apache.spark.internal.config.package$;
import org.apache.spark.util.Utils;
/**
* These tests require the Spark assembly to be built before they can be run.
*/
public class SparkLauncherSuite extends BaseSuite {
  private static final NamedThreadFactory TF = new NamedThreadFactory("SparkLauncherSuite-%d");
  private final SparkLauncher launcher = new SparkLauncher();

  @Test
  public void testSparkArgumentHandling() throws Exception {
    // Exercises addSparkArg validation: flags that take no value must reject
    // one, flags that require a value must reject its absence, and recognized
    // flags must update the matching builder field.
    SparkSubmitOptionParser opts = new SparkSubmitOptionParser();

    launcher.addSparkArg(opts.HELP);
    try {
      launcher.addSparkArg(opts.PROXY_USER);
      fail("Expected IllegalArgumentException.");
    } catch (IllegalArgumentException e) {
      // Expected.
    }

    launcher.addSparkArg(opts.PROXY_USER, "someUser");
    try {
      launcher.addSparkArg(opts.HELP, "someValue");
      fail("Expected IllegalArgumentException.");
    } catch (IllegalArgumentException e) {
      // Expected.
    }

    // Unknown "--future" style arguments are accepted as-is for forward compatibility.
    launcher.addSparkArg("--future-argument");
    launcher.addSparkArg("--future-argument", "someValue");

    launcher.addSparkArg(opts.MASTER, "myMaster");
    assertEquals("myMaster", launcher.builder.master);

    // A recognized flag passed via addSparkArg overwrites the builder's list.
    launcher.addJar("foo");
    launcher.addSparkArg(opts.JARS, "bar");
    assertEquals(Arrays.asList("bar"), launcher.builder.jars);

    launcher.addFile("foo");
    launcher.addSparkArg(opts.FILES, "bar");
    assertEquals(Arrays.asList("bar"), launcher.builder.files);

    launcher.addPyFile("foo");
    launcher.addSparkArg(opts.PY_FILES, "bar");
    assertEquals(Arrays.asList("bar"), launcher.builder.pyFiles);

    launcher.setConf("spark.foo", "foo");
    launcher.addSparkArg(opts.CONF, "spark.foo=bar");
    assertEquals("bar", launcher.builder.conf.get("spark.foo"));

    launcher.setConf(SparkLauncher.PYSPARK_DRIVER_PYTHON, "python3.4");
    launcher.setConf(SparkLauncher.PYSPARK_PYTHON, "python3.5");
    assertEquals("python3.4", launcher.builder.conf.get(
      package$.MODULE$.PYSPARK_DRIVER_PYTHON().key()));
    assertEquals("python3.5", launcher.builder.conf.get(package$.MODULE$.PYSPARK_PYTHON().key()));
  }

  @Test
  public void testChildProcLauncher() throws Exception {
    // This test is failed on Windows due to the failure of initiating executors
    // by the path length limitation. See SPARK-18718.
    assumeTrue(!Utils.isWindows());

    SparkSubmitOptionParser opts = new SparkSubmitOptionParser();
    Map<String, String> env = new HashMap<>();
    env.put("SPARK_PRINT_LAUNCH_COMMAND", "1");

    // setConf() values must win over earlier addSparkArg(CONF, ...) values,
    // and setMainClass() must win over addSparkArg(CLASS, ...).
    launcher
      .setMaster("local")
      .setAppResource(SparkLauncher.NO_RESOURCE)
      .addSparkArg(opts.CONF,
        String.format("%s=-Dfoo=ShouldBeOverriddenBelow", SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS))
      .setConf(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS,
        "-Dfoo=bar -Dtest.appender=console")
      .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, System.getProperty("java.class.path"))
      .addSparkArg(opts.CLASS, "ShouldBeOverriddenBelow")
      .setMainClass(SparkLauncherTestApp.class.getName())
      .redirectError()
      .addAppArgs("proc");
    final Process app = launcher.launch();

    new OutputRedirector(app.getInputStream(), getClass().getName() + ".child", TF);
    assertEquals(0, app.waitFor());
  }

  @Test
  public void testInProcessLauncher() throws Exception {
    // Because this test runs SparkLauncher in process and in client mode, it pollutes the system
    // properties, and that can cause test failures down the test pipeline. So restore the original
    // system properties after this test runs.
    Map<Object, Object> properties = new HashMap<>(System.getProperties());
    try {
      inProcessLauncherTestImpl();
    } finally {
      Properties p = new Properties();
      for (Map.Entry<Object, Object> e : properties.entrySet()) {
        p.put(e.getKey(), e.getValue());
      }
      System.setProperties(p);
      // Here DAGScheduler is stopped, while SparkContext.clearActiveContext may not be called yet.
      // Wait for a reasonable amount of time to avoid creating two active SparkContext in JVM.
      // See SPARK-23019 and SparkContext.stop() for details.
      eventually(Duration.ofSeconds(5), Duration.ofMillis(10), () -> {
        assertTrue("SparkContext is still alive.", SparkContext$.MODULE$.getActive().isEmpty());
      });
    }
  }

  private void inProcessLauncherTestImpl() throws Exception {
    // Record every state transition the handle reports; the listener may be
    // invoked from another thread, hence the synchronization.
    final List<SparkAppHandle.State> transitions = new ArrayList<>();
    SparkAppHandle.Listener listener = mock(SparkAppHandle.Listener.class);
    doAnswer(invocation -> {
      SparkAppHandle h = (SparkAppHandle) invocation.getArguments()[0];
      synchronized (transitions) {
        transitions.add(h.getState());
      }
      return null;
    }).when(listener).stateChanged(any(SparkAppHandle.class));

    SparkAppHandle handle = null;
    try {
      synchronized (InProcessTestApp.LOCK) {
        handle = new InProcessLauncher()
          .setMaster("local")
          .setAppResource(SparkLauncher.NO_RESOURCE)
          .setMainClass(InProcessTestApp.class.getName())
          .addAppArgs("hello")
          .startApplication(listener);

        // SPARK-23020: see doc for InProcessTestApp.LOCK for a description of the race. Here
        // we wait until we know that the connection between the app and the launcher has been
        // established before allowing the app to finish.
        final SparkAppHandle _handle = handle;
        eventually(Duration.ofSeconds(5), Duration.ofMillis(10), () -> {
          assertNotEquals(SparkAppHandle.State.UNKNOWN, _handle.getState());
        });

        InProcessTestApp.LOCK.wait(5000);
      }

      waitFor(handle);
      assertEquals(SparkAppHandle.State.FINISHED, handle.getState());

      // Matches the behavior of LocalSchedulerBackend.
      List<SparkAppHandle.State> expected = Arrays.asList(
        SparkAppHandle.State.CONNECTED,
        SparkAppHandle.State.RUNNING,
        SparkAppHandle.State.FINISHED);
      assertEquals(expected, transitions);
    } finally {
      if (handle != null) {
        handle.kill();
      }
    }
  }

  @Test
  public void testInProcessLauncherDoesNotKillJvm() throws Exception {
    // Invalid submit arguments must fail the application handle without
    // terminating the JVM running the launcher.
    SparkSubmitOptionParser opts = new SparkSubmitOptionParser();
    List<String[]> wrongArgs = Arrays.asList(
      new String[] { "--unknown" },
      new String[] { opts.DEPLOY_MODE, "invalid" });

    for (String[] args : wrongArgs) {
      InProcessLauncher launcher = new InProcessLauncher()
        .setAppResource(SparkLauncher.NO_RESOURCE);
      switch (args.length) {
        case 2:
          launcher.addSparkArg(args[0], args[1]);
          break;

        case 1:
          launcher.addSparkArg(args[0]);
          break;

        default:
          fail("FIXME: invalid test.");
      }
      SparkAppHandle handle = launcher.startApplication();
      waitFor(handle);
      assertEquals(SparkAppHandle.State.FAILED, handle.getState());
    }

    // Run --version, which is useless as a use case, but should succeed and not exit the JVM.
    // The expected state is "LOST" since "--version" doesn't report state back to the handle.
    SparkAppHandle handle = new InProcessLauncher().addSparkArg(opts.VERSION).startApplication();
    waitFor(handle);
    assertEquals(SparkAppHandle.State.LOST, handle.getState());
  }

  /** Child-process entry point for testChildProcLauncher; asserts the inherited config. */
  public static class SparkLauncherTestApp {

    public static void main(String[] args) throws Exception {
      assertEquals(1, args.length);
      assertEquals("proc", args[0]);
      assertEquals("bar", System.getProperty("foo"));
      assertEquals("local", System.getProperty(SparkLauncher.SPARK_MASTER));
    }

  }

  public static class InProcessTestApp {

    /**
     * SPARK-23020: there's a race caused by a child app finishing too quickly. This would cause
     * the InProcessAppHandle to dispose of itself even before the child connection was properly
     * established, so no state changes would be detected for the application and its final
     * state would be LOST.
     *
     * It's not really possible to fix that race safely in the handle code itself without changing
     * the way in-process apps talk to the launcher library, so we work around that in the test by
     * synchronizing on this object.
     */
    public static final Object LOCK = new Object();

    public static void main(String[] args) throws Exception {
      assertNotEquals(0, args.length);
      assertEquals(args[0], "hello");
      new SparkContext().stop();

      synchronized (LOCK) {
        LOCK.notifyAll();
      }
    }

  }

}
| 9,587 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemoryRadixSorterSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
/**
 * Re-runs every test in {@link ShuffleInMemorySorterSuite} with the radix sort
 * code path enabled instead of the default comparison-based sort.
 */
public class ShuffleInMemoryRadixSorterSuite extends ShuffleInMemorySorterSuite {

  @Override
  protected boolean shouldUseRadixSort() {
    return true;
  }
}
| 9,588 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle/sort/PackedRecordPointerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import java.io.IOException;
import org.junit.Test;
import org.apache.spark.SparkConf;
import org.apache.spark.memory.*;
import org.apache.spark.unsafe.memory.MemoryBlock;
import static org.apache.spark.shuffle.sort.PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES;
import static org.apache.spark.shuffle.sort.PackedRecordPointer.MAXIMUM_PARTITION_ID;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
/**
 * Tests for {@link PackedRecordPointer}, which packs a (partition id, page
 * number, offset-in-page) triple into a single 64-bit word. Exercises both
 * on-heap and off-heap memory modes plus the encoding's boundary conditions.
 */
public class PackedRecordPointerSuite {

  @Test
  public void heap() throws IOException {
    final SparkConf conf = new SparkConf().set("spark.memory.offHeap.enabled", "false");
    final TaskMemoryManager memoryManager =
      new TaskMemoryManager(new TestMemoryManager(conf), 0);
    final MemoryConsumer c = new TestMemoryConsumer(memoryManager, MemoryMode.ON_HEAP);
    // page0 is allocated only so that page1 receives page number 1; the
    // assertions below verify that page number survives the pack/unpack
    // round trip, so it must be non-zero.
    final MemoryBlock page0 = memoryManager.allocatePage(128, c);
    final MemoryBlock page1 = memoryManager.allocatePage(128, c);
    final long addressInPage1 = memoryManager.encodePageNumberAndOffset(page1,
      page1.getBaseOffset() + 42);
    PackedRecordPointer packedPointer = new PackedRecordPointer();
    packedPointer.set(PackedRecordPointer.packPointer(addressInPage1, 360));
    // Partition id, page number, and offset must all round-trip intact.
    assertEquals(360, packedPointer.getPartitionId());
    final long recordPointer = packedPointer.getRecordPointer();
    assertEquals(1, TaskMemoryManager.decodePageNumber(recordPointer));
    assertEquals(page1.getBaseOffset() + 42, memoryManager.getOffsetInPage(recordPointer));
    assertEquals(addressInPage1, recordPointer);
    memoryManager.cleanUpAllAllocatedMemory();
  }

  @Test
  public void offHeap() throws IOException {
    // Same round-trip test as heap(), but with off-heap allocation, where
    // offsets are absolute addresses rather than array offsets.
    final SparkConf conf = new SparkConf()
      .set("spark.memory.offHeap.enabled", "true")
      .set("spark.memory.offHeap.size", "10000");
    final TaskMemoryManager memoryManager =
      new TaskMemoryManager(new TestMemoryManager(conf), 0);
    final MemoryConsumer c = new TestMemoryConsumer(memoryManager, MemoryMode.OFF_HEAP);
    // As in heap(): page0 exists only to push page1 to page number 1.
    final MemoryBlock page0 = memoryManager.allocatePage(128, c);
    final MemoryBlock page1 = memoryManager.allocatePage(128, c);
    final long addressInPage1 = memoryManager.encodePageNumberAndOffset(page1,
      page1.getBaseOffset() + 42);
    PackedRecordPointer packedPointer = new PackedRecordPointer();
    packedPointer.set(PackedRecordPointer.packPointer(addressInPage1, 360));
    assertEquals(360, packedPointer.getPartitionId());
    final long recordPointer = packedPointer.getRecordPointer();
    assertEquals(1, TaskMemoryManager.decodePageNumber(recordPointer));
    assertEquals(page1.getBaseOffset() + 42, memoryManager.getOffsetInPage(recordPointer));
    assertEquals(addressInPage1, recordPointer);
    memoryManager.cleanUpAllAllocatedMemory();
  }

  @Test
  public void maximumPartitionIdCanBeEncoded() {
    // The largest legal partition id must fit in the packed representation.
    PackedRecordPointer packedPointer = new PackedRecordPointer();
    packedPointer.set(PackedRecordPointer.packPointer(0, MAXIMUM_PARTITION_ID));
    assertEquals(MAXIMUM_PARTITION_ID, packedPointer.getPartitionId());
  }

  @Test
  public void partitionIdsGreaterThanMaximumPartitionIdWillOverflowOrTriggerError() {
    PackedRecordPointer packedPointer = new PackedRecordPointer();
    try {
      // Pointers greater than the maximum partition ID will overflow or trigger an assertion error
      packedPointer.set(PackedRecordPointer.packPointer(0, MAXIMUM_PARTITION_ID + 1));
      assertFalse(MAXIMUM_PARTITION_ID + 1 == packedPointer.getPartitionId());
    } catch (AssertionError e ) {
      // pass: with JVM assertions enabled, packPointer may reject the
      // out-of-range id instead of silently overflowing.
    }
  }

  @Test
  public void maximumOffsetInPageCanBeEncoded() {
    // Largest in-page offset (MAXIMUM_PAGE_SIZE_BYTES - 1) must round-trip.
    PackedRecordPointer packedPointer = new PackedRecordPointer();
    long address = TaskMemoryManager.encodePageNumberAndOffset(0, MAXIMUM_PAGE_SIZE_BYTES - 1);
    packedPointer.set(PackedRecordPointer.packPointer(address, 0));
    assertEquals(address, packedPointer.getRecordPointer());
  }

  @Test
  public void offsetsPastMaxOffsetInPageWillOverflow() {
    // One past the maximum offset wraps to 0 in the packed representation.
    PackedRecordPointer packedPointer = new PackedRecordPointer();
    long address = TaskMemoryManager.encodePageNumberAndOffset(0, MAXIMUM_PAGE_SIZE_BYTES);
    packedPointer.set(PackedRecordPointer.packPointer(address, 0));
    assertEquals(0, packedPointer.getRecordPointer());
  }
}
| 9,589 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.*;
import scala.Option;
import scala.Product2;
import scala.Tuple2;
import scala.Tuple2$;
import scala.collection.Iterator;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Iterators;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.apache.spark.HashPartitioner;
import org.apache.spark.ShuffleDependency;
import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.executor.TaskMetrics;
import org.apache.spark.io.CompressionCodec$;
import org.apache.spark.io.LZ4CompressionCodec;
import org.apache.spark.io.LZFCompressionCodec;
import org.apache.spark.io.SnappyCompressionCodec;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.memory.TestMemoryManager;
import org.apache.spark.network.util.LimitedInputStream;
import org.apache.spark.scheduler.MapStatus;
import org.apache.spark.security.CryptoStreamUtils;
import org.apache.spark.serializer.*;
import org.apache.spark.shuffle.IndexShuffleBlockResolver;
import org.apache.spark.storage.*;
import org.apache.spark.util.Utils;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThan;
import static org.junit.Assert.*;
import static org.mockito.Answers.RETURNS_SMART_NULLS;
import static org.mockito.Mockito.*;
/**
 * Tests for {@link UnsafeShuffleWriter}, covering the serialized ("unsafe")
 * shuffle path: direct writes, forced and memory-pressure spills, spill-file
 * merging (transferTo and stream-based, with compression and/or encryption),
 * large-record handling, cleanup on failure, and metrics/peak-memory
 * accounting. Block, disk, and shuffle-resolver managers are mocked so all
 * I/O goes to local temp files.
 */
public class UnsafeShuffleWriterSuite {

  // NOTE(review): "PARTITITONS" is a typo (should be NUM_PARTITIONS); kept
  // as-is since renaming would be purely cosmetic.
  static final int NUM_PARTITITONS = 4;
  TestMemoryManager memoryManager;
  TaskMemoryManager taskMemoryManager;
  final HashPartitioner hashPartitioner = new HashPartitioner(NUM_PARTITITONS);
  // Final merged shuffle output file; the mocked resolver redirects here.
  File mergedOutputFile;
  File tempDir;
  // Captured by the mocked writeIndexFileAndCommit() call in setUp().
  long[] partitionSizesInMergedFile;
  // Every temp spill file handed out by the mocked DiskBlockManager, so tests
  // can assert they were deleted afterwards.
  final LinkedList<File> spillFilesCreated = new LinkedList<>();
  SparkConf conf;
  final Serializer serializer = new KryoSerializer(new SparkConf());
  TaskMetrics taskMetrics;

  @Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
  @Mock(answer = RETURNS_SMART_NULLS) IndexShuffleBlockResolver shuffleBlockResolver;
  @Mock(answer = RETURNS_SMART_NULLS) DiskBlockManager diskBlockManager;
  @Mock(answer = RETURNS_SMART_NULLS) TaskContext taskContext;
  @Mock(answer = RETURNS_SMART_NULLS) ShuffleDependency<Object, Object, Object> shuffleDep;

  @After
  public void tearDown() {
    Utils.deleteRecursively(tempDir);
    // Fail loudly if a test forgot to release task memory: leaks here would
    // indicate a bug in the writer's cleanup paths.
    final long leakedMemory = taskMemoryManager.cleanUpAllAllocatedMemory();
    if (leakedMemory != 0) {
      fail("Test leaked " + leakedMemory + " bytes of managed memory");
    }
  }

  @Before
  @SuppressWarnings("unchecked")
  public void setUp() throws IOException {
    MockitoAnnotations.initMocks(this);
    tempDir = Utils.createTempDir(null, "test");
    mergedOutputFile = File.createTempFile("mergedoutput", "", tempDir);
    partitionSizesInMergedFile = null;
    spillFilesCreated.clear();
    conf = new SparkConf()
      .set("spark.buffer.pageSize", "1m")
      .set("spark.memory.offHeap.enabled", "false");
    taskMetrics = new TaskMetrics();
    memoryManager = new TestMemoryManager(conf);
    taskMemoryManager =  new TaskMemoryManager(memoryManager, 0);

    // Some tests will override this manager because they change the configuration. This is a
    // default for tests that don't need a specific one.
    SerializerManager manager = new SerializerManager(serializer, conf);
    when(blockManager.serializerManager()).thenReturn(manager);

    when(blockManager.diskBlockManager()).thenReturn(diskBlockManager);
    // Build real DiskBlockObjectWriters over the mocked BlockManager so spill
    // files actually get written to disk.
    when(blockManager.getDiskWriter(
      any(BlockId.class),
      any(File.class),
      any(SerializerInstance.class),
      anyInt(),
      any(ShuffleWriteMetrics.class))).thenAnswer(invocationOnMock -> {
        Object[] args = invocationOnMock.getArguments();
        return new DiskBlockObjectWriter(
          (File) args[1],
          blockManager.serializerManager(),
          (SerializerInstance) args[2],
          (Integer) args[3],
          false,
          (ShuffleWriteMetrics) args[4],
          (BlockId) args[0]
        );
      });

    when(shuffleBlockResolver.getDataFile(anyInt(), anyInt())).thenReturn(mergedOutputFile);
    // Emulate the resolver's commit: capture the partition lengths and move
    // the temp file into place as the merged output.
    doAnswer(invocationOnMock -> {
      partitionSizesInMergedFile = (long[]) invocationOnMock.getArguments()[2];
      File tmp = (File) invocationOnMock.getArguments()[3];
      mergedOutputFile.delete();
      tmp.renameTo(mergedOutputFile);
      return null;
    }).when(shuffleBlockResolver)
      .writeIndexFileAndCommit(anyInt(), anyInt(), any(long[].class), any(File.class));

    // Track every temp shuffle block so tests can verify cleanup.
    when(diskBlockManager.createTempShuffleBlock()).thenAnswer(invocationOnMock -> {
      TempShuffleBlockId blockId = new TempShuffleBlockId(UUID.randomUUID());
      File file = File.createTempFile("spillFile", ".spill", tempDir);
      spillFilesCreated.add(file);
      return Tuple2$.MODULE$.apply(blockId, file);
    });

    when(taskContext.taskMetrics()).thenReturn(taskMetrics);
    when(shuffleDep.serializer()).thenReturn(serializer);
    when(shuffleDep.partitioner()).thenReturn(hashPartitioner);
  }

  /**
   * Creates a writer under test. @param transferToEnabled selects between
   * NIO transferTo-based and stream-based spill merging.
   */
  private UnsafeShuffleWriter<Object, Object> createWriter(
      boolean transferToEnabled) throws IOException {
    conf.set("spark.file.transferTo", String.valueOf(transferToEnabled));
    return new UnsafeShuffleWriter<>(
      blockManager,
      shuffleBlockResolver,
      taskMemoryManager,
      new SerializedShuffleHandle<>(0, 1, shuffleDep),
      0, // map id
      taskContext,
      conf
    );
  }

  // Asserts every spill file handed out by the mocked DiskBlockManager was
  // deleted by the writer.
  private void assertSpillFilesWereCleanedUp() {
    for (File spillFile : spillFilesCreated) {
      assertFalse("Spill file " + spillFile.getPath() + " was not cleaned up",
        spillFile.exists());
    }
  }

  /**
   * Reads all records back out of the merged output file, partition by
   * partition (undoing encryption/compression as configured), and verifies
   * each record landed in its hash partition. @return the records read.
   */
  private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
    final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
    long startOffset = 0;
    for (int i = 0; i < NUM_PARTITITONS; i++) {
      final long partitionSize = partitionSizesInMergedFile[i];
      if (partitionSize > 0) {
        FileInputStream fin = new FileInputStream(mergedOutputFile);
        fin.getChannel().position(startOffset);
        InputStream in = new LimitedInputStream(fin, partitionSize);
        in = blockManager.serializerManager().wrapForEncryption(in);
        if (conf.getBoolean("spark.shuffle.compress", true)) {
          in = CompressionCodec$.MODULE$.createCodec(conf).compressedInputStream(in);
        }
        DeserializationStream recordsStream = serializer.newInstance().deserializeStream(in);
        Iterator<Tuple2<Object, Object>> records = recordsStream.asKeyValueIterator();
        while (records.hasNext()) {
          Tuple2<Object, Object> record = records.next();
          assertEquals(i, hashPartitioner.getPartition(record._1()));
          recordsList.add(record);
        }
        recordsStream.close();
        startOffset += partitionSize;
      }
    }
    return recordsList;
  }

  @Test(expected=IllegalStateException.class)
  public void mustCallWriteBeforeSuccessfulStop() throws IOException {
    // stop(true) without write() is a contract violation.
    createWriter(false).stop(true);
  }

  @Test
  public void doNotNeedToCallWriteBeforeUnsuccessfulStop() throws IOException {
    // stop(false) (failure path) must be safe without a prior write().
    createWriter(false).stop(false);
  }

  static class PandaException extends RuntimeException {
  }

  @Test(expected=PandaException.class)
  public void writeFailurePropagates() throws Exception {
    // An exception thrown while iterating input records must propagate out
    // of write() unchanged.
    class BadRecords extends scala.collection.AbstractIterator<Product2<Object, Object>> {
      @Override public boolean hasNext() {
        throw new PandaException();
      }
      @Override public Product2<Object, Object> next() {
        return null;
      }
    }
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(true);
    writer.write(new BadRecords());
  }

  @Test
  public void writeEmptyIterator() throws Exception {
    // Writing no records still commits an (all-zero-length) output and
    // leaves every metric at zero.
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(true);
    writer.write(Iterators.emptyIterator());
    final Option<MapStatus> mapStatus = writer.stop(true);
    assertTrue(mapStatus.isDefined());
    assertTrue(mergedOutputFile.exists());
    assertArrayEquals(new long[NUM_PARTITITONS], partitionSizesInMergedFile);
    assertEquals(0, taskMetrics.shuffleWriteMetrics().recordsWritten());
    assertEquals(0, taskMetrics.shuffleWriteMetrics().bytesWritten());
    assertEquals(0, taskMetrics.diskBytesSpilled());
    assertEquals(0, taskMetrics.memoryBytesSpilled());
  }

  @Test
  public void writeWithoutSpilling() throws Exception {
    // In this example, each partition should have exactly one record:
    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
    for (int i = 0; i < NUM_PARTITITONS; i++) {
      dataToWrite.add(new Tuple2<>(i, i));
    }
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(true);
    writer.write(dataToWrite.iterator());
    final Option<MapStatus> mapStatus = writer.stop(true);
    assertTrue(mapStatus.isDefined());
    assertTrue(mergedOutputFile.exists());

    long sumOfPartitionSizes = 0;
    for (long size: partitionSizesInMergedFile) {
      // All partitions should be the same size:
      assertEquals(partitionSizesInMergedFile[0], size);
      sumOfPartitionSizes += size;
    }
    assertEquals(mergedOutputFile.length(), sumOfPartitionSizes);
    assertEquals(
      HashMultiset.create(dataToWrite),
      HashMultiset.create(readRecordsFromFile()));
    assertSpillFilesWereCleanedUp();
    ShuffleWriteMetrics shuffleWriteMetrics = taskMetrics.shuffleWriteMetrics();
    assertEquals(dataToWrite.size(), shuffleWriteMetrics.recordsWritten());
    // Nothing spilled, so spill metrics must stay zero.
    assertEquals(0, taskMetrics.diskBytesSpilled());
    assertEquals(0, taskMetrics.memoryBytesSpilled());
    assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());
  }

  /**
   * Configures compression/encryption, then delegates to the two-argument
   * overload to run the actual spill-merge scenario.
   *
   * @param compressionCodecName codec class name, or null to disable
   *                             shuffle compression
   */
  private void testMergingSpills(
      final boolean transferToEnabled,
      String compressionCodecName,
      boolean encrypt) throws Exception {
    if (compressionCodecName != null) {
      conf.set("spark.shuffle.compress", "true");
      conf.set("spark.io.compression.codec", compressionCodecName);
    } else {
      conf.set("spark.shuffle.compress", "false");
    }
    conf.set(org.apache.spark.internal.config.package$.MODULE$.IO_ENCRYPTION_ENABLED(), encrypt);
    SerializerManager manager;
    if (encrypt) {
      manager = new SerializerManager(serializer, conf,
        Option.apply(CryptoStreamUtils.createKey(conf)));
    } else {
      manager = new SerializerManager(serializer, conf);
    }
    when(blockManager.serializerManager()).thenReturn(manager);
    testMergingSpills(transferToEnabled, encrypt);
  }

  /**
   * Core spill-merge scenario: insert four records, force a spill, insert
   * two more, then verify the two spill files merge into a correct,
   * fully-accounted output.
   */
  private void testMergingSpills(
      boolean transferToEnabled,
      boolean encrypted) throws IOException {
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(transferToEnabled);
    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
    for (int i : new int[] { 1, 2, 3, 4, 4, 2 }) {
      dataToWrite.add(new Tuple2<>(i, i));
    }
    writer.insertRecordIntoSorter(dataToWrite.get(0));
    writer.insertRecordIntoSorter(dataToWrite.get(1));
    writer.insertRecordIntoSorter(dataToWrite.get(2));
    writer.insertRecordIntoSorter(dataToWrite.get(3));
    writer.forceSorterToSpill();
    writer.insertRecordIntoSorter(dataToWrite.get(4));
    writer.insertRecordIntoSorter(dataToWrite.get(5));
    writer.closeAndWriteOutput();
    final Option<MapStatus> mapStatus = writer.stop(true);
    assertTrue(mapStatus.isDefined());
    assertTrue(mergedOutputFile.exists());
    // One forced spill plus the final close produce exactly two spill files.
    assertEquals(2, spillFilesCreated.size());

    long sumOfPartitionSizes = 0;
    for (long size: partitionSizesInMergedFile) {
      sumOfPartitionSizes += size;
    }
    assertEquals(sumOfPartitionSizes, mergedOutputFile.length());

    assertEquals(HashMultiset.create(dataToWrite), HashMultiset.create(readRecordsFromFile()));
    assertSpillFilesWereCleanedUp();
    ShuffleWriteMetrics shuffleWriteMetrics = taskMetrics.shuffleWriteMetrics();
    assertEquals(dataToWrite.size(), shuffleWriteMetrics.recordsWritten());
    assertThat(taskMetrics.diskBytesSpilled(), greaterThan(0L));
    assertThat(taskMetrics.diskBytesSpilled(), lessThan(mergedOutputFile.length()));
    assertThat(taskMetrics.memoryBytesSpilled(), greaterThan(0L));
    assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());
  }

  // The following tests enumerate the (merge strategy x codec x encryption)
  // matrix over the shared testMergingSpills scenario.

  @Test
  public void mergeSpillsWithTransferToAndLZF() throws Exception {
    testMergingSpills(true, LZFCompressionCodec.class.getName(), false);
  }

  @Test
  public void mergeSpillsWithFileStreamAndLZF() throws Exception {
    testMergingSpills(false, LZFCompressionCodec.class.getName(), false);
  }

  @Test
  public void mergeSpillsWithTransferToAndLZ4() throws Exception {
    testMergingSpills(true, LZ4CompressionCodec.class.getName(), false);
  }

  @Test
  public void mergeSpillsWithFileStreamAndLZ4() throws Exception {
    testMergingSpills(false, LZ4CompressionCodec.class.getName(), false);
  }

  @Test
  public void mergeSpillsWithTransferToAndSnappy() throws Exception {
    testMergingSpills(true, SnappyCompressionCodec.class.getName(), false);
  }

  @Test
  public void mergeSpillsWithFileStreamAndSnappy() throws Exception {
    testMergingSpills(false, SnappyCompressionCodec.class.getName(), false);
  }

  @Test
  public void mergeSpillsWithTransferToAndNoCompression() throws Exception {
    testMergingSpills(true, null, false);
  }

  @Test
  public void mergeSpillsWithFileStreamAndNoCompression() throws Exception {
    testMergingSpills(false, null, false);
  }

  @Test
  public void mergeSpillsWithCompressionAndEncryption() throws Exception {
    // This should actually be translated to a "file stream merge" internally, just have the
    // test to make sure that it's the case.
    testMergingSpills(true, LZ4CompressionCodec.class.getName(), true);
  }

  @Test
  public void mergeSpillsWithFileStreamAndCompressionAndEncryption() throws Exception {
    testMergingSpills(false, LZ4CompressionCodec.class.getName(), true);
  }

  @Test
  public void mergeSpillsWithCompressionAndEncryptionSlowPath() throws Exception {
    conf.set("spark.shuffle.unsafe.fastMergeEnabled", "false");
    testMergingSpills(false, LZ4CompressionCodec.class.getName(), true);
  }

  @Test
  public void mergeSpillsWithEncryptionAndNoCompression() throws Exception {
    // This should actually be translated to a "file stream merge" internally, just have the
    // test to make sure that it's the case.
    testMergingSpills(true, null, true);
  }

  @Test
  public void mergeSpillsWithFileStreamAndEncryptionAndNoCompression() throws Exception {
    testMergingSpills(false, null, true);
  }

  @Test
  public void writeEnoughDataToTriggerSpill() throws Exception {
    // Cap task memory at one max-size page so that writing ~1.1 pages of
    // data forces a memory-pressure spill.
    memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES);
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
    final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10];
    for (int i = 0; i < 10 + 1; i++) {
      dataToWrite.add(new Tuple2<>(i, bigByteArray));
    }
    writer.write(dataToWrite.iterator());
    assertEquals(2, spillFilesCreated.size());
    writer.stop(true);
    readRecordsFromFile();
    assertSpillFilesWereCleanedUp();
    ShuffleWriteMetrics shuffleWriteMetrics = taskMetrics.shuffleWriteMetrics();
    assertEquals(dataToWrite.size(), shuffleWriteMetrics.recordsWritten());
    assertThat(taskMetrics.diskBytesSpilled(), greaterThan(0L));
    assertThat(taskMetrics.diskBytesSpilled(), lessThan(mergedOutputFile.length()));
    assertThat(taskMetrics.memoryBytesSpilled(), greaterThan(0L));
    assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());
  }

  @Test
  public void writeEnoughRecordsToTriggerSortBufferExpansionAndSpillRadixOff() throws Exception {
    conf.set("spark.shuffle.sort.useRadixSort", "false");
    writeEnoughRecordsToTriggerSortBufferExpansionAndSpill();
    assertEquals(2, spillFilesCreated.size());
  }

  @Test
  public void writeEnoughRecordsToTriggerSortBufferExpansionAndSpillRadixOn() throws Exception {
    // Radix sort needs extra pointer-array headroom, so it spills one more
    // time than the comparison sort under the same memory limit.
    conf.set("spark.shuffle.sort.useRadixSort", "true");
    writeEnoughRecordsToTriggerSortBufferExpansionAndSpill();
    assertEquals(3, spillFilesCreated.size());
  }

  // Shared body for the two tests above: write one record more than the
  // initial sort buffer holds under a tight memory limit.
  private void writeEnoughRecordsToTriggerSortBufferExpansionAndSpill() throws Exception {
    memoryManager.limit(UnsafeShuffleWriter.DEFAULT_INITIAL_SORT_BUFFER_SIZE * 16);
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
    for (int i = 0; i < UnsafeShuffleWriter.DEFAULT_INITIAL_SORT_BUFFER_SIZE + 1; i++) {
      dataToWrite.add(new Tuple2<>(i, i));
    }
    writer.write(dataToWrite.iterator());
    writer.stop(true);
    readRecordsFromFile();
    assertSpillFilesWereCleanedUp();
    ShuffleWriteMetrics shuffleWriteMetrics = taskMetrics.shuffleWriteMetrics();
    assertEquals(dataToWrite.size(), shuffleWriteMetrics.recordsWritten());
    assertThat(taskMetrics.diskBytesSpilled(), greaterThan(0L));
    assertThat(taskMetrics.diskBytesSpilled(), lessThan(mergedOutputFile.length()));
    assertThat(taskMetrics.memoryBytesSpilled(), greaterThan(0L));
    assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());
  }

  @Test
  public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception {
    // A single record larger than the disk write buffer must still be
    // written correctly (it is flushed in multiple chunks).
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
    final byte[] bytes = new byte[(int) (ShuffleExternalSorter.DISK_WRITE_BUFFER_SIZE * 2.5)];
    new Random(42).nextBytes(bytes);
    dataToWrite.add(new Tuple2<>(1, ByteBuffer.wrap(bytes)));
    writer.write(dataToWrite.iterator());
    writer.stop(true);
    assertEquals(
      HashMultiset.create(dataToWrite),
      HashMultiset.create(readRecordsFromFile()));
    assertSpillFilesWereCleanedUp();
  }

  @Test
  public void writeRecordsThatAreBiggerThanMaxRecordSize() throws Exception {
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
    dataToWrite.add(new Tuple2<>(1, ByteBuffer.wrap(new byte[1])));
    // We should be able to write a record that's right _at_ the max record size
    final byte[] atMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes() - 4];
    new Random(42).nextBytes(atMaxRecordSize);
    dataToWrite.add(new Tuple2<>(2, ByteBuffer.wrap(atMaxRecordSize)));
    // Inserting a record that's larger than the max record size
    final byte[] exceedsMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes()];
    new Random(42).nextBytes(exceedsMaxRecordSize);
    dataToWrite.add(new Tuple2<>(3, ByteBuffer.wrap(exceedsMaxRecordSize)));
    writer.write(dataToWrite.iterator());
    writer.stop(true);
    assertEquals(
      HashMultiset.create(dataToWrite),
      HashMultiset.create(readRecordsFromFile()));
    assertSpillFilesWereCleanedUp();
  }

  @Test
  public void spillFilesAreDeletedWhenStoppingAfterError() throws IOException {
    // Even after a failure stop, any spill files written so far must be
    // removed.
    final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
    writer.insertRecordIntoSorter(new Tuple2<>(1, 1));
    writer.insertRecordIntoSorter(new Tuple2<>(2, 2));
    writer.forceSorterToSpill();
    writer.insertRecordIntoSorter(new Tuple2<>(2, 2));
    writer.stop(false);
    assertSpillFilesWereCleanedUp();
  }

  @Test
  public void testPeakMemoryUsed() throws Exception {
    final long recordLengthBytes = 8;
    final long pageSizeBytes = 256;
    final long numRecordsPerPage = pageSizeBytes / recordLengthBytes;
    // Spy a tiny page size so page allocations (and thus peak-memory bumps)
    // happen every few records.
    taskMemoryManager = spy(taskMemoryManager);
    when(taskMemoryManager.pageSizeBytes()).thenReturn(pageSizeBytes);
    final UnsafeShuffleWriter<Object, Object> writer =
      new UnsafeShuffleWriter<>(
        blockManager,
        shuffleBlockResolver,
        taskMemoryManager,
        new SerializedShuffleHandle<>(0, 1, shuffleDep),
        0, // map id
        taskContext,
        conf);

    // Peak memory should be monotonically increasing. More specifically, every time
    // we allocate a new page it should increase by exactly the size of the page.
    long previousPeakMemory = writer.getPeakMemoryUsedBytes();
    long newPeakMemory;
    try {
      for (int i = 0; i < numRecordsPerPage * 10; i++) {
        writer.insertRecordIntoSorter(new Tuple2<Object, Object>(1, 1));
        newPeakMemory = writer.getPeakMemoryUsedBytes();
        if (i % numRecordsPerPage == 0) {
          // The first page is allocated in constructor, another page will be allocated after
          // every numRecordsPerPage records (peak memory should change).
          assertEquals(previousPeakMemory + pageSizeBytes, newPeakMemory);
        } else {
          assertEquals(previousPeakMemory, newPeakMemory);
        }
        previousPeakMemory = newPeakMemory;
      }

      // Spilling should not change peak memory
      writer.forceSorterToSpill();
      newPeakMemory = writer.getPeakMemoryUsedBytes();
      assertEquals(previousPeakMemory, newPeakMemory);
      for (int i = 0; i < numRecordsPerPage; i++) {
        writer.insertRecordIntoSorter(new Tuple2<Object, Object>(1, 1));
      }
      newPeakMemory = writer.getPeakMemoryUsedBytes();
      assertEquals(previousPeakMemory, newPeakMemory);

      // Closing the writer should not change peak memory
      writer.closeAndWriteOutput();
      newPeakMemory = writer.getPeakMemoryUsedBytes();
      assertEquals(previousPeakMemory, newPeakMemory);
    } finally {
      writer.stop(false);
    }
  }
}
| 9,590 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle | Create_ds/spark/core/src/test/java/org/apache/spark/shuffle/sort/ShuffleInMemorySorterSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Random;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.HashPartitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.memory.MemoryConsumer;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.memory.TestMemoryConsumer;
import org.apache.spark.memory.TestMemoryManager;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.memory.MemoryBlock;
/**
 * Tests for {@link ShuffleInMemorySorter}, which sorts packed record
 * pointers by partition id. The radix-sort variant of this suite
 * ({@code ShuffleInMemoryRadixSorterSuite}) overrides
 * {@link #shouldUseRadixSort()} to re-run these tests with radix sort.
 */
public class ShuffleInMemorySorterSuite {

  // Overridden by the radix-sort subclass; keep this hook's signature stable.
  protected boolean shouldUseRadixSort() { return false; }

  final TestMemoryManager memoryManager =
    new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false"));
  final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0);
  final TestMemoryConsumer consumer = new TestMemoryConsumer(taskMemoryManager);

  // Decodes a UTF-8 string of strLength bytes stored at (baseObject, baseOffset).
  private static String getStringFromDataPage(Object baseObject, long baseOffset, int strLength) {
    final byte[] strBytes = new byte[strLength];
    Platform.copyMemory(baseObject, baseOffset, strBytes, Platform.BYTE_ARRAY_OFFSET, strLength);
    return new String(strBytes, StandardCharsets.UTF_8);
  }

  @Test
  public void testSortingEmptyInput() {
    // Sorting with no inserted records must yield an empty iterator.
    final ShuffleInMemorySorter sorter = new ShuffleInMemorySorter(
      consumer, 100, shouldUseRadixSort());
    final ShuffleInMemorySorter.ShuffleSorterIterator iter = sorter.getSortedIterator();
    Assert.assertFalse(iter.hasNext());
  }

  @Test
  public void testBasicSorting() throws Exception {
    final String[] dataToSort = new String[] {
      "Boba",
      "Pearls",
      "Tapioca",
      "Taho",
      "Condensed Milk",
      "Jasmine",
      "Milk Tea",
      "Lychee",
      "Mango"
    };
    final SparkConf conf = new SparkConf().set("spark.memory.offHeap.enabled", "false");
    final TaskMemoryManager memoryManager =
      new TaskMemoryManager(new TestMemoryManager(conf), 0);
    final MemoryConsumer c = new TestMemoryConsumer(memoryManager);
    final MemoryBlock dataPage = memoryManager.allocatePage(2048, c);
    final Object baseObject = dataPage.getBaseObject();
    // Deliberately tiny initial capacity (4) so insertion exercises
    // expandPointerArray below.
    final ShuffleInMemorySorter sorter = new ShuffleInMemorySorter(
      consumer, 4, shouldUseRadixSort());
    final HashPartitioner hashPartitioner = new HashPartitioner(4);

    // Write the records into the data page and store pointers into the sorter
    long position = dataPage.getBaseOffset();
    for (String str : dataToSort) {
      if (!sorter.hasSpaceForAnotherRecord()) {
        sorter.expandPointerArray(
          consumer.allocateArray(sorter.getMemoryUsage() / 8 * 2));
      }
      final long recordAddress = memoryManager.encodePageNumberAndOffset(dataPage, position);
      final byte[] strBytes = str.getBytes(StandardCharsets.UTF_8);
      // Record layout: 4-byte length prefix followed by the UTF-8 bytes.
      Platform.putInt(baseObject, position, strBytes.length);
      position += 4;
      Platform.copyMemory(
        strBytes, Platform.BYTE_ARRAY_OFFSET, baseObject, position, strBytes.length);
      position += strBytes.length;
      sorter.insertRecord(recordAddress, hashPartitioner.getPartition(str));
    }

    // Sort the records
    final ShuffleInMemorySorter.ShuffleSorterIterator iter = sorter.getSortedIterator();
    int prevPartitionId = -1;
    Arrays.sort(dataToSort);
    for (int i = 0; i < dataToSort.length; i++) {
      Assert.assertTrue(iter.hasNext());
      iter.loadNext();
      final int partitionId = iter.packedRecordPointer.getPartitionId();
      Assert.assertTrue(partitionId >= 0 && partitionId <= 3);
      // The sort only orders by partition id; verify non-decreasing ids and
      // that each decoded string is one of the inputs.
      Assert.assertTrue("Partition id " + partitionId + " should be >= prev id " + prevPartitionId,
        partitionId >= prevPartitionId);
      final long recordAddress = iter.packedRecordPointer.getRecordPointer();
      final int recordLength = Platform.getInt(
        memoryManager.getPage(recordAddress), memoryManager.getOffsetInPage(recordAddress));
      final String str = getStringFromDataPage(
        memoryManager.getPage(recordAddress),
        memoryManager.getOffsetInPage(recordAddress) + 4, // skip over record length
        recordLength);
      Assert.assertTrue(Arrays.binarySearch(dataToSort, str) != -1);
    }
    Assert.assertFalse(iter.hasNext());
  }

  @Test
  public void testSortingManyNumbers() throws Exception {
    // Stress test: 128k random partition ids; the sorted iterator must
    // produce them in exactly ascending order.
    ShuffleInMemorySorter sorter = new ShuffleInMemorySorter(consumer, 4, shouldUseRadixSort());
    int[] numbersToSort = new int[128000];
    Random random = new Random(16);
    for (int i = 0; i < numbersToSort.length; i++) {
      if (!sorter.hasSpaceForAnotherRecord()) {
        sorter.expandPointerArray(consumer.allocateArray(sorter.getMemoryUsage() / 8 * 2));
      }
      numbersToSort[i] = random.nextInt(PackedRecordPointer.MAXIMUM_PARTITION_ID + 1);
      sorter.insertRecord(0, numbersToSort[i]);
    }
    Arrays.sort(numbersToSort);
    int[] sorterResult = new int[numbersToSort.length];
    ShuffleInMemorySorter.ShuffleSorterIterator iter = sorter.getSortedIterator();
    int j = 0;
    while (iter.hasNext()) {
      iter.loadNext();
      sorterResult[j] = iter.packedRecordPointer.getPartitionId();
      j += 1;
    }
    Assert.assertArrayEquals(numbersToSort, sorterResult);
  }
}
| 9,591 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark | Create_ds/spark/core/src/test/java/org/apache/spark/serializer/TestJavaSerializerImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import scala.reflect.ClassTag;
/**
 * A minimal Java implementation of the serializer API, kept around only to verify that
 * the abstract serializer classes can be extended from Java code. None of the methods
 * do any real work; they all return null or are no-ops.
 */
class TestJavaSerializerImpl extends Serializer {

  @Override
  public SerializerInstance newInstance() {
    return null;
  }

  static class SerializerInstanceImpl extends SerializerInstance {
    @Override
    public <T> ByteBuffer serialize(T t, ClassTag<T> tag) {
      return null;
    }

    @Override
    public <T> T deserialize(ByteBuffer bytes, ClassLoader loader, ClassTag<T> tag) {
      return null;
    }

    @Override
    public <T> T deserialize(ByteBuffer bytes, ClassTag<T> tag) {
      return null;
    }

    @Override
    public SerializationStream serializeStream(OutputStream s) {
      return null;
    }

    @Override
    public DeserializationStream deserializeStream(InputStream s) {
      return null;
    }
  }

  static class SerializationStreamImpl extends SerializationStream {
    @Override
    public <T> SerializationStream writeObject(T t, ClassTag<T> tag) {
      return null;
    }

    @Override
    public void flush() { }

    @Override
    public void close() { }
  }

  static class DeserializationStreamImpl extends DeserializationStream {
    @Override
    public <T> T readObject(ClassTag<T> tag) {
      return null;
    }

    @Override
    public void close() { }
  }
}
| 9,592 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark | Create_ds/spark/core/src/test/java/org/apache/spark/memory/TestMemoryConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
import com.google.common.annotations.VisibleForTesting;
import org.apache.spark.unsafe.memory.MemoryBlock;
import java.io.IOException;
/**
 * A {@link MemoryConsumer} for tests: it tracks granted execution memory in {@code used}
 * and, when asked to spill, simply releases everything it currently holds.
 */
public class TestMemoryConsumer extends MemoryConsumer {

  public TestMemoryConsumer(TaskMemoryManager memoryManager, MemoryMode mode) {
    super(memoryManager, 1024L, mode);
  }

  public TestMemoryConsumer(TaskMemoryManager memoryManager) {
    this(memoryManager, MemoryMode.ON_HEAP);
  }

  /** "Spills" by freeing every byte this consumer holds; returns the amount released. */
  @Override
  public long spill(long size, MemoryConsumer trigger) throws IOException {
    final long released = getUsed();
    free(released);
    return released;
  }

  /** Requests {@code size} bytes of execution memory and records whatever was granted. */
  public void use(long size) {
    final long granted = taskMemoryManager.acquireExecutionMemory(size, this);
    used += granted;
  }

  /** Returns {@code size} bytes of execution memory to the task memory manager. */
  public void free(long size) {
    used -= size;
    taskMemoryManager.releaseExecutionMemory(size, this);
  }

  /** Frees an allocated page and stops accounting for its bytes. */
  @VisibleForTesting
  public void freePage(MemoryBlock page) {
    used -= page.size();
    taskMemoryManager.freePage(page, this);
  }
}
| 9,593 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark | Create_ds/spark/core/src/test/java/org/apache/spark/memory/TaskMemoryManagerSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.SparkConf;
import org.apache.spark.unsafe.memory.MemoryAllocator;
import org.apache.spark.unsafe.memory.MemoryBlock;
/**
 * Tests for {@link TaskMemoryManager}: leak detection, page-address encoding/decoding
 * (on- and off-heap), free-page invariants, and cooperative spilling between consumers
 * competing under a capped memory budget.
 */
public class TaskMemoryManagerSuite {

  /** A page that is never freed must be reported (and reclaimed) by cleanUpAllAllocatedMemory(). */
  @Test
  public void leakedPageMemoryIsDetected() {
    final TaskMemoryManager manager = new TaskMemoryManager(
      new StaticMemoryManager(
        new SparkConf().set("spark.memory.offHeap.enabled", "false"),
        Long.MAX_VALUE,
        Long.MAX_VALUE,
        1),
      0);
    final MemoryConsumer c = new TestMemoryConsumer(manager);
    manager.allocatePage(4096, c); // leak memory
    Assert.assertEquals(4096, manager.getMemoryConsumptionForThisTask());
    Assert.assertEquals(4096, manager.cleanUpAllAllocatedMemory());
  }

  /** Off-heap offsets are absolute addresses; decoding must round-trip even very large ones. */
  @Test
  public void encodePageNumberAndOffsetOffHeap() {
    final SparkConf conf = new SparkConf()
      .set("spark.memory.offHeap.enabled", "true")
      .set("spark.memory.offHeap.size", "1000");
    final TaskMemoryManager manager = new TaskMemoryManager(new TestMemoryManager(conf), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.OFF_HEAP);
    final MemoryBlock dataPage = manager.allocatePage(256, c);
    // In off-heap mode, an offset is an absolute address that may require more than 51 bits to
    // encode. This test exercises that corner-case:
    final long offset = ((1L << TaskMemoryManager.OFFSET_BITS) + 10);
    final long encodedAddress = manager.encodePageNumberAndOffset(dataPage, offset);
    // Off-heap pages have no Java base object, so getPage() yields null.
    Assert.assertEquals(null, manager.getPage(encodedAddress));
    Assert.assertEquals(offset, manager.getOffsetInPage(encodedAddress));
    manager.freePage(dataPage, c);
  }

  /** On-heap encoding must round-trip both the page's base object and the offset. */
  @Test
  public void encodePageNumberAndOffsetOnHeap() {
    final TaskMemoryManager manager = new TaskMemoryManager(
      new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    final MemoryBlock dataPage = manager.allocatePage(256, c);
    final long encodedAddress = manager.encodePageNumberAndOffset(dataPage, 64);
    Assert.assertEquals(dataPage.getBaseObject(), manager.getPage(encodedAddress));
    Assert.assertEquals(64, manager.getOffsetInPage(encodedAddress));
  }

  /** Freeing through the consumer must stamp the page with the freed-marker page number. */
  @Test
  public void freeingPageSetsPageNumberToSpecialConstant() {
    final TaskMemoryManager manager = new TaskMemoryManager(
      new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    final MemoryBlock dataPage = manager.allocatePage(256, c);
    c.freePage(dataPage);
    Assert.assertEquals(MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER, dataPage.pageNumber);
  }

  /** Pages obtained from a TaskMemoryManager must not be freed directly in the allocator. */
  @Test(expected = AssertionError.class)
  public void freeingPageDirectlyInAllocatorTriggersAssertionError() {
    final TaskMemoryManager manager = new TaskMemoryManager(
      new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    final MemoryBlock dataPage = manager.allocatePage(256, c);
    MemoryAllocator.HEAP.free(dataPage);
  }

  /** Conversely, freePage() must reject pages that were allocated outside the manager. */
  @Test(expected = AssertionError.class)
  public void callingFreePageOnDirectlyAllocatedPageTriggersAssertionError() {
    final TaskMemoryManager manager = new TaskMemoryManager(
      new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    final MemoryBlock dataPage = MemoryAllocator.HEAP.allocate(256);
    manager.freePage(dataPage, c);
  }

  /**
   * Two consumers sharing a 100-byte budget: whenever one requests more than is free,
   * the other must be forced to spill. The trailing comments mark which consumer spilled.
   */
  @Test
  public void cooperativeSpilling() {
    final TestMemoryManager memoryManager = new TestMemoryManager(new SparkConf());
    memoryManager.limit(100);
    final TaskMemoryManager manager = new TaskMemoryManager(memoryManager, 0);
    TestMemoryConsumer c1 = new TestMemoryConsumer(manager);
    TestMemoryConsumer c2 = new TestMemoryConsumer(manager);
    c1.use(100);
    Assert.assertEquals(100, c1.getUsed());
    c2.use(100);
    Assert.assertEquals(100, c2.getUsed());
    Assert.assertEquals(0, c1.getUsed()); // spilled
    c1.use(100);
    Assert.assertEquals(100, c1.getUsed());
    Assert.assertEquals(0, c2.getUsed()); // spilled
    c1.use(50);
    Assert.assertEquals(50, c1.getUsed()); // spilled
    Assert.assertEquals(0, c2.getUsed());
    c2.use(50);
    Assert.assertEquals(50, c1.getUsed());
    Assert.assertEquals(50, c2.getUsed());
    c1.use(100);
    Assert.assertEquals(100, c1.getUsed());
    Assert.assertEquals(0, c2.getUsed()); // spilled
    c1.free(20);
    Assert.assertEquals(80, c1.getUsed());
    c2.use(10);
    Assert.assertEquals(80, c1.getUsed());
    Assert.assertEquals(10, c2.getUsed());
    c2.use(100);
    Assert.assertEquals(100, c2.getUsed());
    Assert.assertEquals(0, c1.getUsed()); // spilled
    c1.free(0);
    c2.free(100);
    Assert.assertEquals(0, manager.cleanUpAllAllocatedMemory());
  }

  /**
   * Three consumers under the same 100-byte cap: the manager should spill the consumer
   * whose holdings best satisfy the request, rather than spilling everyone.
   */
  @Test
  public void cooperativeSpilling2() {
    final TestMemoryManager memoryManager = new TestMemoryManager(new SparkConf());
    memoryManager.limit(100);
    final TaskMemoryManager manager = new TaskMemoryManager(memoryManager, 0);
    TestMemoryConsumer c1 = new TestMemoryConsumer(manager);
    TestMemoryConsumer c2 = new TestMemoryConsumer(manager);
    TestMemoryConsumer c3 = new TestMemoryConsumer(manager);
    c1.use(20);
    Assert.assertEquals(20, c1.getUsed());
    c2.use(80);
    Assert.assertEquals(80, c2.getUsed());
    c3.use(80);
    Assert.assertEquals(20, c1.getUsed()); // c1: not spilled
    Assert.assertEquals(0, c2.getUsed()); // c2: spilled as it has required size of memory
    Assert.assertEquals(80, c3.getUsed());
    c2.use(80);
    Assert.assertEquals(20, c1.getUsed()); // c1: not spilled
    Assert.assertEquals(0, c3.getUsed()); // c3: spilled as it has required size of memory
    Assert.assertEquals(80, c2.getUsed());
    c3.use(10);
    Assert.assertEquals(0, c1.getUsed()); // c1: spilled as it has required size of memory
    Assert.assertEquals(80, c2.getUsed()); // c2: not spilled as spilling c1 already satisfies c3
    Assert.assertEquals(10, c3.getUsed());
    c1.free(0);
    c2.free(80);
    c3.free(10);
    Assert.assertEquals(0, manager.cleanUpAllAllocatedMemory());
  }

  /** An off-heap request must never force an on-heap consumer to spill (and vice versa). */
  @Test
  public void shouldNotForceSpillingInDifferentModes() {
    final TestMemoryManager memoryManager = new TestMemoryManager(new SparkConf());
    memoryManager.limit(100);
    final TaskMemoryManager manager = new TaskMemoryManager(memoryManager, 0);
    TestMemoryConsumer c1 = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    TestMemoryConsumer c2 = new TestMemoryConsumer(manager, MemoryMode.OFF_HEAP);
    c1.use(80);
    Assert.assertEquals(80, c1.getUsed());
    c2.use(80);
    Assert.assertEquals(20, c2.getUsed()); // not enough memory
    Assert.assertEquals(80, c1.getUsed()); // not spilled
    c2.use(10);
    Assert.assertEquals(10, c2.getUsed()); // spilled
    Assert.assertEquals(80, c1.getUsed()); // not spilled
  }

  @Test
  public void offHeapConfigurationBackwardsCompatibility() {
    // Tests backwards-compatibility with the old `spark.unsafe.offHeap` configuration, which
    // was deprecated in Spark 1.6 and replaced by `spark.memory.offHeap.enabled` (see SPARK-12251).
    final SparkConf conf = new SparkConf()
      .set("spark.unsafe.offHeap", "true")
      .set("spark.memory.offHeap.size", "1000");
    final TaskMemoryManager manager = new TaskMemoryManager(new TestMemoryManager(conf), 0);
    Assert.assertSame(MemoryMode.OFF_HEAP, manager.tungstenMemoryMode);
  }
}
| 9,594 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/util | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java | /**
* Copyright 2015 Stijn de Gouw
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.spark.util.collection;
import java.util.*;
/**
* This codes generates a int array which fails the standard TimSort.
*
* The blog that reported the bug
* http://www.envisage-project.eu/timsort-specification-and-verification/
*
* This codes was originally wrote by Stijn de Gouw, modified by Evan Yu to adapt to
* our test suite.
*
* https://github.com/abstools/java-timsort-bug
* https://github.com/abstools/java-timsort-bug/blob/master/LICENSE
*/
public class TestTimSort {

  // Minimum-run threshold; mirrors the MIN_MERGE constant used by the JDK's TimSort.
  private static final int MIN_MERGE = 32;

  /**
   * Returns an array of integers that demonstrate the bug in TimSort.
   *
   * @param length total number of elements in the returned array
   */
  public static int[] getTimSortBugTestSet(int length) {
    int minRun = minRunLength(length);
    List<Long> runs = runsJDKWorstCase(minRun, length);
    return createArray(runs, length);
  }

  /**
   * Computes TimSort's minimum run length for an input of size {@code n}: {@code n} is
   * shifted right until it drops below MIN_MERGE, plus one if any shifted-off bit was set.
   */
  private static int minRunLength(int n) {
    int r = 0; // Becomes 1 if any 1 bits are shifted off
    while (n >= MIN_MERGE) {
      r |= (n & 1);
      n >>= 1;
    }
    return n + r;
  }

  /**
   * Materializes the run-length list into an int array: all zeros, except that the last
   * element of each run is set to 1 so a run boundary is forced at exactly that position.
   */
  private static int[] createArray(List<Long> runs, int length) {
    int[] a = new int[length];
    Arrays.fill(a, 0);
    int endRun = -1;
    for (long len : runs) {
      a[endRun += len] = 1; // mark the end of this run
    }
    a[length - 1] = 0;
    return a;
  }

  /**
   * Fills <code>runs</code> with a sequence of run lengths of the form<br>
   * Y_n x_{n,1} x_{n,2} ... x_{n,l_n} <br>
   * Y_{n-1} x_{n-1,1} x_{n-1,2} ... x_{n-1,l_{n-1}} <br>
   * ... <br>
   * Y_1 x_{1,1} x_{1,2} ... x_{1,l_1}<br>
   * The Y_i's are chosen to satisfy the invariant throughout execution,
   * but the x_{i,j}'s are merged (by <code>TimSort.mergeCollapse</code>)
   * into an X_i that violates the invariant.
   *
   * @param minRun the minimum run length computed by {@link #minRunLength(int)}
   * @param length The sum of all run lengths that will be added to <code>runs</code>.
   * @return the list of run lengths, in the order they appear in the generated array
   */
  private static List<Long> runsJDKWorstCase(int minRun, int length) {
    List<Long> runs = new ArrayList<>();
    long runningTotal = 0, Y = minRun + 4, X = minRun;
    while (runningTotal + Y + X <= length) {
      runningTotal += X + Y;
      generateJDKWrongElem(runs, minRun, X);
      runs.add(0, Y);
      // X_{i+1} = Y_i + x_{i,1} + 1, since runs.get(1) = x_{i,1}
      X = Y + runs.get(1) + 1;
      // Y_{i+1} = X_{i+1} + Y_i + 1
      Y += X + 1;
    }
    if (runningTotal + X <= length) {
      runningTotal += X;
      generateJDKWrongElem(runs, minRun, X);
    }
    // Pad with a final run so that the run lengths sum exactly to `length`.
    runs.add(length - runningTotal);
    return runs;
  }

  /**
   * Adds a sequence x_1, ..., x_n of run lengths to <code>runs</code> such that:<br>
   * 1. X = x_1 + ... + x_n <br>
   * 2. x_j >= minRun for all j <br>
   * 3. x_1 + ... + x_{j-2} < x_j < x_1 + ... + x_{j-1} for all j <br>
   * These conditions guarantee that TimSort merges all x_j's one by one
   * (resulting in X) using only merges on the second-to-last element.
   *
   * @param runs the run-length list; new lengths are prepended at index 0
   * @param minRun the minimum run length for this array size
   * @param X The sum of the sequence that should be added to runs.
   */
  private static void generateJDKWrongElem(List<Long> runs, int minRun, long X) {
    for (long newTotal; X >= 2 * minRun + 1; X = newTotal) {
      //Default strategy
      newTotal = X / 2 + 1;
      //Specialized strategies
      if (3 * minRun + 3 <= X && X <= 4 * minRun + 1) {
        // add x_1=MIN+1, x_2=MIN, x_3=X-newTotal to runs
        newTotal = 2 * minRun + 1;
      } else if (5 * minRun + 5 <= X && X <= 6 * minRun + 5) {
        // add x_1=MIN+1, x_2=MIN, x_3=MIN+2, x_4=X-newTotal to runs
        newTotal = 3 * minRun + 3;
      } else if (8 * minRun + 9 <= X && X <= 10 * minRun + 9) {
        // add x_1=MIN+1, x_2=MIN, x_3=MIN+2, x_4=2MIN+2, x_5=X-newTotal to runs
        newTotal = 5 * minRun + 5;
      } else if (13 * minRun + 15 <= X && X <= 16 * minRun + 17) {
        // add x_1=MIN+1, x_2=MIN, x_3=MIN+2, x_4=2MIN+2, x_5=3MIN+4, x_6=X-newTotal to runs
        newTotal = 8 * minRun + 9;
      }
      runs.add(0, X - newTotal);
    }
    runs.add(0, X);
  }
}
| 9,595 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterRadixSortSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
/**
 * Re-runs every test in {@link UnsafeExternalSorterSuite} with radix sorting enabled.
 */
public class UnsafeExternalSorterRadixSortSuite extends UnsafeExternalSorterSuite {
  @Override
  protected boolean shouldUseRadixSort() {
    return true;
  }
}
| 9,596 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.UUID;
import org.hamcrest.Matchers;
import scala.Tuple2$;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.apache.spark.SparkConf;
import org.apache.spark.TaskContext;
import org.apache.spark.executor.ShuffleWriteMetrics;
import org.apache.spark.executor.TaskMetrics;
import org.apache.spark.internal.config.package$;
import org.apache.spark.memory.TestMemoryManager;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.serializer.JavaSerializer;
import org.apache.spark.serializer.SerializerInstance;
import org.apache.spark.serializer.SerializerManager;
import org.apache.spark.storage.*;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.util.Utils;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.junit.Assert.*;
import static org.mockito.Answers.RETURNS_SMART_NULLS;
import static org.mockito.Mockito.*;
public class UnsafeExternalSorterSuite {
  // Base configuration; cloned below so per-component settings don't leak between fields.
  private final SparkConf conf = new SparkConf();

  // Every temp spill file handed out by the mocked DiskBlockManager is recorded here so
  // tests can later assert that the sorter deleted them all.
  final LinkedList<File> spillFilesCreated = new LinkedList<>();

  final TestMemoryManager memoryManager =
    new TestMemoryManager(conf.clone().set("spark.memory.offHeap.enabled", "false"));
  final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0);
  // Spill compression is disabled so spill files contain the raw serialized records.
  final SerializerManager serializerManager = new SerializerManager(
    new JavaSerializer(conf),
    conf.clone().set("spark.shuffle.spill.compress", "false"));
  // Use integer comparison for comparing prefixes (which are partition ids, in this case)
  final PrefixComparator prefixComparator = PrefixComparators.LONG;
  // Since the key fits within the 8-byte prefix, we don't need to do any record comparison, so
  // use a dummy comparator
  final RecordComparator recordComparator = new RecordComparator() {
    @Override
    public int compare(
      Object leftBaseObject,
      long leftBaseOffset,
      int leftBaseLength,
      Object rightBaseObject,
      long rightBaseOffset,
      int rightBaseLength) {
      return 0;
    }
  };

  // Created fresh in setUp() and deleted recursively in tearDown().
  File tempDir;

  @Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
  @Mock(answer = RETURNS_SMART_NULLS) DiskBlockManager diskBlockManager;
  @Mock(answer = RETURNS_SMART_NULLS) TaskContext taskContext;

  // Overridden by UnsafeExternalSorterRadixSortSuite to repeat the suite with radix sort.
  protected boolean shouldUseRadixSort() { return false; }

  private final long pageSizeBytes = conf.getSizeAsBytes("spark.buffer.pageSize", "4m");
  private final int spillThreshold =
    (int) conf.get(package$.MODULE$.SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD());
  /**
   * Wires the mocked block/disk managers so that spills go to real files under a fresh
   * temp directory, letting tests read spilled bytes back and verify cleanup.
   */
  @Before
  public void setUp() {
    MockitoAnnotations.initMocks(this);
    tempDir = Utils.createTempDir(System.getProperty("java.io.tmpdir"), "unsafe-test");
    spillFilesCreated.clear();
    taskContext = mock(TaskContext.class);
    when(taskContext.taskMetrics()).thenReturn(new TaskMetrics());
    when(blockManager.diskBlockManager()).thenReturn(diskBlockManager);
    // Hand out real temp files for spill blocks, remembering each for later cleanup checks.
    when(diskBlockManager.createTempLocalBlock()).thenAnswer(invocationOnMock -> {
      TempLocalBlockId blockId = new TempLocalBlockId(UUID.randomUUID());
      File file = File.createTempFile("spillFile", ".spill", tempDir);
      spillFilesCreated.add(file);
      return Tuple2$.MODULE$.apply(blockId, file);
    });
    // Build real DiskBlockObjectWriters over those files so spilled data is actually written.
    when(blockManager.getDiskWriter(
      any(BlockId.class),
      any(File.class),
      any(SerializerInstance.class),
      anyInt(),
      any(ShuffleWriteMetrics.class))).thenAnswer(invocationOnMock -> {
        Object[] args = invocationOnMock.getArguments();
        return new DiskBlockObjectWriter(
          (File) args[1],
          serializerManager,
          (SerializerInstance) args[2],
          (Integer) args[3],
          false,
          (ShuffleWriteMetrics) args[4],
          (BlockId) args[0]
        );
      });
  }
@After
public void tearDown() {
  // Fail the test if any task memory leaked, but always remove the temp directory.
  try {
    final long leakedMemory = taskMemoryManager.cleanUpAllAllocatedMemory();
    assertEquals(0L, leakedMemory);
  } finally {
    Utils.deleteRecursively(tempDir);
    tempDir = null;
  }
}
/** Asserts that every spill file handed out by the mocked disk manager has been deleted. */
private void assertSpillFilesWereCleanedUp() {
  spillFilesCreated.forEach(spillFile ->
      assertFalse("Spill file " + spillFile.getPath() + " was not cleaned up",
          spillFile.exists()));
}
/** Inserts a single 4-byte int record whose sort prefix equals its value. */
private static void insertNumber(UnsafeExternalSorter sorter, int value) throws Exception {
  final int[] payload = { value };
  sorter.insertRecord(payload, Platform.INT_ARRAY_OFFSET, 4, value, false);
}
/** Inserts an int[] payload into the sorter under the given sort prefix. */
private static void insertRecord(
    UnsafeExternalSorter sorter,
    int[] record,
    long prefix) throws IOException {
  final int lengthInBytes = record.length * 4;
  sorter.insertRecord(record, Platform.INT_ARRAY_OFFSET, lengthInBytes, prefix, false);
}
  /**
   * Builds an UnsafeExternalSorter wired to this suite's task memory manager, mocked
   * block manager, and prefix/record comparators.
   */
  private UnsafeExternalSorter newSorter() throws IOException {
    return UnsafeExternalSorter.create(
      taskMemoryManager,
      blockManager,
      serializerManager,
      taskContext,
      () -> recordComparator,
      prefixComparator,
      /* initialSize */ 1024,
      pageSizeBytes,
      spillThreshold,
      shouldUseRadixSort());
  }
  /**
   * Inserts the numbers 1..5 out of order, interleaved with two spills, and verifies they
   * come back in ascending prefix order with their 4-byte payloads intact.
   */
  @Test
  public void testSortingOnlyByPrefix() throws Exception {
    final UnsafeExternalSorter sorter = newSorter();
    insertNumber(sorter, 5);
    insertNumber(sorter, 1);
    insertNumber(sorter, 3);
    sorter.spill();
    insertNumber(sorter, 4);
    sorter.spill();
    insertNumber(sorter, 2);
    UnsafeSorterIterator iter = sorter.getSortedIterator();
    for (int i = 1; i <= 5; i++) {
      iter.loadNext();
      assertEquals(i, iter.getKeyPrefix());
      assertEquals(4, iter.getRecordLength());
      assertEquals(i, Platform.getInt(iter.getBaseObject(), iter.getBaseOffset()));
    }
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
  /** Zero-length records must survive spilling and sorting (prefix and length both 0). */
  @Test
  public void testSortingEmptyArrays() throws Exception {
    final UnsafeExternalSorter sorter = newSorter();
    sorter.insertRecord(null, 0, 0, 0, false);
    sorter.insertRecord(null, 0, 0, 0, false);
    sorter.spill();
    sorter.insertRecord(null, 0, 0, 0, false);
    sorter.spill();
    sorter.insertRecord(null, 0, 0, 0, false);
    sorter.insertRecord(null, 0, 0, 0, false);
    UnsafeSorterIterator iter = sorter.getSortedIterator();
    // All five records round-trip as empty payloads with a zero prefix.
    for (int i = 1; i <= 5; i++) {
      iter.loadNext();
      assertEquals(0, iter.getKeyPrefix());
      assertEquals(0, iter.getRecordLength());
    }
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
@Test
public void testSortTimeMetric() throws Exception {
final UnsafeExternalSorter sorter = newSorter();
long prevSortTime = sorter.getSortTimeNanos();
assertEquals(prevSortTime, 0);
sorter.insertRecord(null, 0, 0, 0, false);
sorter.spill();
assertThat(sorter.getSortTimeNanos(), greaterThan(prevSortTime));
prevSortTime = sorter.getSortTimeNanos();
sorter.spill(); // no sort needed
assertEquals(sorter.getSortTimeNanos(), prevSortTime);
sorter.insertRecord(null, 0, 0, 0, false);
UnsafeSorterIterator iter = sorter.getSortedIterator();
assertThat(sorter.getSortTimeNanos(), greaterThan(prevSortTime));
}
  /** A simulated allocation failure on insert must force a spill without losing records. */
  @Test
  public void spillingOccursInResponseToMemoryPressure() throws Exception {
    final UnsafeExternalSorter sorter = newSorter();
    // This should be enough records to completely fill up a data page:
    final int numRecords = (int) (pageSizeBytes / (4 + 4));
    for (int i = 0; i < numRecords; i++) {
      insertNumber(sorter, numRecords - i);
    }
    assertEquals(1, sorter.getNumberOfAllocatedPages());
    memoryManager.markExecutionAsOutOfMemoryOnce();
    // The insertion of this record should trigger a spill:
    insertNumber(sorter, 0);
    // Ensure that spill files were created
    assertThat(tempDir.listFiles().length, greaterThanOrEqualTo(1));
    // Read back the sorted data:
    UnsafeSorterIterator iter = sorter.getSortedIterator();
    int i = 0;
    while (iter.hasNext()) {
      iter.loadNext();
      assertEquals(i, iter.getKeyPrefix());
      assertEquals(4, iter.getRecordLength());
      assertEquals(i, Platform.getInt(iter.getBaseObject(), iter.getBaseOffset()));
      i++;
    }
    // numRecords inserts plus the one that triggered the spill.
    assertEquals(numRecords + 1, i);
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
@Test
public void testFillingPage() throws Exception {
  // Keep inserting fixed-size records until the sorter is forced onto a second data page,
  // then make sure everything is released cleanly.
  final UnsafeExternalSorter sorter = newSorter();
  final byte[] payload = new byte[16];
  while (sorter.getNumberOfAllocatedPages() < 2) {
    sorter.insertRecord(payload, Platform.BYTE_ARRAY_OFFSET, payload.length, 0, false);
  }
  sorter.cleanupResources();
  assertSpillFilesWereCleanedUp();
}
@Test
public void sortingRecordsThatExceedPageSize() throws Exception {
  // Records bigger than a whole page must still round-trip through spills and sort
  // correctly alongside records that fit comfortably within a page.
  final UnsafeExternalSorter sorter = newSorter();
  final int[] largeRecord = new int[(int) pageSizeBytes + 16];
  Arrays.fill(largeRecord, 456);
  final int[] smallRecord = new int[100];
  Arrays.fill(smallRecord, 123);
  insertRecord(sorter, largeRecord, 456);
  sorter.spill();
  insertRecord(sorter, smallRecord, 123);
  sorter.spill();
  insertRecord(sorter, smallRecord, 123);
  insertRecord(sorter, largeRecord, 456);
  final UnsafeSorterIterator iter = sorter.getSortedIterator();
  // Expected order: both small records (prefix 123) followed by both large ones (456).
  final int[][] expectedPayloads = { smallRecord, smallRecord, largeRecord, largeRecord };
  final int[] expectedValues = { 123, 123, 456, 456 };
  for (int i = 0; i < expectedValues.length; i++) {
    assertTrue(iter.hasNext());
    iter.loadNext();
    assertEquals(expectedValues[i], iter.getKeyPrefix());
    assertEquals(expectedPayloads[i].length * 4, iter.getRecordLength());
    assertEquals(expectedValues[i], Platform.getInt(iter.getBaseObject(), iter.getBaseOffset()));
  }
  assertFalse(iter.hasNext());
  sorter.cleanupResources();
  assertSpillFilesWereCleanedUp();
}
  /**
   * Forcibly spilling an iterator that has already been partially consumed must preserve
   * both the record currently under the cursor and all remaining records.
   */
  @Test
  public void forcedSpillingWithReadIterator() throws Exception {
    final UnsafeExternalSorter sorter = newSorter();
    long[] record = new long[100];
    int recordSize = record.length * 8;
    int n = (int) pageSizeBytes / recordSize * 3;
    for (int i = 0; i < n; i++) {
      record[0] = (long) i;
      sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0, false);
    }
    assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
    UnsafeExternalSorter.SpillableIterator iter =
      (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
    int lastv = 0;
    // Consume the first third of the records before forcing a spill.
    for (int i = 0; i < n / 3; i++) {
      iter.hasNext();
      iter.loadNext();
      assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
      lastv = i;
    }
    assertTrue(iter.spill() > 0);
    // A second forced spill has nothing left in memory to write.
    assertEquals(0, iter.spill());
    // The record under the cursor must still be readable after spilling.
    assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == lastv);
    for (int i = n / 3; i < n; i++) {
      iter.hasNext();
      iter.loadNext();
      assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
    }
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
  /**
   * Forcibly spilling an iterator before any record has been consumed must still allow
   * the full sorted sequence to be read back.
   */
  @Test
  public void forcedSpillingWithNotReadIterator() throws Exception {
    final UnsafeExternalSorter sorter = newSorter();
    long[] record = new long[100];
    int recordSize = record.length * 8;
    int n = (int) pageSizeBytes / recordSize * 3;
    for (int i = 0; i < n; i++) {
      record[0] = (long) i;
      sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0, false);
    }
    assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
    UnsafeExternalSorter.SpillableIterator iter =
      (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
    assertTrue(iter.spill() > 0);
    // A second forced spill has nothing left in memory to write.
    assertEquals(0, iter.spill());
    for (int i = 0; i < n; i++) {
      iter.hasNext();
      iter.loadNext();
      assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
    }
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
  /**
   * Without comparators the sorter acts as an unsorted spillable buffer; getIterator(0)
   * must then return records in insertion order across multiple forced spills.
   */
  @Test
  public void forcedSpillingWithoutComparator() throws Exception {
    final UnsafeExternalSorter sorter = UnsafeExternalSorter.create(
      taskMemoryManager,
      blockManager,
      serializerManager,
      taskContext,
      null,
      null,
      /* initialSize */ 1024,
      pageSizeBytes,
      spillThreshold,
      shouldUseRadixSort());
    long[] record = new long[100];
    int recordSize = record.length * 8;
    int n = (int) pageSizeBytes / recordSize * 3;
    int batch = n / 4;
    for (int i = 0; i < n; i++) {
      record[0] = (long) i;
      sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0, false);
      // Spill after every completed batch of inserts.
      if (i % batch == batch - 1) {
        sorter.spill();
      }
    }
    UnsafeSorterIterator iter = sorter.getIterator(0);
    for (int i = 0; i < n; i++) {
      iter.hasNext();
      iter.loadNext();
      assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
    }
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
  /** The diskBytesSpilled metric must become non-zero after a spill and stay sticky. */
  @Test
  public void testDiskSpilledBytes() throws Exception {
    final UnsafeExternalSorter sorter = newSorter();
    long[] record = new long[100];
    int recordSize = record.length * 8;
    int n = (int) pageSizeBytes / recordSize * 3;
    for (int i = 0; i < n; i++) {
      record[0] = (long) i;
      sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0, false);
    }
    // We will have at-least 2 memory pages allocated because of rounding happening due to
    // integer division of pageSizeBytes and recordSize.
    assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
    assertTrue(taskContext.taskMetrics().diskBytesSpilled() == 0);
    UnsafeExternalSorter.SpillableIterator iter =
      (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
    assertTrue(iter.spill() > 0);
    assertTrue(taskContext.taskMetrics().diskBytesSpilled() > 0);
    assertEquals(0, iter.spill());
    // Even if we did not spill second time, the disk spilled bytes should still be non-zero
    assertTrue(taskContext.taskMetrics().diskBytesSpilled() > 0);
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
@Test
public void testPeakMemoryUsed() throws Exception {
  final long recordLengthBytes = 8;
  final long pageSizeBytes = 256;
  final long numRecordsPerPage = pageSizeBytes / recordLengthBytes;
  final UnsafeExternalSorter sorter = UnsafeExternalSorter.create(
    taskMemoryManager,
    blockManager,
    serializerManager,
    taskContext,
    () -> recordComparator,
    prefixComparator,
    1024,
    pageSizeBytes,
    spillThreshold,
    shouldUseRadixSort());
  // Peak memory must be monotonically increasing: it grows by exactly one page
  // size whenever a new page is allocated and is otherwise unchanged.
  long lastPeak = sorter.getPeakMemoryUsedBytes();
  try {
    for (int i = 0; i < numRecordsPerPage * 10; i++) {
      insertNumber(sorter, i);
      final long peakNow = sorter.getPeakMemoryUsedBytes();
      if (i % numRecordsPerPage == 0) {
        // First record of a page: a fresh page was allocated for it.
        assertEquals(lastPeak + pageSizeBytes, peakNow);
      } else {
        assertEquals(lastPeak, peakNow);
      }
      lastPeak = peakNow;
    }
    // Spilling releases memory but must not lower the recorded peak.
    sorter.spill();
    assertEquals(lastPeak, sorter.getPeakMemoryUsedBytes());
    // Re-inserting a page's worth of records stays within the existing peak.
    for (int i = 0; i < numRecordsPerPage; i++) {
      insertNumber(sorter, i);
    }
    assertEquals(lastPeak, sorter.getPeakMemoryUsedBytes());
  } finally {
    sorter.cleanupResources();
    assertSpillFilesWereCleanedUp();
  }
}
@Test
public void testGetIterator() throws Exception {
  // getIterator(startIndex) must work whether the requested records live in
  // memory, in spill files, or straddle both.
  final UnsafeExternalSorter sorter = newSorter();
  for (int value = 0; value < 100; value++) {
    insertNumber(sorter, value);
  }
  // All records still in memory.
  verifyIntIterator(sorter.getIterator(0), 0, 100);
  verifyIntIterator(sorter.getIterator(79), 79, 100);
  sorter.spill();
  for (int value = 100; value < 200; value++) {
    insertNumber(sorter, value);
  }
  sorter.spill();
  // Records split across two spill files.
  verifyIntIterator(sorter.getIterator(79), 79, 200);
  for (int value = 200; value < 300; value++) {
    insertNumber(sorter, value);
  }
  // Records split across spill files plus the in-memory tail.
  verifyIntIterator(sorter.getIterator(79), 79, 300);
  verifyIntIterator(sorter.getIterator(139), 139, 300);
  verifyIntIterator(sorter.getIterator(279), 279, 300);
}
@Test
public void testOOMDuringSpill() throws Exception {
  final UnsafeExternalSorter sorter = newSorter();
  // we assume that given default configuration,
  // the size of the data we insert to the sorter (ints)
  // and assuming we shouldn't spill before pointers array is exhausted
  // (memory manager is not configured to throw at this point)
  // - so this loop runs a reasonable number of iterations (<2000).
  // test indeed completed within <30ms (on a quad i7 laptop).
  for (int i = 0; sorter.hasSpaceForAnotherRecord(); ++i) {
    insertNumber(sorter, i);
  }
  // we expect the next insert to attempt growing the pointers array first;
  // allocation is expected to fail, then a spill is triggered which
  // attempts another allocation which also fails and we expect to see this
  // OOM here. the original code messed with a released array within the
  // spill code and ended up with a failed assertion. we also expect the
  // location of the OOM to be
  // org.apache.spark.util.collection.unsafe.sort.UnsafeInMemorySorter.reset
  memoryManager.markconsequentOOM(2);
  try {
    insertNumber(sorter, 1024);
    // Fixed typo in the failure message ("OutOfMmoryError").
    fail("expected OutOfMemoryError but it seems operation surprisingly succeeded");
  }
  // we expect an OutOfMemoryError here, anything else (i.e the original NPE is a failure)
  catch (OutOfMemoryError oom){
    String oomStackTrace = Utils.exceptionString(oom);
    assertThat("expected OutOfMemoryError in " +
      "org.apache.spark.util.collection.unsafe.sort.UnsafeInMemorySorter.reset",
      oomStackTrace,
      Matchers.containsString(
        "org.apache.spark.util.collection.unsafe.sort.UnsafeInMemorySorter.reset"));
  }
}
/**
 * Drains {@code iter}, checking that it yields exactly the ints {@code start}
 * (inclusive) through {@code end} (exclusive), in order.
 *
 * Uses JUnit assertions rather than the bare {@code assert} keyword the
 * original used: {@code assert} is a no-op unless the JVM runs with -ea, so
 * failures here would otherwise be silently ignored.
 */
private void verifyIntIterator(UnsafeSorterIterator iter, int start, int end)
    throws IOException {
  for (int i = start; i < end; i++) {
    assertTrue(iter.hasNext());
    iter.loadNext();
    assertEquals(i, Platform.getInt(iter.getBaseObject(), iter.getBaseOffset()));
  }
}
}
| 9,597 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterRadixSortSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
/**
 * Re-runs every test in {@link UnsafeInMemorySorterSuite} with radix sorting enabled.
 */
public class UnsafeInMemorySorterRadixSortSuite extends UnsafeInMemorySorterSuite {
  // Hook consulted by the parent suite when it constructs sorters.
  @Override
  protected boolean shouldUseRadixSort() { return true; }
}
| 9,598 |
0 | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe | Create_ds/spark/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.junit.Assert;
import org.junit.Test;
import org.apache.spark.HashPartitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.memory.TestMemoryConsumer;
import org.apache.spark.memory.TestMemoryManager;
import org.apache.spark.memory.TaskMemoryManager;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.memory.MemoryBlock;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.isIn;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
/**
 * Tests for {@link UnsafeInMemorySorter}. Subclassed by
 * {@code UnsafeInMemorySorterRadixSortSuite} to re-run with radix sort enabled.
 */
public class UnsafeInMemorySorterSuite {

  // Overridden by the radix-sort subclass; controls the sort strategy under test.
  protected boolean shouldUseRadixSort() { return false; }

  /**
   * Copies {@code length} bytes starting at {@code baseOffset} out of a data
   * page and decodes them as a UTF-8 string.
   */
  private static String getStringFromDataPage(Object baseObject, long baseOffset, int length) {
    final byte[] strBytes = new byte[length];
    Platform.copyMemory(baseObject, baseOffset, strBytes, Platform.BYTE_ARRAY_OFFSET, length);
    return new String(strBytes, StandardCharsets.UTF_8);
  }

  @Test
  public void testSortingEmptyInput() {
    final TaskMemoryManager memoryManager = new TaskMemoryManager(
      new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final TestMemoryConsumer consumer = new TestMemoryConsumer(memoryManager);
    final UnsafeInMemorySorter sorter = new UnsafeInMemorySorter(consumer,
      memoryManager,
      mock(RecordComparator.class),
      mock(PrefixComparator.class),
      100,
      shouldUseRadixSort());
    // A sorter with no records must produce an empty iterator, not fail.
    final UnsafeSorterIterator iter = sorter.getSortedIterator();
    Assert.assertFalse(iter.hasNext());
  }

  @Test
  public void testSortingOnlyByIntegerPrefix() throws Exception {
    final String[] dataToSort = new String[] {
      "Boba",
      "Pearls",
      "Tapioca",
      "Taho",
      "Condensed Milk",
      "Jasmine",
      "Milk Tea",
      "Lychee",
      "Mango"
    };
    final TaskMemoryManager memoryManager = new TaskMemoryManager(
      new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final TestMemoryConsumer consumer = new TestMemoryConsumer(memoryManager);
    final MemoryBlock dataPage = memoryManager.allocatePage(2048, consumer);
    final Object baseObject = dataPage.getBaseObject();
    // Write the records into the data page as [int length][UTF-8 bytes] pairs:
    long position = dataPage.getBaseOffset();
    for (String str : dataToSort) {
      final byte[] strBytes = str.getBytes(StandardCharsets.UTF_8);
      Platform.putInt(baseObject, position, strBytes.length);
      position += 4;
      Platform.copyMemory(
        strBytes, Platform.BYTE_ARRAY_OFFSET, baseObject, position, strBytes.length);
      position += strBytes.length;
    }
    // Since the key fits within the 8-byte prefix, we don't need to do any record comparison, so
    // use a dummy comparator
    final RecordComparator recordComparator = new RecordComparator() {
      @Override
      public int compare(
        Object leftBaseObject,
        long leftBaseOffset,
        int leftBaseLength,
        Object rightBaseObject,
        long rightBaseOffset,
        int rightBaseLength) {
        return 0;
      }
    };
    // Compute key prefixes based on the records' partition ids
    final HashPartitioner hashPartitioner = new HashPartitioner(4);
    // Use integer comparison for comparing prefixes (which are partition ids, in this case)
    final PrefixComparator prefixComparator = PrefixComparators.LONG;
    UnsafeInMemorySorter sorter = new UnsafeInMemorySorter(consumer, memoryManager,
      recordComparator, prefixComparator, dataToSort.length, shouldUseRadixSort());
    // Given a page of records, insert those records into the sorter one-by-one:
    position = dataPage.getBaseOffset();
    for (int i = 0; i < dataToSort.length; i++) {
      if (!sorter.hasSpaceForAnotherRecord()) {
        sorter.expandPointerArray(
          consumer.allocateArray(sorter.getMemoryUsage() / 8 * 2));
      }
      // position now points to the start of a record (which holds its length).
      final int recordLength = Platform.getInt(baseObject, position);
      final long address = memoryManager.encodePageNumberAndOffset(dataPage, position);
      final String str = getStringFromDataPage(baseObject, position + 4, recordLength);
      final int partitionId = hashPartitioner.getPartition(str);
      sorter.insertRecord(address, partitionId, false);
      position += 4 + recordLength;
    }
    final UnsafeSorterIterator iter = sorter.getSortedIterator();
    int iterLength = 0;
    long prevPrefix = -1;
    while (iter.hasNext()) {
      iter.loadNext();
      final String str =
        getStringFromDataPage(iter.getBaseObject(), iter.getBaseOffset(), iter.getRecordLength());
      final long keyPrefix = iter.getKeyPrefix();
      // Every record must come back out, and prefixes (partition ids) must be
      // non-decreasing since we sorted by prefix only.
      assertThat(str, isIn(Arrays.asList(dataToSort)));
      assertThat(keyPrefix, greaterThanOrEqualTo(prevPrefix));
      prevPrefix = keyPrefix;
      iterLength++;
    }
    assertEquals(dataToSort.length, iterLength);
  }

  @Test
  public void freeAfterOOM() {
    final SparkConf sparkConf = new SparkConf();
    sparkConf.set("spark.memory.offHeap.enabled", "false");
    final TestMemoryManager testMemoryManager =
      new TestMemoryManager(sparkConf);
    final TaskMemoryManager memoryManager = new TaskMemoryManager(
      testMemoryManager, 0);
    final TestMemoryConsumer consumer = new TestMemoryConsumer(memoryManager);
    // The page itself is never written to in this test; the allocation only
    // exercises the memory-manager bookkeeping. (Unused locals that previously
    // captured baseObject/position/hashPartitioner have been removed.)
    final MemoryBlock dataPage = memoryManager.allocatePage(2048, consumer);
    // Use integer comparison for comparing prefixes (which are partition ids, in this case)
    final PrefixComparator prefixComparator = PrefixComparators.LONG;
    final RecordComparator recordComparator = new RecordComparator() {
      @Override
      public int compare(
        Object leftBaseObject,
        long leftBaseOffset,
        int leftBaseLength,
        Object rightBaseObject,
        long rightBaseOffset,
        int rightBaseLength) {
        return 0;
      }
    };
    UnsafeInMemorySorter sorter = new UnsafeInMemorySorter(consumer, memoryManager,
      recordComparator, prefixComparator, 100, shouldUseRadixSort());
    testMemoryManager.markExecutionAsOutOfMemoryOnce();
    try {
      sorter.reset();
      // Fixed typo in the failure message ("OutOfMmoryError").
      fail("expected OutOfMemoryError but it seems operation surprisingly succeeded");
    } catch (OutOfMemoryError oom) {
      // as expected
    }
    // [SPARK-21907] this failed on NPE at
    // org.apache.spark.memory.MemoryConsumer.freeArray(MemoryConsumer.java:108)
    sorter.free();
    // simulate a 'back to back' free.
    sorter.free();
  }
}
| 9,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.