Dataset schema: code (string, 25 to 201k chars) · docstring (string, 19 to 96.2k chars) · func_name (string, 0 to 235 chars) · language (1 class) · repo (string, 8 to 51 chars) · path (string, 11 to 314 chars) · url (string, 62 to 377 chars) · license (7 classes)
public static void mergeTypeClassPaths( List<String> destinationPaths, final List<String> sourcePaths, final String rootPath) { if (sourcePaths != null) { for (String jar : sourcePaths) { File file = new File(jar); if (!file.isAbsolute()) { file = new File(rootPath + File.separatorChar + jar); } String path = file.getAbsolutePath(); if (!destinationPaths.contains(path)) { destinationPaths.add(path); } } } }
Merges the absolute paths of the source paths into the list of destination paths @param destinationPaths the path list into which the source paths will be merged @param sourcePaths source paths @param rootPath root path used to resolve source paths when they are not absolute
mergeTypeClassPaths
java
azkaban/azkaban
az-core/src/main/java/azkaban/utils/Utils.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/main/java/azkaban/utils/Utils.java
Apache-2.0
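Usage sketch for mergeTypeClassPaths above (paths hypothetical; assumes azkaban.utils.Utils is on the classpath): relative entries are resolved against the root path, and duplicates are skipped.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import azkaban.utils.Utils;

public class MergeClassPathsDemo {
  public static void main(String[] args) {
    List<String> destination = new ArrayList<>(Arrays.asList("/opt/azkaban/lib/core.jar"));
    // "lib/plugin.jar" is relative, so it is resolved against /opt/azkaban;
    // the absolute duplicate of core.jar is skipped.
    Utils.mergeTypeClassPaths(destination,
        Arrays.asList("lib/plugin.jar", "/opt/azkaban/lib/core.jar"), "/opt/azkaban");
    System.out.println(destination); // [/opt/azkaban/lib/core.jar, /opt/azkaban/lib/plugin.jar]
  }
}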
public static void mergeStringList( final List<String> destinationList, final List<String> sourceList) { if (sourceList != null) { for (String item : sourceList) { if (!destinationList.contains(item)) { destinationList.add(item); } } } }
Merges elements of the source list into the destination list @param destinationList the list into which the source elements will be merged @param sourceList source list
mergeStringList
java
azkaban/azkaban
az-core/src/main/java/azkaban/utils/Utils.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/main/java/azkaban/utils/Utils.java
Apache-2.0
public long getGaugeValue(final String name) { // Assume that the gauge value can be converted to type long. return (long) this.registry.getGauges().get(name).getValue(); }
This is a utility class for testing Dropwizard metrics.
getGaugeValue
java
azkaban/azkaban
az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
Apache-2.0
public long getCounterValue(final String name) { return this.registry.getCounters().get(name).getCount(); }
@return the value for the specified {@link Counter}
getCounterValue
java
azkaban/azkaban
az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
Apache-2.0
public long getMeterValue(final String name) { return this.registry.getMeters().get(name).getCount(); }
@return the value for the specified {@link Meter}
getMeterValue
java
azkaban/azkaban
az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
Apache-2.0
public Snapshot getHistogramSnapshot(final String name) { return this.registry.getHistograms().get(name).getSnapshot(); }
@return the {@link Snapshot} for the specified {@link Histogram}.
getHistogramSnapshot
java
azkaban/azkaban
az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
Apache-2.0
public long getTimerCount(final String name) { return this.registry.getTimers().get(name).getCount(); }
@return the count for the specified {@link Timer}.
getTimerCount
java
azkaban/azkaban
az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
Apache-2.0
public Snapshot getTimerSnapshot(final String name) { return this.registry.getTimers().get(name).getSnapshot(); }
@return the {@link Snapshot} for the specified {@link Timer}.
getTimerSnapshot
java
azkaban/azkaban
az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
https://github.com/azkaban/azkaban/blob/master/az-core/src/test/java/azkaban/metrics/MetricsTestUtility.java
Apache-2.0
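The MetricsTestUtility accessors above read raw values back out of a Dropwizard MetricRegistry. A minimal sketch tying them together, assuming the class exposes a constructor that takes the registry under test:

import com.codahale.metrics.MetricRegistry;

public class MetricsTestUtilityDemo {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    registry.counter("flow.submissions").inc(3);
    registry.meter("flow.events").mark();
    registry.histogram("flow.latency").update(42);
    MetricsTestUtility util = new MetricsTestUtility(registry); // assumed constructor
    System.out.println(util.getCounterValue("flow.submissions"));           // 3
    System.out.println(util.getMeterValue("flow.events"));                  // 1
    System.out.println(util.getHistogramSnapshot("flow.latency").getMax()); // 42
  }
}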
public static Version fromVerString(final String ver) { final Version result = REVERSE_ENTRIES.get(ver); /* check the lookup result, not the input, so unknown version strings fail fast */ Preconditions.checkNotNull(result, "Invalid version " + ver); return result; }
Provides Version enum based on version String @param ver Version String
fromVerString
java
azkaban/azkaban
az-crypto/src/main/java/azkaban/crypto/Version.java
https://github.com/azkaban/azkaban/blob/master/az-crypto/src/main/java/azkaban/crypto/Version.java
Apache-2.0
public static List<String> versionStrings() { final List<String> versions = Lists.newArrayList(REVERSE_ENTRIES.keySet()); Collections.sort(versions); return versions; }
@return Naturally ordered list of version Strings.
versionStrings
java
azkaban/azkaban
az-crypto/src/main/java/azkaban/crypto/Version.java
https://github.com/azkaban/azkaban/blob/master/az-crypto/src/main/java/azkaban/crypto/Version.java
Apache-2.0
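A short sketch of how the two Version helpers above compose (the actual version strings live in REVERSE_ENTRIES; the round-trip shown is illustrative):

import java.util.List;

// List the known versions in natural order, then look one back up as an enum.
List<String> versions = Version.versionStrings();
if (!versions.isEmpty()) {
  Version v = Version.fromVerString(versions.get(0));
  System.out.println(versions.get(0) + " -> " + v);
}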
public synchronized List<String> getTopicList() { final List<String> res = new ArrayList<>(this.topicEventMap.keySet()); return res; }
Gets the list of topics. @return List of topic name Strings
getTopicList
java
azkaban/azkaban
az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaDepInstanceCollection.java
https://github.com/azkaban/azkaban/blob/master/az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaDepInstanceCollection.java
Apache-2.0
public synchronized Set<String> regexInTopic(final String topic, final String payload) { final Set<String> res = new HashSet<>(); final Map<String, List<KafkaDependencyInstanceContext>> eventMap = this.topicEventMap.get(topic); if (eventMap == null) { return Collections.emptySet(); } for (final Map.Entry<String, List<KafkaDependencyInstanceContext>> entry : eventMap.entrySet()) { final RegexKafkaDependencyMatcher matcher = new RegexKafkaDependencyMatcher(Pattern.compile(entry.getKey())); if (matcher.isMatch(payload)) { res.add(entry.getKey()); } } return res; }
Returns the set of patterns under the given topic that match the payload. @param topic the topic whose patterns are checked @param payload the payload to match against @return regexes that meet the customized requirement
regexInTopic
java
azkaban/azkaban
az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaDepInstanceCollection.java
https://github.com/azkaban/azkaban/blob/master/az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaDepInstanceCollection.java
Apache-2.0
@VisibleForTesting synchronized void consumerSubscriptionRebalance() { log.debug("Subscribed Topics " + this.consumer.subscription()); if (!this.subscribedTopics.isEmpty()) { final Iterator<String> iter = this.subscribedTopics.iterator(); final List<String> topics = new ArrayList<>(); while (iter.hasNext()) { topics.add(iter.next()); } this.subscribedTopics.clear(); //re-subscribe topics that are needed this.consumer.subscribe(topics); } }
Dynamically tunes the subscription to only the topics that dependencies need.
consumerSubscriptionRebalance
java
azkaban/azkaban
az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaEventMonitor.java
https://github.com/azkaban/azkaban/blob/master/az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaEventMonitor.java
Apache-2.0
private void triggerDependencies(final Set<String> matchedList, final ConsumerRecord<String, String> record) { final List<KafkaDependencyInstanceContext> deleteList = new LinkedList<>(); for (final String it : matchedList) { final List<KafkaDependencyInstanceContext> possibleAvailableDeps = this.depInstances.getDepsByTopicAndEvent(record.topic(), it); for (final KafkaDependencyInstanceContext dep : possibleAvailableDeps) { dep.getCallback().onSuccess(dep); deleteList.add(dep); } //If dependencies that need to be removed could lead to unsubscribing topics, do the topics rebalance if (!this.depInstances.removeList(record.topic(), it, deleteList)) { this.subscribedTopics.addAll(this.depInstances.getTopicList()); } } }
If the matcher returns true, removes the dependency from the collection.
triggerDependencies
java
azkaban/azkaban
az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaEventMonitor.java
https://github.com/azkaban/azkaban/blob/master/az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/KafkaEventMonitor.java
Apache-2.0
@Override public boolean isMatch(String payload) { return pattern.matcher(payload).find(); }
A RegexKafkaDependencyMatcher implements the regex match over the whole Kafka payload.
isMatch
java
azkaban/azkaban
az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/RegexKafkaDependencyMatcher.java
https://github.com/azkaban/azkaban/blob/master/az-flow-trigger-dependency-type/kafka-event-trigger/src/main/java/trigger/kafka/RegexKafkaDependencyMatcher.java
Apache-2.0
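A standalone sketch of the matching contract above: isMatch delegates to Matcher.find(), so a regex hit anywhere in the payload counts, not just a full-string match.

import java.util.regex.Pattern;

public class PayloadMatchDemo {
  public static void main(String[] args) {
    Pattern pattern = Pattern.compile("deploy-complete"); // hypothetical dependency regex
    String payload = "{\"event\":\"deploy-complete\",\"service\":\"ads\"}";
    System.out.println(pattern.matcher(payload).find());    // true: substring hit is enough
    System.out.println(pattern.matcher(payload).matches()); // false: full match would fail
  }
}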
public static void injectResources(Props props) { // Add mapred, yarn and hdfs site configs (in addition to core-site, which // is automatically added) as default resources before we add the injected // configuration. This will cause the injected properties to override the // default site properties (instead of vice-versa). This is safe to do, // even when these site files don't exist for your Hadoop installation. if (props.getBoolean("azkaban.inject.hadoop-site.configs", true)) { Configuration.addDefaultResource("mapred-default.xml"); Configuration.addDefaultResource("mapred-site.xml"); Configuration.addDefaultResource("yarn-default.xml"); Configuration.addDefaultResource("yarn-site.xml"); Configuration.addDefaultResource("hdfs-default.xml"); Configuration.addDefaultResource("hdfs-site.xml"); } Configuration.addDefaultResource(INJECT_FILE); }
HadoopConfigurationInjector is responsible for inserting links back to the Azkaban UI in configurations and for automatically injecting designated job properties into the Hadoop configuration. <p> It is assumed that the necessary links have already been loaded into the properties. After writing the necessary links as an XML file, as required by Hadoop's configuration, clients may add the links as a default resource using injectResources() so that they are included in any Configuration constructed.
injectResources
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
Apache-2.0
public static File getConfFile(Props props, String workingDir, String fileName) { File jobDir = new File(workingDir, getDirName(props)); if (!jobDir.exists()) { jobDir.mkdir(); } return new File(jobDir, fileName); }
Resolves the location of the configuration file. @param props The Azkaban properties @param workingDir The Azkaban job working directory @param fileName The desired configuration file name
getConfFile
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
Apache-2.0
public static String getDirName(Props props) { String dirSuffix = props.get(CommonJobProperties.NESTED_FLOW_PATH); if ((dirSuffix == null) || (dirSuffix.length() == 0)) { dirSuffix = props.get(CommonJobProperties.JOB_ID); if ((dirSuffix == null) || (dirSuffix.length() == 0)) { throw new RuntimeException("azkaban.flow.nested.path and azkaban.job.id were not set"); } } return "_resources_" + dirSuffix.replace(':', '_'); }
For classpath reasons, we'll put each link file in a separate directory. This must be called only after the job id has been inserted by the job. @param props The Azkaban properties
getDirName
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
Apache-2.0
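A minimal illustration of the name derivation in getDirName above (property value hypothetical): the nested flow path takes precedence over the job id, and ':' is replaced so the result is filesystem-safe.

// azkaban.flow.nested.path set to a hypothetical value:
String dirSuffix = "outerFlow:innerFlow:job1";
String dirName = "_resources_" + dirSuffix.replace(':', '_');
System.out.println(dirName); // _resources_outerFlow_innerFlow_job1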
public static String getPath(Props props, String workingDir) { return new File(workingDir, getDirName(props)).toString(); }
Gets the path to the directory in which the generated links and Hadoop conf properties files are written. @param props The Azkaban properties @param workingDir The Azkaban job working directory
getPath
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
Apache-2.0
public static void loadProp(Props props, Configuration conf, String name) { String prop = props.get(name); if (prop != null) { conf.set(name, prop); } }
Loads an Azkaban property into the Hadoop configuration. @param props The Azkaban properties @param conf The Hadoop configuration @param name The property name to load from the Azkaban properties into the Hadoop configuration
loadProp
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopConfigurationInjector.java
Apache-2.0
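Usage sketch for loadProp above (property name hypothetical): copy a single Azkaban property into a Hadoop Configuration, with absent properties silently skipped.

import org.apache.hadoop.conf.Configuration;

// Given some Azkaban Props `props`:
Configuration conf = new Configuration();
HadoopConfigurationInjector.loadProp(props, conf, "azkaban.flow.execid"); // no-op if unset
System.out.println(conf.get("azkaban.flow.execid"));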
@Override public void cancel() throws InterruptedException { super.cancel(); info("Cancel called. Killing the Hive launched MR jobs on the cluster"); getHadoopProxy().killAllSpawnedHadoopJobs(getJobProps(), getLog()); }
This cancel method, in addition to the default canceling behavior, also kills the MR jobs launched by Hive on Hadoop
cancel
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopHiveJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopHiveJob.java
Apache-2.0
public static void addAdditionalNamenodesToPropsFromMRJob(final Props props, final Logger log) { final String additionalNamenodes = (new Configuration()).get(MAPREDUCE_JOB_OTHER_NAMENODES); if (additionalNamenodes != null && additionalNamenodes.length() > 0) { log.info("Found property " + MAPREDUCE_JOB_OTHER_NAMENODES + " = " + additionalNamenodes + "; setting additional namenodes"); HadoopJobUtils.addAdditionalNamenodesToProps(props, additionalNamenodes); } }
The same as {@link #addAdditionalNamenodesToProps}, but assumes that the calling job is MapReduce-based and so uses the {@link #MAPREDUCE_JOB_OTHER_NAMENODES} from a {@link Configuration} object to get the list of additional namenodes. @param props Props to add the new Namenode URIs to. @see #addAdditionalNamenodesToProps(Props, String)
addAdditionalNamenodesToPropsFromMRJob
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
public static void addAdditionalNamenodesToProps(final Props props, final String additionalNamenodes) { final String otherNamenodes = props.get(OTHER_NAMENODES_PROPERTY); if (otherNamenodes != null && otherNamenodes.length() > 0) { props.put(OTHER_NAMENODES_PROPERTY, otherNamenodes + "," + additionalNamenodes); } else { props.put(OTHER_NAMENODES_PROPERTY, additionalNamenodes); } }
Takes the list of other Namenodes from which to fetch delegation tokens, the {@link #OTHER_NAMENODES_PROPERTY} property, from Props and inserts it back with the addition of the potentially JobType-specific Namenode URIs from additionalNamenodes. Modifies props in-place. @param props Props to add the new Namenode URIs to. @param additionalNamenodes Comma-separated list of Namenode URIs from which to fetch delegation tokens.
addAdditionalNamenodesToProps
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
public static File getHadoopTokens(final HadoopSecurityManager hadoopSecurityManager, final Props props, final Logger log) throws HadoopSecurityManagerException { File tokenFile = null; try { tokenFile = File.createTempFile("mr-azkaban", ".token"); } catch (final Exception e) { throw new HadoopSecurityManagerException("Failed to create the token file.", e); } hadoopSecurityManager.prefetchToken(tokenFile, props, log); return tokenFile; }
Fetches Hadoop tokens as the Azkaban user and returns the temporary token file.
getHadoopTokens
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
public static String resolveWildCardForJarSpec(final String workingDirectory, final String unresolvedJarSpec, final Logger log) { log.debug("resolveWildCardForJarSpec: unresolved jar specification: " + unresolvedJarSpec); log.debug("working directory: " + workingDirectory); if (unresolvedJarSpec == null || unresolvedJarSpec.isEmpty()) { return ""; } final StringBuilder resolvedJarSpec = new StringBuilder(); final String[] unresolvedJarSpecList = unresolvedJarSpec.split(","); for (final String s : unresolvedJarSpecList) { // if need resolution if (s.endsWith("*")) { // remove last 2 characters to get to the folder final String dirName = String .format("%s/%s", workingDirectory, s.substring(0, s.length() - 2)); File[] jars = null; try { jars = getFilesInFolderByRegex(new File(dirName), ".*jar"); } catch (final FileNotFoundException fnfe) { log.warn("folder does not exist: " + dirName); continue; } // if the folder is there, add them to the jar list for (final File jar : jars) { resolvedJarSpec.append(jar.toString()).append(","); } } else { // no need for resolution resolvedJarSpec.append(s).append(","); } } log.debug("resolveWildCardForJarSpec: resolvedJarSpec: " + resolvedJarSpec); // remove the trailing comma final int lastCharIndex = resolvedJarSpec.length() - 1; if (lastCharIndex >= 0 && resolvedJarSpec.charAt(lastCharIndex) == ',') { resolvedJarSpec.deleteCharAt(lastCharIndex); } return resolvedJarSpec.toString(); }
<pre> If there's a * specification in the "jar" argument (e.g. jar=./lib/*,./lib2/*), this method helps to resolve the * into actual jar names inside the folder, in order. This is needed because Spark 1.4 does not appear to do the resolution for users. </pre> @return jar file list, comma separated, all .../* expanded into actual jar names in order
resolveWildCardForJarSpec
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
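A hedged usage sketch of the wildcard expansion above (directory layout hypothetical; `logger` assumed available):

// Assume /tmp/job/lib contains a.jar and b.jar.
String resolved = HadoopJobUtils.resolveWildCardForJarSpec(
    "/tmp/job", "./lib/*,./extra/fixed.jar", logger);
// -> roughly "/tmp/job/./lib/a.jar,/tmp/job/./lib/b.jar,./extra/fixed.jar":
//    wildcard entries are expanded via the .jar regex scan, others pass through untouched.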
public static Set<String> getApplicationIDsToKill(YarnClient yarnClient, Props jobProps, final Logger log) { Set<String> jobsToKill; String yarnKillVersion = jobProps.getString(YARN_KILL_VERSION, YARN_KILL_LEGACY).trim(); if (YARN_KILL_USE_API_WITH_TOKEN.equals(yarnKillVersion)) { try { jobsToKill = YarnUtils.getAllAliveAppIDsByExecID(yarnClient, jobProps.getString(AZKABAN_FLOW_EXEC_ID), log); log.info(String.format("Get alive yarn application IDs from yarn cluster: %s", jobsToKill)); } catch (Exception e) { log.warn("fail to get application-ids from yarn, fallback to scan logfile", e); final String logFilePath = jobProps.getString(CommonJobProperties.JOB_LOG_FILE); log.info("The job log file path is: " + logFilePath); jobsToKill = findApplicationIdFromLog(logFilePath, log); log.info(String.format("Get all spawned yarn application IDs from job log file: %s", jobsToKill)); } } else { final String logFilePath = jobProps.getString(CommonJobProperties.JOB_LOG_FILE); jobsToKill = findApplicationIdFromLog(logFilePath, log); } return jobsToKill; }
Gets the yarn application ids that need to be killed (the ones alive / spawned). First uses the yarn client to query the cluster; if that fails, falls back to scanning the job log file for application ids. @param yarnClient the started client @param jobProps should contain the flow execution id and the job log file's path @param log logger @return the set of application ids
getApplicationIDsToKill
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
public static Set<String> killAllSpawnedHadoopJobs(final String logFilePath, final Logger log, final Props jobProps) { final Set<String> allSpawnedJobs = findApplicationIdFromLog(logFilePath, log); log.info("applicationIds to kill: " + allSpawnedJobs); for (final String appId : allSpawnedJobs) { try { killJobOnCluster(appId, log, jobProps); } catch (final Throwable t) { log.warn("something happened while trying to kill this job: " + appId, t); } } return allSpawnedJobs; }
Given a log file, this method finds all the hadoop jobs the job has launched and kills them. <p> Only works with Hadoop2 @return a Set<String> containing the applicationIds that this job tried to kill.
killAllSpawnedHadoopJobs
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
public static Set<String> findApplicationIdFromLog(final String logFilePath, final Logger log) { // At least one job log file must be there. final File logFile = new File(logFilePath); if (!logFile.exists()) { throw new IllegalArgumentException("the logFilePath does not exist: " + logFilePath); } // Get the directory and file name // The log files have names in this format, // _job<job name specific string>.log // A rolled over log file name appends to above format and adds an index in this format, // _job<job name specific string>.log.<index> // The index starts at 1 upto a configurable number. final int lastSlash = logFilePath.lastIndexOf('/'); final String dirPath = logFilePath.substring(0, lastSlash); final String logFileName = logFilePath.substring(lastSlash + 1); // Fetch all the log files for this job which start with // _job<job name specific string>.log final File[] logFiles = new File(dirPath).listFiles((dir, name) -> name.startsWith(logFileName)); BufferedReader br = null; final Set<String> applicationIds = new HashSet<>(); // There can be more than one log file. Go through each one of them. for (final File curLogFile : logFiles) { // Start with sanity checks if (!curLogFile.exists()) { throw new IllegalArgumentException("the logFilePath does not exist: " + curLogFile.getAbsolutePath()); } if (!curLogFile.isFile()) { throw new IllegalArgumentException("the logFilePath specified is not a valid file: " + curLogFile.getAbsolutePath()); } if (!curLogFile.canRead()) { throw new IllegalArgumentException( "unable to read the logFilePath specified: " + curLogFile.getAbsolutePath()); } try { br = new BufferedReader(new InputStreamReader( new FileInputStream(curLogFile), StandardCharsets.UTF_8)); String line; // finds all the application IDs while ((line = br.readLine()) != null) { final String[] inputs = line.split("\\s"); for (final String input : inputs) { final Matcher m = APPLICATION_ID_PATTERN.matcher(input); if (m.find()) { final String appId = m.group(1); applicationIds.add(appId); } } // end for loop } } catch (final IOException e) { log.error("Error while trying to find applicationId from " + curLogFile.getAbsolutePath() + ". Some MR jobs may leak.", e); } finally { try { if (br != null) { br.close(); } } catch (final IOException e) { // do nothing } } } return applicationIds; }
<pre> Takes in a log file and greps every line for the application_id pattern. If it finds multiple, it returns all of them, de-duped (this is possible in the case of pig jobs). This can be used in conjunction with the @killJobOnCluster method in this file. </pre> @return a Set. May be empty, but will never be null
findApplicationIdFromLog
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
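A sketch of the per-token grep performed above. APPLICATION_ID_PATTERN is defined elsewhere in HadoopJobUtils; the pattern below is an assumption of its likely shape, not the actual constant.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

Pattern appIdPattern = Pattern.compile("(application_\\d+_\\d+)"); // assumed shape
String line = "INFO Submitted application application_1612345678901_0042 to queue default";
for (String token : line.split("\\s")) {
  Matcher m = appIdPattern.matcher(token);
  if (m.find()) {
    System.out.println(m.group(1)); // application_1612345678901_0042
  }
}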
public static void killJobOnCluster(final String applicationId, final Logger log, final Props jobProps) throws YarnException, IOException { final YarnConfiguration yarnConf = new YarnConfiguration(); final YarnClient yarnClient = YarnClient.createYarnClient(); if (jobProps.containsKey(YARN_CONF_DIRECTORY_PROPERTY)) { yarnConf.addResource( new Path(jobProps.get(YARN_CONF_DIRECTORY_PROPERTY) + "/" + YARN_CONF_FILENAME)); } yarnClient.init(yarnConf); yarnClient.start(); final String[] split = applicationId.split("_"); final ApplicationId aid = ApplicationId.newInstance(Long.parseLong(split[1]), Integer.parseInt(split[2])); log.info("start killing application: " + aid); yarnClient.killApplication(aid); log.info("successfully killed application: " + aid); }
<pre> Uses YarnClient to kill the job on the cluster. Using JobClient only works partially: If the yarn container has started but the spark job hasn't, it will kill it If the spark job has started, the cancel will hang until the spark job is complete If the spark job is complete, it will return immediately, with a job not found on the job tracker </pre>
killJobOnCluster
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
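How the string id maps onto a YARN ApplicationId, per the split in killJobOnCluster above: the id has the form application_<clusterTimestamp>_<sequence>.

import org.apache.hadoop.yarn.api.records.ApplicationId;

String applicationId = "application_1612345678901_0042";
String[] split = applicationId.split("_");
ApplicationId aid = ApplicationId.newInstance(
    Long.parseLong(split[1]), Integer.parseInt(split[2]));
System.out.println(aid); // prints the same id back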
public static List<String> filterCommands(final Collection<String> commands, final String whitelistRegex, final String blacklistRegex, final Logger log) { final List<String> filteredCommands = new LinkedList<>(); final Pattern whitelistPattern = Pattern.compile(whitelistRegex); final Pattern blacklistPattern = Pattern.compile(blacklistRegex); for (final String command : commands) { if (whitelistPattern.matcher(command).matches() && !blacklistPattern.matcher(command).matches()) { filteredCommands.add(command); } else { log.warn(String.format("Removing restricted command: %s", command)); } } return filteredCommands; }
Filter a collection of String commands to match a whitelist regex and not match a blacklist regex. @param commands Collection of commands to be filtered @param whitelistRegex whitelist regex to work as inclusion criteria @param blacklistRegex blacklist regex to work as exclusion criteria @param log logger to report violations @return filtered list of matching commands. Empty list if no commands match all the criteria.
filterCommands
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopJobUtils.java
Apache-2.0
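Usage sketch for filterCommands above (regexes hypothetical; `logger` assumed available). Note that Matcher.matches() is anchored, so the whitelist must cover the whole command.

import java.util.Arrays;
import java.util.List;

List<String> commands = Arrays.asList("hadoop fs -ls /data", "rm -rf /");
List<String> safe = HadoopJobUtils.filterCommands(commands, "hadoop .*", ".*-rf.*", logger);
System.out.println(safe); // [hadoop fs -ls /data]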
private void copyAndModifyScript(File source, File dest, Map<Pattern, String> rampRegisterItems) throws IOException { /* try-with-resources closes both streams even when reading or writing fails */ try (BufferedReader bufferedReader = Files.newBufferedReader(source.toPath(), Charset.defaultCharset()); PrintWriter printWriter = new PrintWriter(Files.newBufferedWriter(dest.toPath(), Charset.defaultCharset()))) { String line; while ((line = bufferedReader.readLine()) != null) { printWriter.println(replaceRegisterStatements(line, rampRegisterItems)); } } }
Copies the Pig script from source to destination and updates REGISTER statements based on the map of ramp items
copyAndModifyScript
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopPigJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopPigJob.java
Apache-2.0
@Override public void cancel() throws InterruptedException { super.cancel(); info("Cancel called. Killing the Pig launched MR jobs on the cluster"); getHadoopProxy().killAllSpawnedHadoopJobs(getJobProps(), getLog()); }
This cancel method, in addition to the default canceling behavior, also kills the MR jobs launched by Pig on Hadoop
cancel
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopPigJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopPigJob.java
Apache-2.0
private void init(Props sysProps, Props jobProps, final Logger logger) { shouldProxy = sysProps.getBoolean(HadoopSecurityManager.ENABLE_PROXYING, false); jobProps.put(HadoopSecurityManager.ENABLE_PROXYING, Boolean.toString(shouldProxy)); if (!jobProps.containsKey(YARN_KILL_VERSION)) { String yarnKillVersion = sysProps.getString(YARN_KILL_VERSION, YARN_KILL_LEGACY); logger.info("sysProps.YARN_KILL_VERSION is:" + yarnKillVersion); jobProps.put(YARN_KILL_VERSION, yarnKillVersion); } logger.info("jobProps.YARN_KILL_VERSION is set to:" + jobProps.getString(YARN_KILL_VERSION)); obtainTokens = sysProps.getBoolean(HadoopSecurityManager.OBTAIN_BINARY_TOKEN, false); if (shouldProxy) { logger.info("Initiating hadoop security manager."); try { // hadoop.security.manager.class can be configured for each Hadoop cluster and it is exposed to // Hadoop jobs as a job property (See JobTypeManager.getClusterSpecificJobProps()).If no cluster // is configured, we can not find it from job properties. Instead, try to load it from sysProps. // This preserves the existing behavior when no clusters (& routing) are configured. final String hadoopSecurityClassName = jobProps.get(HadoopJobUtils.HADOOP_SECURITY_MANAGER_CLASS_PARAM) != null ? jobProps.get(HadoopJobUtils.HADOOP_SECURITY_MANAGER_CLASS_PARAM) : sysProps.getString(HadoopJobUtils.HADOOP_SECURITY_MANAGER_CLASS_PARAM); final Class<?> hadoopSecurityManagerClass = HadoopProxy.class.getClassLoader().loadClass(hadoopSecurityClassName); logger.info("Loading hadoop security manager " + hadoopSecurityManagerClass.getName()); hadoopSecurityManager = (HadoopSecurityManager) Utils.callConstructor(hadoopSecurityManagerClass, sysProps); } catch (Exception e) { logger.error("Could not instantiate Hadoop Security Manager ", e); throw new RuntimeException("Failed to get hadoop security manager!" + e.getCause(), e); } } }
Initialize the Hadoop Proxy Object @param sysProps system properties @param jobProps job properties @param logger logger handler
init
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
Apache-2.0
public void setupPropsForProxy(Props props, Props jobProps, final Logger logger) throws Exception { if (isProxyEnabled()) { userToProxy = jobProps.getString(HadoopSecurityManager.USER_TO_PROXY); logger.info("Need to proxy. Getting tokens."); // get tokens in to a file, and put the location in props tokenFile = HadoopJobUtils.getHadoopTokens(hadoopSecurityManager, props, logger); jobProps.put("env." + HADOOP_TOKEN_FILE_LOCATION, tokenFile.getAbsolutePath()); } }
Setup Job Properties when the proxy is enabled @param props all properties @param jobProps job properties @param logger logger handler
setupPropsForProxy
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
Apache-2.0
public String getJVMArgument(Props sysProps, Props jobProps, final Logger logger) { String secure = ""; if (shouldProxy) { logger.info("Setting up secure proxy info for child process"); secure = " -D" + HadoopSecurityManager.USER_TO_PROXY + "=" + jobProps.getString(HadoopSecurityManager.USER_TO_PROXY); String extraToken = sysProps.getString(HadoopSecurityManager.OBTAIN_BINARY_TOKEN, "false"); if (extraToken != null) { secure += " -D" + HadoopSecurityManager.OBTAIN_BINARY_TOKEN + "=" + extraToken; } logger.info("Secure settings = " + secure); } else { logger.info("Not setting up secure proxy info for child process"); } return secure; }
Generate JVM Proxy Secure Argument @param sysProps system properties @param jobProps job properties @param logger logger handler @return proxy secure JVM argument string
getJVMArgument
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
Apache-2.0
public void cancelHadoopTokens(final Logger logger) { if (tokenFile == null) { return; } try { hadoopSecurityManager.cancelTokens(tokenFile, userToProxy, logger); } catch (Exception e) { /* HadoopSecurityManagerException and any other failure are handled identically */ logger.error(e.getCause() + e.getMessage()); } }
Cancel Hadoop Tokens @param logger logger handler
cancelHadoopTokens
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
Apache-2.0
public void killAllSpawnedHadoopJobs(Props jobProps, final Logger logger) { if (tokenFile == null) { return; // do null check for tokenFile } HadoopJobUtils.proxyUserKillAllSpawnedHadoopJobs(this.hadoopSecurityManager, jobProps, tokenFile, logger); }
Kill all Spawned Hadoop Jobs @param jobProps job properties @param logger logger handler
killAllSpawnedHadoopJobs
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopProxy.java
Apache-2.0
private static String stripSingleDoubleQuote(String input) { if (StringUtils.isEmpty(input)) { return input; } if (input.startsWith(SINGLE_QUOTE_STRING) || input.startsWith(DOUBLE_QUOTE_STRING)) { input = input.substring(1); } if (input.endsWith(SINGLE_QUOTE_STRING) || input.endsWith(DOUBLE_QUOTE_STRING)) { input = input.substring(0, input.length() - 1); } return input; }
Strips a single quote or double quote at either end of the string @return string without leading or trailing single or double quote
stripSingleDoubleQuote
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureHiveWrapper.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureHiveWrapper.java
Apache-2.0
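A behavior sketch for stripSingleDoubleQuote above (called from within the class, since the method is private): at most one leading and one trailing quote are stripped, and the two need not match.

System.out.println(stripSingleDoubleQuote("'select 1'"));   // select 1
System.out.println(stripSingleDoubleQuote("\"select 1\"")); // select 1
System.out.println(stripSingleDoubleQuote("'select 1\""));  // select 1 (mismatched quotes still stripped)
System.out.println(stripSingleDoubleQuote("se'lect"));      // se'lect (interior quotes untouched)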
protected static String[] handleQueueEnforcement(String[] argArray) { SparkConf sparkConf = getSparkProperties(); Configuration conf = new Configuration(); int queueParameterIndex = getUserSpecifiedQueueParameterIndex(argArray); boolean requiredSparkDefaultQueue = false; if (sparkConf.getBoolean(SPARK_CONF_DYNAMIC_ALLOC_ENABLED, false)) { if (isLargeContainerRequired(argArray, conf, sparkConf)) { // Case A requiredSparkDefaultQueue = true; logger.info( "Spark application requires Large containers. Scheduling this application into default queue by a " + "default conf(spark.yarn.queue) in spark-defaults.conf."); } else { // Case B logger.info( "Dynamic allocation is enabled for selected spark version and application requires small container. " + "Hence, scheduling this application into Org specific queue"); if (queueParameterIndex == -1) { LinkedList<String> argList = new LinkedList(Arrays.asList(argArray)); argList.addFirst(SPARK_CONF_QUEUE + "=" + DEFAULT_QUEUE); argList.addFirst(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName); argArray = argList.toArray(new String[argList.size()]); } } } else { // Case C logger.info( "Spark version, selected for this application, doesn't support dynamic allocation. Scheduling this " + "application into default queue by a default conf(spark.yarn.queue) in spark-defaults.conf."); requiredSparkDefaultQueue = true; } if (queueParameterIndex != -1 && requiredSparkDefaultQueue) { logger.info("Azkaban enforces spark.yarn.queue queue. Ignore user param: " + argArray[queueParameterIndex] + " " + argArray[queueParameterIndex + 1]); argArray[queueParameterIndex] = null; argArray[queueParameterIndex + 1] = null; } return argArray; }
This method is used to enforce queue for Spark application. Rules are explained below. a) If dynamic resource allocation is enabled for selected spark version and application requires large container then schedule it into default queue by a default conf(spark.yarn.queue) in spark-defaults.conf. b) If dynamic resource allocation is enabled for selected spark version and application requires small container then schedule it into Org specific queue. c) If dynamic resource allocation is disabled for selected spark version then schedule application into default queue by a default conf(spark.yarn.queue) in spark-defaults.conf.
handleQueueEnforcement
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
Apache-2.0
private static void ignoreUserSpecifiedNodeLabelParameter(String[] argArray, boolean autoNodeLabeling) { for (int i = 0; i < argArray.length; i++) { if (argArray[i] == null) { continue; } if (autoNodeLabeling) { // This config will be automatically set by the job type based on the mem-to-vcore resource ratio requested by // the user application. if (argArray[i].equals(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName) && argArray[i + 1] .startsWith(SPARK_EXECUTOR_NODE_LABEL_EXP)) { logger.info( "Azkaban auto-sets node label expression. Ignore user param: " + argArray[i] + " " + argArray[i + 1]); argArray[i] = null; argArray[++i] = null; continue; } } } }
This method is used to ignore the user specified node label parameter. When auto node labeling is enabled, the job type should ignore the user supplied node label expression for Spark executors.
ignoreUserSpecifiedNodeLabelParameter
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
Apache-2.0
private static Map<String, String> getUserSpecifiedExecutorParameters(String[] argArray) { Map<String, String> executorParameters = Maps.newHashMap(); for (int i = 0; i < argArray.length; i++) { if (argArray[i] == null) { continue; } if (argArray[i].equals(SparkJobArg.EXECUTOR_CORES.sparkParamName)) { executorParameters.put(SPARK_EXECUTOR_CORES, argArray[++i]); } if (argArray[i].equals(SparkJobArg.EXECUTOR_MEMORY.sparkParamName)) { executorParameters.put(SPARK_EXECUTOR_MEMORY, argArray[++i]); } if (argArray[i].equals(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName) && argArray[i + 1] .startsWith(SPARK_EXECUTOR_MEMORY_OVERHEAD)) { executorParameters .put(SPARK_EXECUTOR_MEMORY_OVERHEAD, argArray[i + 1].split("=")[1].trim()); } } return executorParameters; }
This method is used to get User specified executor parameters. It is capturing executor-memory, executor-cores and spark.yarn.executor.memoryOverhead.
getUserSpecifiedExecutorParameters
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
Apache-2.0
private static int getUserSpecifiedQueueParameterIndex(String[] argArray) { int queueParameterIndex = -1; for (int i = 0; i < argArray.length; i++) { if (argArray[i] == null) { continue; } // Fetch index of queue parameter passed by User. // (--queue test or --conf spark.yarn.queue=test) if ((argArray[i].equals(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName) && argArray[i + 1] .startsWith(SPARK_CONF_QUEUE)) || (argArray[i] .equals(SparkJobArg.QUEUE.sparkParamName))) { queueParameterIndex = i++; break; } } return queueParameterIndex; }
This method is used to retrieve index of queue parameter passed by User.
getUserSpecifiedQueueParameterIndex
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
Apache-2.0
private static SparkConf getSparkProperties() { String sparkPropertyFile = HadoopSecureSparkWrapper.class.getClassLoader() .getResource("spark-defaults.conf").getPath(); SparkConf sparkConf = new SparkConf(false); sparkConf.setAll(Utils.getPropertiesFromFile(sparkPropertyFile)); return sparkConf; }
This method is used to get Spark properties, which are fetched from the spark-defaults.conf file.
getSparkProperties
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureSparkWrapper.java
Apache-2.0
private static UserGroupInformation createSecurityEnabledProxyUser( HadoopSecurityManager hadoopSecurityManager, String userToProxy, String fileLocation, Logger log ) throws IOException, HadoopSecurityManagerException { log.info("createSecurityEnabledProxyUser starts"); if (!new File(fileLocation).exists()) { throw new RuntimeException("hadoop token file doesn't exist."); } log.info("Found token file. Setting " + HadoopSecurityManager.MAPREDUCE_JOB_CREDENTIALS_BINARY + " to " + fileLocation); System.setProperty(HadoopSecurityManager.MAPREDUCE_JOB_CREDENTIALS_BINARY, fileLocation); // log the tokens of getLoginUser() and monitor the copying UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); log.info("Current logged in user is " + loginUser); UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(userToProxy, loginUser); log.info(String.format("Copy from loginUser [%s] to proxyUser [%s]", loginUser, proxyUser)); for (Token<?> token : loginUser.getTokens()) { proxyUser.addToken(token); log.info(String.format("Token = %s, %s, %s ", token.getKind(), token.getService(), Arrays.toString(token.getIdentifier()))); } proxyUser.addCredentials(loginUser.getCredentials()); // read tokens from the file and put into proxyUser if (hadoopSecurityManager != null) { Credentials creds = hadoopSecurityManager.getTokens(new File(fileLocation), log); log.info(String.format("Loading tokens from file [%s] to proxyUser [%s]", fileLocation, proxyUser)); for (Token<?> token : creds.getAllTokens()) { proxyUser.addToken(token); log.info(String.format("Token = %s, %s, %s ", token.getKind(), token.getService(), Arrays.toString(token.getIdentifier()))); } proxyUser.addCredentials(creds); } log.info("token copy finished for " + loginUser.getUserName()); return proxyUser; }
Performs all the magic required to get the proxyUser in a security-enabled grid @return a UserGroupInformation object for the specified userToProxy, which will also contain the logged in user's tokens
createSecurityEnabledProxyUser
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
Apache-2.0
public static UserGroupInformation setupProxyUser(Properties jobProps, String tokenFile, Logger log) { return setupProxyUserWithHSM(null, jobProps, tokenFile, log); }
Sets up the UserGroupInformation proxyUser object without tokens from token file loaded by hadoopSecurityManager
setupProxyUser
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
Apache-2.0
public static UserGroupInformation setupProxyUserWithHSM( HadoopSecurityManager hadoopSecurityManager, Properties jobProps, String tokenFile, Logger log) { UserGroupInformation proxyUser = null; if (!HadoopSecureWrapperUtils.shouldProxy(jobProps)) { log.info("submitting job as original submitter, not proxying"); return proxyUser; } // set up hadoop related configurations final Configuration conf = new Configuration(); UserGroupInformation.setConfiguration(conf); boolean securityEnabled = UserGroupInformation.isSecurityEnabled(); // setting up proxy user if required try { String userToProxy = null; userToProxy = jobProps.getProperty(HadoopSecurityManager.USER_TO_PROXY); if (securityEnabled) { log.info("security enabled, proxying as user " + userToProxy); proxyUser = HadoopSecureWrapperUtils.createSecurityEnabledProxyUser( hadoopSecurityManager, userToProxy, tokenFile, log); } else { log.info("security not enabled, proxying as user " + userToProxy); proxyUser = UserGroupInformation.createRemoteUser(userToProxy); if (jobProps.getProperty(Constants.JobProperties.ENABLE_OAUTH, "false").equals("true")) { proxyUser.addCredentials(UserGroupInformation.getLoginUser().getCredentials()); } } } catch (IOException | HadoopSecurityManagerException e) { log.error("HadoopSecureWrapperUtils.setupProxyUser threw an exception", e); } return proxyUser; }
Sets up the UserGroupInformation proxyUser object with tokens from loginUser and from the stored token file so that calling code can do doAs(), returns null if the jobProps does not call for a proxyUser @param hadoopSecurityManager Nullable, the security manager that provides token file and load tokens @param jobProps job properties @param tokenFile pass tokenFile if known. Pass null if the tokenFile is in the environmental variable already. @return returns null if no need to run as proxyUser, otherwise returns valid proxyUser that can doAs
setupProxyUserWithHSM
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
Apache-2.0
@SuppressWarnings("DefaultCharset") public static Properties loadAzkabanProps() throws IOException { String propsFile = System.getenv(ProcessJob.JOB_PROP_ENV); Properties props = new Properties(); props.load(new BufferedReader(new FileReader(propsFile))); return props; }
Loads the properties file, which is a combination of the jobProps file and the sysProps file @return a Properties object, which is the combination of the jobProps file and the sysProps file
loadAzkabanProps
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
Apache-2.0
public static boolean shouldProxy(Properties props) { String shouldProxy = props.getProperty(HadoopSecurityManager.ENABLE_PROXYING); return shouldProxy != null && shouldProxy.equals("true"); }
Looks for particular properties inside the Properties object passed in, and determines whether proxying should happen or not @return a boolean value of whether the job should proxy or not
shouldProxy
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSecureWrapperUtils.java
Apache-2.0
private String[] getSparkLibConf() { String sparkHome = null; String sparkConf = null; // If user has specified version in job property. e.g. spark-version=1.6.0 final String jobSparkVer = getJobProps().get(SparkJobArg.SPARK_VERSION.azPropName); if (jobSparkVer != null) { info("This job sets spark version: " + jobSparkVer); // Spark jobtype supports this version through plugin's jobtype config sparkHome = getSparkHome(jobSparkVer); sparkConf = getSysProps().get("spark." + jobSparkVer + ".conf"); if (sparkConf == null) { sparkConf = sparkHome + "/conf"; } info("Using job specific spark: " + sparkHome + " and conf: " + sparkConf); // Override the SPARK_HOME SPARK_CONF_DIR env for HadoopSecureSparkWrapper process(spark client) getJobProps().put("env." + SPARK_HOME_ENV_VAR, sparkHome); getJobProps().put("env." + SPARK_CONF_DIR_ENV_VAR, sparkConf); } else { // User job doesn't give spark-version // Use default spark.home. Configured in the jobtype plugin's config sparkHome = getSysProps().get("spark.home"); if (sparkHome == null) { // Use system default SPARK_HOME env sparkHome = System.getenv(SPARK_HOME_ENV_VAR); } sparkConf = (System.getenv(SPARK_CONF_DIR_ENV_VAR) != null) ? System.getenv(SPARK_CONF_DIR_ENV_VAR) : (sparkHome + "/conf"); info("Using system default spark: " + sparkHome + " and conf: " + sparkConf); } if (sparkHome == null) { throw new RuntimeException("SPARK is not available on the azkaban machine."); } else { final File homeDir = new File(sparkHome); if (!homeDir.exists()) { throw new RuntimeException("SPARK home dir does not exist."); } final File confDir = new File(sparkConf); if (!confDir.exists()) { error("SPARK conf dir does not exist. Will use SPARK_HOME/conf as default."); sparkConf = sparkHome + "/conf"; } final File defaultSparkConf = new File(sparkConf + "/spark-defaults.conf"); if (!defaultSparkConf.exists()) { throw new RuntimeException("Default Spark config file spark-defaults.conf cannot" + " be found at " + defaultSparkConf); } } return new String[]{getSparkLibDir(sparkHome), sparkConf}; }
This method is used to retrieve Spark home and conf locations. The logic is explained in detail below. a) If the user has specified a spark version in a job property, e.g. spark-version=1.6.0, then i) If spark.{sparkVersion}.home is set in commonprivate.properties/private.properties, then that will be returned. ii) If spark.{sparkVersion}.home is not set and spark.base.dir is set, then it will retrieve the Spark directory inside spark.base.dir matching the spark.home.prefix + sparkVersion pattern. b) If the user has not specified a spark version in a job property, use the default spark.home configured in the jobtype plugin's config. c) If the spark home is not found by either of the above cases, throw a RuntimeException.
getSparkLibConf
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
Apache-2.0
private String getSparkHome(final String sparkVersion) { String sparkHome = getSysProps().get("spark." + sparkVersion + ".home"); if (sparkHome == null) { info("Couldn't find spark." + sparkVersion + ".home property."); final String sparkDir = getSysProps().get(SPARK_BASE_DIR); final String sparkHomePrefix = getSysProps().get(SPARK_HOME_PREFIX) != null ? getSysProps().get(SPARK_HOME_PREFIX) : "*"; final String replaceTo = getSysProps().get(SPARK_VERSION_REGEX_TO_REPLACE); final String replaceWith = getSysProps().get(SPARK_VERSION_REGEX_TO_REPLACE_WITH) != null ? getSysProps() .get(SPARK_VERSION_REGEX_TO_REPLACE_WITH) : ""; final String versionPatternToMatch = sparkHomePrefix + (replaceTo != null ? sparkVersion .replace(replaceTo, replaceWith) : sparkVersion) + "*"; info("Looking for spark at " + sparkDir + " directory with " + sparkHomePrefix + " prefix for " + sparkVersion + " version."); final DirectoryScanner scanner = new DirectoryScanner(); scanner.setBasedir(sparkDir); scanner.setIncludes(new String[]{versionPatternToMatch}); scanner.scan(); final String[] directories = scanner.getIncludedDirectories(); if (directories != null && directories.length > 0) { sparkHome = sparkDir + "/" + directories[directories.length - 1]; } else { final String sparkReferenceDoc = getSysProps().get(SPARK_REFERENCE_DOCUMENT); final String exceptionMessage = sparkReferenceDoc == null ? "SPARK version specified by User is not available." : "SPARK version specified by User is not available. Available versions are mentioned at: " + sparkReferenceDoc; throw new RuntimeException(exceptionMessage); } } return sparkHome; }
This method is used to get spark home from plugin's jobtype config. If spark.{sparkVersion}.home is set in commonprivate.properties/private.properties, then that will be returned. If spark.{sparkVersion}.home is not set and spark.base.dir is set then it will retrieve Spark directory inside spark.base.dir, matching spark.home.prefix + sparkVersion pattern. Regex pattern can be passed as properties for version formatting. @param sparkVersion the version of spark @return return the spark home
getSparkHome
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
Apache-2.0
private String getSparkLibDir(final String sparkHome) { // sparkHome should have already been checked when this method is invoked final File homeDir = new File(sparkHome); File libDir = new File(homeDir, "lib"); if (libDir.exists()) { return libDir.getAbsolutePath(); } else { libDir = new File(homeDir, "jars"); if (libDir.exists()) { return libDir.getAbsolutePath(); } else { throw new RuntimeException("SPARK lib dir does not exist."); } } }
Given the dir path of Spark Home, return the dir path of Spark lib. It is either sparkHome/lib or sparkHome/jars based on the version of Spark chosen by user. @param sparkHome dir path of Spark Home @return dir path of Spark lib
getSparkLibDir
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
Apache-2.0
@Override public void cancel() throws InterruptedException { super.cancel(); info("Cancel called. Killing the launched Spark jobs on the cluster"); getHadoopProxy().killAllSpawnedHadoopJobs(getJobProps(), getLog()); }
This cancel method, in addition to the default canceling behavior, also kills the Spark job on Hadoop
cancel
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/HadoopSparkJob.java
Apache-2.0
private static IPropertiesValidator getValidator(GobblinPresets preset) { Objects.requireNonNull(preset); switch (preset) { case MYSQL_TO_HDFS: return new MySqlToHdfsValidator(); case HDFS_TO_MYSQL: return new HdfsToMySqlValidator(); default: throw new UnsupportedOperationException("Preset " + preset + " is not supported"); } }
Factory method that provides an IPropertiesValidator based on the preset at runtime. Uses the factory method pattern as the set of presets is expected to grow. @return IPropertiesValidator
getValidator
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
Apache-2.0
private void initializePresets() { if (gobblinPresets == null) { synchronized (GobblinHadoopJob.class) { if (gobblinPresets == null) { gobblinPresets = Maps.newHashMap(); String gobblinPresetDirName = getSysProps() .getString(GobblinConstants.GOBBLIN_PRESET_DIR_KEY); File gobblinPresetDir = new File(gobblinPresetDirName); File[] presetFiles = gobblinPresetDir.listFiles(); if (presetFiles == null) { return; } File commonPropertiesFile = new File(gobblinPresetDir, GOBBLIN_PRESET_COMMON_PROPERTIES_FILE_NAME); if (!commonPropertiesFile.exists()) { throw new IllegalStateException("Gobblin preset common properties file is missing " + commonPropertiesFile.getAbsolutePath()); } for (File f : presetFiles) { if (GOBBLIN_PRESET_COMMON_PROPERTIES_FILE_NAME .equals(f.getName())) { //Don't load common one itself. continue; } if (f.isFile()) { Properties prop = new Properties(); try (InputStream commonIs = new BufferedInputStream( new FileInputStream(commonPropertiesFile)); InputStream presetIs = new BufferedInputStream(new FileInputStream(f))) { prop.load(commonIs); prop.load(presetIs); String presetName = f.getName().substring(0, f.getName().lastIndexOf('.')); //remove extension from the file name gobblinPresets.put(GobblinPresets.fromName(presetName), prop); } catch (IOException e) { throw new RuntimeException(e); } } } } } } }
Initializes presets and caches them into the preset map. As presets do not change while the server is up, this initialization happens only once per JVM.
initializePresets
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
Apache-2.0
private void loadPreset() { String presetName = getJobProps().get(GobblinConstants.GOBBLIN_PRESET_KEY); if (presetName == null) { return; } GobblinPresets preset = GobblinPresets.fromName(presetName); Properties presetProperties = gobblinPresets.get(preset); if (presetProperties == null) { throw new IllegalArgumentException( "Preset " + presetName + " is not supported. Supported presets: " + gobblinPresets.keySet()); } getLog().info("Loading preset " + presetName + " : " + presetProperties); Map<String, String> skipped = Maps.newHashMap(); for (String key : presetProperties.stringPropertyNames()) { if (getJobProps().containsKey(key)) { skipped.put(key, presetProperties.getProperty(key)); continue; } getJobProps().put(key, presetProperties.getProperty(key)); } getLog().info("Loaded preset " + presetName); if (!skipped.isEmpty()) { getLog().info( "Skipped some properties from preset as already exists in job properties. Skipped: " + skipped); } if (getJobProps().getBoolean(GobblinConstants.GOBBLIN_PROPERTIES_HELPER_ENABLED_KEY, true)) { getValidator(preset).validate(getJobProps()); } }
If the input parameters include a preset value, loads the preset's set of properties into the job properties for the Gobblin job. Also, if the user wants to validate the job properties (enabled by default), validates them based on the preset, where the preset is essentially used as a proxy for the use case.
loadPreset
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
Apache-2.0
private void transformProperties() {
  // Gobblin does not accept a SQL query that ends with a semicolon.
  String query = getJobProps().getString(GOBBLIN_QUERY_KEY, null);
  if (query == null) {
    return;
  }
  query = query.trim();
  int idx = -1;
  if ((idx = query.indexOf(';')) >= 0) {
    if (idx < query.length() - 1) {
      // The query string has already been trimmed, so if the index is not at the end of the
      // query string, there is more than one statement.
      throw new IllegalArgumentException(GOBBLIN_QUERY_KEY
          + " should consist of one SELECT statement. " + query);
    }
    query = query.substring(0, idx);
    getJobProps().put(GOBBLIN_QUERY_KEY, query);
  }
}
Transforms properties to make them work for Gobblin. For example, Gobblin fails when there's a semicolon in the SQL query because it appends " and 1=1;" to the query, making the syntax incorrect. As a trailing semicolon is valid syntax, instead of expecting the user to remove it, Azkaban removes it on the user's behalf so the query works with Gobblin.
transformProperties
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinHadoopJob.java
Apache-2.0
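The semicolon handling above reduces to a simple rule: trim, reject if a semicolon appears anywhere but the last character, otherwise drop it. A standalone sketch of that rule, outside the job-properties plumbing (the message text is illustrative):

public class QuerySanitizer {
  static String stripTrailingSemicolon(String query) {
    query = query.trim();
    int idx = query.indexOf(';');
    if (idx >= 0) {
      if (idx < query.length() - 1) {
        // A semicolon before the end implies more than one statement.
        throw new IllegalArgumentException("Expected a single SELECT statement: " + query);
      }
      query = query.substring(0, idx);
    }
    return query;
  }

  public static void main(String[] args) {
    System.out.println(stripTrailingSemicolon("SELECT * FROM t; ")); // SELECT * FROM t
  }
}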
public static GobblinPresets fromName(String name) {
  GobblinPresets preset = NAME_TO_PRESET.get(name);
  if (preset == null) {
    throw new IllegalArgumentException(name + " is unrecognized. Known presets: "
        + NAME_TO_PRESET.keySet());
  }
  return preset;
}
An enum for GobblinPresets. Gobblin has more than a hundred properties, and a GobblinPresets value represents a set of default properties. Using GobblinPresets, the user can reduce the number of input parameters, which consequently increases usability.
fromName
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinPresets.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/connectors/gobblin/GobblinPresets.java
Apache-2.0
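fromName implies a NAME_TO_PRESET map built once from the enum values. A hedged sketch of that common name-lookup pattern; the preset names below mirror the validators referenced earlier, but the exact constants and strings are assumptions:

import java.util.HashMap;
import java.util.Map;

enum PresetSketch {
  MYSQL_TO_HDFS("mysqlToHdfs"),
  HDFS_TO_MYSQL("hdfsToMysql");

  // Built once at class-load time; lookups are then O(1).
  private static final Map<String, PresetSketch> NAME_TO_PRESET = new HashMap<>();
  static {
    for (PresetSketch p : values()) {
      NAME_TO_PRESET.put(p.name, p);
    }
  }

  private final String name;

  PresetSketch(String name) {
    this.name = name;
  }

  static PresetSketch fromName(String name) {
    PresetSketch preset = NAME_TO_PRESET.get(name);
    if (preset == null) {
      throw new IllegalArgumentException(name + " is unrecognized. Known presets: "
          + NAME_TO_PRESET.keySet());
    }
    return preset;
  }
}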
public static HiveQueryExecutor getHiveQueryExecutor() {
  HiveQueryExecutorModule hqem = new HiveQueryExecutorModule();
  try {
    return new RealHiveQueryExecutor(hqem.provideHiveConf(), hqem.provideCliSessionState(),
        new CliDriver());
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Grab bag of utilities for working with Hive. End users should obtain instances of the provided interfaces from these methods.
getHiveQueryExecutor
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/hiveutils/HiveUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/hiveutils/HiveUtils.java
Apache-2.0
public void run() throws HiveViaAzkabanException {
  if (p == null) {
    throw new HiveViaAzkabanException("Properties is null. Can't continue");
  }
  if (!p.containsKey(AZK_HIVE_ACTION)) {
    throw new HiveViaAzkabanException("Must specify a " + AZK_HIVE_ACTION + " key and value.");
  }
  HiveQueryExecutor hqe = HiveUtils.getHiveQueryExecutor();
  HiveAction action = null;
  String hive_action = p.getProperty(AZK_HIVE_ACTION);
  // TODO: Factory time
  if (hive_action.equals(EXECUTE_QUERY)) {
    action = new ExecuteHiveQuery(p, hqe);
  } else if (hive_action.equals(DropAllPartitionsAddLatest.DROP_AND_ADD)) {
    action = new DropAllPartitionsAddLatest(p, hqe);
  } else if (hive_action.equals(UpdateTableLocationToLatest.UPDATE_TABLE_LOCATION_TO_LATEST)) {
    action = new UpdateTableLocationToLatest(p, hqe);
  } else {
    throw new HiveViaAzkabanException("Unknown value (" + hive_action + ") for value "
        + AZK_HIVE_ACTION);
  }
  action.execute();
}
Simple Java driver class to execute a Hive query provided via the Properties file. The query can be specified via: * hive.query = a single-line query that will be fed to Hive * hive.query.nn = a two-digit padded series of lines that will be joined and fed to Hive as one big query
run
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/hiveutils/azkaban/HiveViaAzkaban.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/hiveutils/azkaban/HiveViaAzkaban.java
Apache-2.0
public static boolean tryDeleteFileOrDirectory(File file) {
  try {
    deleteFileOrDirectory(file);
    return true;
  } catch (Exception e) {
    logger.warn("Failed to delete file. file = " + file.getAbsolutePath(), e);
    return false;
  }
}
Tries to delete a file or directory. @param file the file object @return true if the deletion succeeded, false otherwise
tryDeleteFileOrDirectory
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/javautils/FileUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/javautils/FileUtils.java
Apache-2.0
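deleteFileOrDirectory is not shown here; a recursive delete along the lines it implies can be written with java.nio. This is a sketch under that assumption, not the Azkaban implementation:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class RecursiveDelete {
  static void deleteFileOrDirectory(File file) throws IOException {
    try (Stream<Path> walk = Files.walk(file.toPath())) {
      // Depth-first: delete children before their parent directories.
      walk.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
    }
  }
}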
public static void validateAllOrNone(Props props, String... keys) {
  Objects.requireNonNull(keys);
  boolean allExist = true;
  boolean someExist = false;
  for (String key : keys) {
    Object val = props.get(key);
    allExist &= val != null;
    someExist |= val != null;
  }
  if (someExist && !allExist) {
    throw new IllegalArgumentException(
        "Either all of properties exist or none of them should exist for "
            + Arrays.toString(keys));
  }
}
Validates that either all of the keys exist or none of them exist. @throws IllegalArgumentException if only some of the keys exist
validateAllOrNone
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/javautils/ValidationUtils.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/javautils/ValidationUtils.java
Apache-2.0
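The same all-or-none rule, restated over a plain java.util.Map so it can be run without Azkaban's Props class; the key names are hypothetical:

import java.util.Arrays;
import java.util.Map;

public class AllOrNoneCheck {
  static void validateAllOrNone(Map<String, ?> props, String... keys) {
    boolean allExist = true;
    boolean someExist = false;
    for (String key : keys) {
      boolean exists = props.get(key) != null;
      allExist &= exists;
      someExist |= exists;
    }
    if (someExist && !allExist) {
      throw new IllegalArgumentException(
          "Either all or none of " + Arrays.toString(keys) + " must be set");
    }
  }

  public static void main(String[] args) {
    validateAllOrNone(Map.of("user", "u", "password", "p"), "user", "password"); // ok
    try {
      validateAllOrNone(Map.of("user", "u"), "user", "password"); // only one key is set
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}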
public void validateWhitelisted(String id) {
  if (whitelistSet.contains(id)) {
    return;
  }
  throw new UnsupportedOperationException(id + " is not authorized");
}
Checks if id is in whitelist. @throws UnsupportedOperationException if id is not whitelisted
validateWhitelisted
java
azkaban/azkaban
az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/javautils/Whitelist.java
https://github.com/azkaban/azkaban/blob/master/az-hadoop-jobtype-plugin/src/main/java/azkaban/jobtype/javautils/Whitelist.java
Apache-2.0
protected FSDataInputStream openFile(final FileSystem fs, final Path file, final int bufferSize, final long length) throws IOException { return fs.open(file, bufferSize); }
Override this method to specialize the type of {@link FSDataInputStream} returned.
openFile
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
public boolean isCompressed() { return this.decompress; }
Returns true if values are compressed.
isCompressed
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
public boolean isBlockCompressed() { return this.blockCompressed; }
Returns true if records are block-compressed.
isBlockCompressed
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
Configuration getConf() { return this.conf; }
Returns the configuration used for this file.
getConf
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
private synchronized void seekToCurrentValue() throws IOException {
  if (!this.blockCompressed) {
    if (this.decompress) {
      this.valInFilter.resetState();
    }
    this.valBuffer.reset();
  } else {
    // Check if this is the first value in the 'block' to be read
    if (this.lazyDecompress && !this.valuesDecompressed) {
      // Read the value lengths and values
      readBuffer(this.valLenBuffer, this.valLenInFilter);
      readBuffer(this.valBuffer, this.valInFilter);
      this.noBufferedValues = this.noBufferedRecords;
      this.valuesDecompressed = true;
    }
    // Calculate the no. of bytes to skip
    // Note: 'current' key has already been read!
    int skipValBytes = 0;
    final int currentKey = this.noBufferedKeys + 1;
    for (int i = this.noBufferedValues; i > currentKey; --i) {
      skipValBytes += WritableUtils.readVInt(this.valLenIn);
      --this.noBufferedValues;
    }
    // Skip to the 'val' corresponding to 'current' key
    if (skipValBytes > 0) {
      if (this.valIn.skipBytes(skipValBytes) != skipValBytes) {
        throw new IOException("Failed to seek to " + currentKey + "(th) value!");
      }
    }
  }
}
Position valLenIn/valIn to the 'value' corresponding to the 'current' key
seekToCurrentValue
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
public synchronized boolean next(final Writable key) throws IOException {
  if (key.getClass() != getKeyClass()) {
    throw new IOException("wrong key class: " + key.getClass().getName() + " is not "
        + this.keyClass);
  }
  if (!this.blockCompressed) {
    this.outBuf.reset();
    this.keyLength = next(this.outBuf);
    if (this.keyLength < 0) {
      return false;
    }
    this.valBuffer.reset(this.outBuf.getData(), this.outBuf.getLength());
    key.readFields(this.valBuffer);
    this.valBuffer.mark(0);
    if (this.valBuffer.getPosition() != this.keyLength) {
      throw new IOException(key + " read " + this.valBuffer.getPosition()
          + " bytes, should read " + this.keyLength);
    }
  } else {
    // Reset syncSeen
    this.syncSeen = false;
    if (this.noBufferedKeys == 0) {
      try {
        readBlock();
      } catch (final EOFException eof) {
        return false;
      }
    }
    final int keyLength = WritableUtils.readVInt(this.keyLenIn);
    // Sanity check
    if (keyLength < 0) {
      return false;
    }
    // Read another compressed 'key'
    key.readFields(this.keyIn);
    --this.noBufferedKeys;
  }
  return true;
}
Read the next key in the file into <code>key</code>, skipping its value. Returns true if another entry exists, and false at end of file.
next
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
public synchronized boolean next(final Writable key, final Writable val) throws IOException {
  if (val.getClass() != getValueClass()) {
    throw new IOException("wrong value class: " + val + " is not " + this.valClass);
  }
  final boolean more = next(key);
  if (more) {
    getCurrentValue(val);
  }
  return more;
}
Read the next key/value pair in the file into <code>key</code> and <code>val</code>. Returns true if such a pair exists and false when at end of file
next
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
private synchronized int readRecordLength() throws IOException {
  if (this.in.getPos() >= this.end) {
    return -1;
  }
  int length = this.in.readInt();
  if (this.version > 1 && this.sync != null && length == SYNC_ESCAPE) {
    // process a sync entry
    this.in.readFully(this.syncCheck); // read syncCheck
    if (!Arrays.equals(this.sync, this.syncCheck)) { // check it
      throw new IOException("File is corrupt!");
    }
    this.syncSeen = true;
    if (this.in.getPos() >= this.end) {
      return -1;
    }
    length = this.in.readInt(); // re-read length
  } else {
    this.syncSeen = false;
  }
  return length;
}
Read and return the next record length, potentially skipping over a sync block. @return the length of the next record or -1 if there is no next record @throws IOException
readRecordLength
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
public synchronized int nextRaw(final DataOutputBuffer key, final ValueBytes val)
    throws IOException {
  if (!this.blockCompressed) {
    final int length = readRecordLength();
    if (length == -1) {
      return -1;
    }
    final int keyLength = this.in.readInt();
    final int valLength = length - keyLength;
    key.write(this.in, keyLength);
    if (this.decompress) {
      final CompressedBytes value = (CompressedBytes) val;
      value.reset(this.in, valLength);
    } else {
      final UncompressedBytes value = (UncompressedBytes) val;
      value.reset(this.in, valLength);
    }
    return length;
  } else {
    // Reset syncSeen
    this.syncSeen = false;
    // Read 'key'
    if (this.noBufferedKeys == 0) {
      if (this.in.getPos() >= this.end) {
        return -1;
      }
      try {
        readBlock();
      } catch (final EOFException eof) {
        return -1;
      }
    }
    final int keyLength = WritableUtils.readVInt(this.keyLenIn);
    if (keyLength < 0) {
      throw new IOException("zero length key found!");
    }
    key.write(this.keyIn, keyLength);
    --this.noBufferedKeys;
    // Read raw 'value'
    seekToCurrentValue();
    final int valLength = WritableUtils.readVInt(this.valLenIn);
    final UncompressedBytes rawValue = (UncompressedBytes) val;
    rawValue.reset(this.valIn, valLength);
    --this.noBufferedValues;
    return (keyLength + valLength);
  }
}
Read 'raw' records. @param key - The buffer into which the key is read @param val - The 'raw' value @return Returns the total record length or -1 for end of file @throws IOException
nextRaw
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
public int nextRawKey(final DataOutputBuffer key) throws IOException {
  if (!this.blockCompressed) {
    this.recordLength = readRecordLength();
    if (this.recordLength == -1) {
      return -1;
    }
    this.keyLength = this.in.readInt();
    key.write(this.in, this.keyLength);
    return this.keyLength;
  } else {
    // Reset syncSeen
    this.syncSeen = false;
    // Read 'key'
    if (this.noBufferedKeys == 0) {
      if (this.in.getPos() >= this.end) {
        return -1;
      }
      try {
        readBlock();
      } catch (final EOFException eof) {
        return -1;
      }
    }
    final int keyLength = WritableUtils.readVInt(this.keyLenIn);
    if (keyLength < 0) {
      throw new IOException("zero length key found!");
    }
    key.write(this.keyIn, keyLength);
    --this.noBufferedKeys;
    return keyLength;
  }
}
Read 'raw' keys. @param key - The buffer into which the key is read @return Returns the key length or -1 for end of file @throws IOException
nextRawKey
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
public synchronized void seek(final long position) throws IOException {
  this.in.seek(position);
  if (this.blockCompressed) {
    // trigger block read
    this.noBufferedKeys = 0;
    this.valuesDecompressed = true;
  }
}
Set the current byte position in the input file. <p> The position passed must be a position returned by {@link AzkabanSequenceFileReader.Writer#getLength()} when writing this file. To seek to an arbitrary position, use {@link Reader#sync(long)}.
seek
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/AzkabanSequenceFileReader.java
Apache-2.0
@Override public String getName() { return VIEWER_NAME; }
This class implements a viewer for ORC files @author gaggarwa
getName
java
azkaban/azkaban
az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/ORCFileViewer.java
https://github.com/azkaban/azkaban/blob/master/az-hdfs-viewer/src/main/java/azkaban/viewer/hdfs/ORCFileViewer.java
Apache-2.0
public Set<String> getAccessViewers() {
  final Set<String> viewers = new HashSet<>();
  for (final String user : this.accessViewer.trim().split(ACCESS_LIST_SPLIT_REGEX)) {
    if (!user.isEmpty()) {
      viewers.add(user);
    }
  }
  return viewers;
}
@return A set of users explicitly granted viewer access to the report.
getAccessViewers
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/Reportal.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/Reportal.java
Apache-2.0
public static Set<String> parseUniqueEmails(final String emailList, final String splitRegex) {
  final Set<String> uniqueEmails = new HashSet<>();
  if (emailList == null) {
    return uniqueEmails;
  }
  final String[] emails = emailList.trim().split(splitRegex);
  for (final String email : emails) {
    if (!email.isEmpty()) {
      uniqueEmails.add(email);
    }
  }
  return uniqueEmails;
}
Given a string containing multiple emails, splits it based on the given regular expression, and returns a set containing the unique, non-empty emails.
parseUniqueEmails
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/ReportalHelper.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/ReportalHelper.java
Apache-2.0
public static boolean isValidEmailAddress(final String email) {
  if (email == null) {
    return false;
  }
  boolean result = true;
  try {
    final InternetAddress emailAddr = new InternetAddress(email);
    emailAddr.validate();
  } catch (final AddressException ex) {
    result = false;
  }
  return result;
}
Returns true if the given email is valid and false otherwise.
isValidEmailAddress
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/ReportalHelper.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/ReportalHelper.java
Apache-2.0
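parseUniqueEmails and isValidEmailAddress compose naturally: split, de-duplicate, then keep only addresses that InternetAddress accepts. A usage sketch (requires the javax.mail dependency; the split regex here is a stand-in for the configured one):

import java.util.HashSet;
import java.util.Set;
import javax.mail.internet.AddressException;
import javax.mail.internet.InternetAddress;

public class EmailFilterDemo {
  public static void main(String[] args) {
    Set<String> unique = new HashSet<>();
    for (String email : "a@x.com, b@x.com,,a@x.com".trim().split("\\s*,\\s*")) {
      if (!email.isEmpty()) {
        unique.add(email);
      }
    }
    Set<String> valid = new HashSet<>();
    for (String email : unique) {
      try {
        new InternetAddress(email).validate();
        valid.add(email);
      } catch (AddressException ex) {
        // drop the invalid address
      }
    }
    System.out.println(valid); // [a@x.com, b@x.com]
  }
}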
public static String getEmailDomain(final String email) {
  if (email == null || email.isEmpty()) {
    return null;
  }
  final int atSignIndex = email.indexOf('@');
  if (atSignIndex != -1) {
    return email.substring(atSignIndex + 1);
  }
  return null;
}
Given an email string, returns the domain part if it exists, and null otherwise.
getEmailDomain
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/ReportalHelper.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/ReportalHelper.java
Apache-2.0
public static List<ExecutableNode> sortExecutableNodes(final ExecutableFlow flow) {
  final List<ExecutableNode> sortedNodes = new ArrayList<>();
  if (flow != null) {
    final List<String> startNodeIds = flow.getStartNodes();
    String nextNodeId = startNodeIds.isEmpty() ? null : startNodeIds.get(0);
    while (nextNodeId != null) {
      final ExecutableNode node = flow.getExecutableNode(nextNodeId);
      sortedNodes.add(node);
      final Set<String> outNodes = node.getOutNodes();
      nextNodeId = outNodes.isEmpty() ? null : outNodes.iterator().next();
    }
  }
  return sortedNodes;
}
Returns a list of the executable nodes in the specified flow in execution order. Assumes that the flow is linear.
sortExecutableNodes
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/ReportalUtil.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/ReportalUtil.java
Apache-2.0
public static List<Variable> getRunTimeVariables(final Collection<Variable> variables) {
  final List<Variable> runtimeVariables = ReportalUtil.getVariablesByRegex(variables,
      Reportal.REPORTAL_CONFIG_PREFIX_NEGATION_REGEX);
  return runtimeVariables;
}
Gets runtime variables to be set in the unscheduled mode of execution. Returns an empty list if no runtime variable is found.
getRunTimeVariables
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/ReportalUtil.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/ReportalUtil.java
Apache-2.0
public static List<Variable> getVariablesByRegex(final Collection<Variable> variables,
    final String regex) {
  final List<Variable> shortlistedVariables = new ArrayList<>();
  if (variables != null && regex != null) {
    for (final Variable var : variables) {
      if (var.getTitle().matches(regex)) {
        shortlistedVariables.add(var);
      }
    }
  }
  return shortlistedVariables;
}
Shortlists variables which match a given regex. Returns an empty list if no eligible variable is found.
getVariablesByRegex
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/ReportalUtil.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/ReportalUtil.java
Apache-2.0
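REPORTAL_CONFIG_PREFIX_NEGATION_REGEX is not shown here; the mechanism is just a title-matching filter. A self-contained sketch with a stand-in Variable type and a hypothetical prefix-negation regex:

import java.util.ArrayList;
import java.util.List;

public class VariableFilterDemo {
  static class Variable {
    final String title;

    Variable(String title) {
      this.title = title;
    }

    String getTitle() {
      return title;
    }
  }

  public static void main(String[] args) {
    List<Variable> vars = List.of(
        new Variable("reportal.config.retention"), new Variable("start_date"));
    // Hypothetical negation regex: matches titles NOT starting with "reportal.config".
    String regex = "(?!reportal\\.config).*";
    List<Variable> runtime = new ArrayList<>();
    for (Variable v : vars) {
      if (v.getTitle().matches(regex)) {
        runtime.add(v);
      }
    }
    System.out.println(runtime.size()); // 1, only "start_date" survives
  }
}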
private FileStatus[] getFileStatusList(final String pathString)
    throws HadoopSecurityManagerException, IOException {
  ensureHdfs();
  final Path path = new Path(pathString);
  FileStatus pathStatus = null;
  try {
    pathStatus = this.hdfs.getFileStatus(path);
  } catch (final IOException e) {
    cleanUp();
  }
  if (pathStatus != null && pathStatus.isDir()) {
    return this.hdfs.listStatus(path);
  }
  return new FileStatus[0];
}
Returns an array of the file statuses of the files/directories in the given path if it is a directory and an empty array otherwise.
getFileStatusList
java
azkaban/azkaban
az-reportal/src/main/java/azkaban/reportal/util/StreamProviderHDFS.java
https://github.com/azkaban/azkaban/blob/master/az-reportal/src/main/java/azkaban/reportal/util/StreamProviderHDFS.java
Apache-2.0
@Inject
@Provides
@Singleton
@Named(HADOOP_CONF)
public Configuration createHadoopConfiguration(final AzkabanCommonModuleConfig azConfig) {
  final String hadoopConfDirPath = requireNonNull(
      azConfig.getProps().getString(HADOOP_CONF_DIR_PATH));
  final File hadoopConfDir = new File(requireNonNull(hadoopConfDirPath));
  checkArgument(hadoopConfDir.exists() && hadoopConfDir.isDirectory());
  final Configuration conf = new Configuration(false);
  conf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "core-site.xml"));
  conf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "hdfs-site.xml"));
  return conf;
}
Place Hadoop dependencies in this module. Since Hadoop is not included in the Azkaban Runtime dependency, we only install this module when Hadoop related injection (e.g., HDFS storage) is needed.
createHadoopConfiguration
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/HadoopModule.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/HadoopModule.java
Apache-2.0
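Outside Guice, the same construction is just: start from an empty Configuration so the classpath defaults are skipped, then add the two site files. A sketch assuming a local Hadoop conf directory (the path is hypothetical):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class HadoopConfDemo {
  public static void main(String[] args) {
    String confDir = "/etc/hadoop/conf"; // hypothetical location
    if (!new File(confDir).isDirectory()) {
      throw new IllegalArgumentException(confDir + " is not a directory");
    }
    // 'false' skips loading core-default.xml and classpath core-site.xml.
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(confDir, "core-site.xml"));
    conf.addResource(new Path(confDir, "hdfs-site.xml"));
    System.out.println(conf.get("fs.defaultFS"));
  }
}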
default List<HTMLFormElement> getViewParameters() { return Collections.emptyList(); }
Parameters users should set to enable alerts on SLA misses via Web UI. Currently used to render the SLA definition page.
getViewParameters
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/alert/Alerter.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/alert/Alerter.java
Apache-2.0
public void loadClusters() throws IOException {
  if (this.clustersDir.exists()) {
    LOG.info("Loading clusters from directory " + this.clustersDir);
    for (final File clusterDir : this.clustersDir.listFiles()) {
      if (clusterDir.isDirectory() && clusterDir.canRead()) {
        LOG.info("Loading cluster from directory: " + clusterDir);
        loadCluster(clusterDir, this.clusterRegistry);
        LOG.info("Loaded " + clusterDir.getName() + " from " + clusterDir);
      }
    }
  } else {
    LOG.warn(String.format("%s does not exist. No clusters are loaded.", this.clustersDir));
  }
}
Loads clusters and their information from a directory and adds them to {@link ClusterRegistry}.
loadClusters
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/cluster/ClusterLoader.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/cluster/ClusterLoader.java
Apache-2.0
public static void loadCluster(final File clusterDir, final ClusterRegistry clusterRegistry)
    throws IOException {
  final File clusterConfigFile = new File(clusterDir, CLUSTER_CONF_FILE);
  if (!clusterConfigFile.exists()) {
    throw new FileNotFoundException(CLUSTER_CONF_FILE + " is missing under " + clusterDir);
  }
  final Props clusterConfigProp = new Props(null, clusterConfigFile);
  final Props resolvedClusterConfigProp = PropsUtils.resolveProps(clusterConfigProp);
  final String clusterId = clusterDir.getName();
  final Cluster clusterInfo = new Cluster(clusterId, resolvedClusterConfigProp);
  clusterRegistry.addCluster(clusterId, clusterInfo);
}
Load a cluster from a directory into the ClusterRegistry.
loadCluster
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/cluster/ClusterLoader.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/cluster/ClusterLoader.java
Apache-2.0
@Override
public Cluster getCluster(final String jobId, final Props jobProps, final Logger jobLogger,
    final Collection<String> componentDependency) {
  final Cluster cluster = this.clusterRegistry.getCluster(Cluster.DEFAULT_CLUSTER);
  if (cluster == null) {
    jobLogger.error(
        String.format("Could not load cluster: %s for job %s", Cluster.DEFAULT_CLUSTER, jobId));
    throw new RuntimeException("The default cluster is not found");
  }
  return cluster;
}
An implementation of {@link ClusterRouter} that always routes jobs to the default cluster.
getCluster
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/cluster/DefaultClusterRouter.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/cluster/DefaultClusterRouter.java
Apache-2.0
@Override public Cluster getCluster(final String jobId, final Props jobProps, final Logger jobLogger, final Collection<String> componentDependency) { return Cluster.UNKNOWN; }
An implementation of {@link ClusterRouter} that routes jobs to the UNKNOWN cluster so that the cluster implicitly loaded through Azkaban JVM will be used.
getCluster
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/cluster/DisabledClusterRouter.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/cluster/DisabledClusterRouter.java
Apache-2.0
public static synchronized AzKubernetesV1PodTemplate getInstance(String templatePath)
    throws IOException {
  if (null == instance) {
    instance = new AzKubernetesV1PodTemplate(templatePath);
  }
  return instance;
}
@param templatePath Path where the template file is located. @return Singleton instance of this class. @throws IOException If unable to read the template file.
getInstance
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1PodTemplate.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1PodTemplate.java
Apache-2.0
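Note that the templatePath argument only matters on the first call; later calls return the already-built singleton regardless of the path passed. A usage sketch under that assumption (both paths are hypothetical):

import java.io.IOException;
import azkaban.container.models.AzKubernetesV1PodTemplate;

public class PodTemplateUsage {
  public static void main(String[] args) throws IOException {
    // First call constructs the singleton from the given file.
    AzKubernetesV1PodTemplate template =
        AzKubernetesV1PodTemplate.getInstance("/azkaban/conf/pod-template.yaml");
    // Subsequent calls ignore their argument and return the same instance.
    AzKubernetesV1PodTemplate same =
        AzKubernetesV1PodTemplate.getInstance("/some/other/path.yaml");
    System.out.println(template == same); // true
  }
}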
public V1PodSpec getPodSpecFromTemplate() throws IOException { return ((V1Pod) Yaml.load(this.templatePodString)).getSpec(); }
Always returns a new instance of V1PodSpec generated from the template. @return the {@link V1PodSpec} POD spec generated from the template. @throws IOException If unable to read the template file.
getPodSpecFromTemplate
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1PodTemplate.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1PodTemplate.java
Apache-2.0
public V1ObjectMeta getPodMetadataFromTemplate() throws IOException { return ((V1Pod) Yaml.load(this.templatePodString)).getMetadata(); }
Always returns a new instance of {@link V1ObjectMeta} generated from the template to prevent data corruption through setter methods. @return the pod metadata object. @throws IOException If unable to read the template file.
getPodMetadataFromTemplate
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1PodTemplate.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1PodTemplate.java
Apache-2.0
public V1Service build() throws IOException {
  final ImmutableMap<Object, Object> templateValuesMap = this.templateValuesMapBuilder.build();
  final StrSubstitutor strSubstitutor = new StrSubstitutor(templateValuesMap);
  final String serviceYaml = strSubstitutor.replace(this.template);
  return (V1Service) Yaml.load(serviceYaml);
}
@return {@link V1Service} object by replacing the values from the templateValuesMap within the template @throws IOException if the object can't be constructed from the substituted YAML String
build
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1ServiceBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1ServiceBuilder.java
Apache-2.0
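The substitution step is plain Apache Commons StrSubstitutor: ${placeholder} tokens in the YAML template are replaced from the values map before the YAML is parsed. A minimal sketch of just that step, with a hypothetical template (the import path assumes commons-lang3; newer code would use commons-text StringSubstitutor):

import java.util.Map;
import org.apache.commons.lang3.text.StrSubstitutor;

public class TemplateSubstitutionDemo {
  public static void main(String[] args) {
    String template = "apiVersion: v1\nkind: Service\nmetadata:\n  name: ${service-name}\n";
    Map<String, String> values = Map.of("service-name", "fc-svc-123");
    String yaml = new StrSubstitutor(values).replace(template);
    System.out.println(yaml); // the name field is now fc-svc-123
  }
}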
public AzKubernetesV1SpecBuilder addEnvVarToFlowContainer(final String envKey, final String envVal) { this.flowContainerBuilder.addNewEnv().withName(envKey).withValue(envVal).endEnv(); return this; }
@param envKey Key for the environment variable to be added to the flow container. @param envVal Value for the environment variable to be added to the flow container.
addEnvVarToFlowContainer
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
Apache-2.0
public AzKubernetesV1SpecBuilder addFlowContainer(String name, String image,
    ImagePullPolicy imagePullPolicy, String confVersion) {
  V1EnvVar azConfVersion = new V1EnvVarBuilder()
      .withName(AZ_CONF_VERSION_KEY)
      .withValue(confVersion)
      .build();
  this.flowContainerBuilder
      .withName(name)
      .withImage(image)
      .withImagePullPolicy(imagePullPolicy.getPolicyVal())
      .withEnv(this.azClusterName, azConfVersion);
  LOGGER.info("Created flow container object with name " + name);
  return this;
}
@param name Flow-container/application-container name @param image Docker image path in the image registry @param imagePullPolicy Docker image pull policy @param confVersion Version for the Azkaban configuration resource. This method adds the configured application container to the Pod spec. The application container is responsible for executing the flow.
addFlowContainer
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
Apache-2.0
public AzKubernetesV1SpecBuilder addHostPathVolume(final String volName, final String hostPath,
    final String hostPathType, final String volMountPath, final boolean readOnly) {
  final V1Volume hostPathVolume = new V1VolumeBuilder().withName(volName).withNewHostPath()
      .withPath(hostPath).withType(hostPathType).endHostPath().build();
  this.appVolumes.add(hostPathVolume);
  final V1VolumeMount hostPathVolMount = new V1VolumeMountBuilder()
      .withName(volName)
      .withMountPath(volMountPath)
      .withReadOnly(readOnly)
      .build();
  this.appVolumeMounts.add(hostPathVolMount);
  return this;
}
This method adds a HostPath volume to the pod-spec and also mounts the volume to the flow container. @param volName Name of the volume. @param hostPath Path for the hostPath volume. @param hostPathType Type for the hostPath volume. @param volMountPath Path to be mounted for the flow container.
addHostPathVolume
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
Apache-2.0
public AzKubernetesV1SpecBuilder addSecretVolume(final String volName, final String secretName,
    final String volMountPath) {
  final V1SecretVolumeSource secretVolumeSource = new V1SecretVolumeSourceBuilder()
      .withSecretName(secretName)
      .withDefaultMode(SECRET_VOLUME_DEFAULT_MODE)
      .build();
  final V1Volume secretVolume = new V1VolumeBuilder()
      .withName(volName)
      .withSecret(secretVolumeSource).build();
  this.appVolumes.add(secretVolume);
  final V1VolumeMount secretVolumeMount = new V1VolumeMountBuilder()
      .withMountPath(volMountPath)
      .withName(volName)
      .build();
  this.appVolumeMounts.add(secretVolumeMount);
  return this;
}
Adds a volume mount populated from a kubernetes secret. @param volName volume name @param secretName secret name @param volMountPath directory where secret will be mounted
addSecretVolume
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/AzKubernetesV1SpecBuilder.java
Apache-2.0
private static void mergeInitContainers(V1PodSpec podSpec, V1PodSpec podSpecFromTemplate) {
  List<V1Container> podSpecInitContainers = podSpec.getInitContainers();
  if (null == podSpecInitContainers) {
    return;
  }
  // Get init containers from podSpecFromTemplate which are not part of pod-spec init containers.
  final List<V1Container> templateOnlyInitContainers = getInitContainers(podSpecFromTemplate,
      templateInitContainer -> podSpecInitContainers.stream().map(V1Container::getName)
          .noneMatch(name -> name.equals(templateInitContainer.getName())));
  // Get init containers from podSpecFromTemplate which are also part of pod-spec init containers,
  // i.e. the other containers apart from the above list templateOnlyInitContainers.
  final List<V1Container> templateAlsoInitContainers = getInitContainers(podSpecFromTemplate,
      templateInitContainer -> templateOnlyInitContainers.stream().map(V1Container::getName)
          .noneMatch(name -> name.equals(templateInitContainer.getName())));
  // Get init containers from pod-spec which are not part of podSpecFromTemplate init containers.
  final List<V1Container> podSpecOnlyInitContainers = getInitContainers(podSpec,
      podSpecInitContainer -> templateAlsoInitContainers.stream().map(V1Container::getName)
          .noneMatch(name -> name.equals(podSpecInitContainer.getName())));
  // Get init containers from pod-spec which are also part of podSpecFromTemplate init containers,
  // i.e. the other containers apart from the above list podSpecOnlyInitContainers.
  final Map<String, V1Container> podSpecAlsoInitContainers = getInitContainers(podSpec,
      podSpecInitContainer -> podSpecOnlyInitContainers.stream().map(V1Container::getName)
          .noneMatch(name -> name.equals(podSpecInitContainer.getName()))).stream()
      .collect(Collectors.toMap(V1Container::getName, e -> e));
  final List<V1Container> allInitContainers = new ArrayList<>();
  allInitContainers.addAll(podSpecOnlyInitContainers);
  allInitContainers.addAll(templateOnlyInitContainers);
  // Merge templateAlsoInitContainers and podSpecAlsoInitContainers.
  List<V1Container> mergedInitContainers = getMergedInitContainers(templateAlsoInitContainers,
      podSpecAlsoInitContainers);
  allInitContainers.addAll(mergedInitContainers);
  // This resets the init containers already part of pod-spec.
  podSpec.setInitContainers(allInitContainers);
}
Merge InitContainers from the dynamically generated pod-spec and podSpecFromTemplate, such that: 1) Add all the init containers which are only part of podSpecFromTemplate. 2) Add all the init containers which are only part of pod-spec. 3) Add the init containers which are part of both pod-spec and podSpecFromTemplate by merging them such that: a) Skeleton of templateInitContainer is utilized. b) Environment variables from podSpecInitContainer are added to corresponding templateInitContainer. c) ImagePullPolicy, Image, and VolumeMounts are overridden from podSpecInitContainer to templateInitContainer. @param podSpec Already created podSpec using the {@link AzKubernetesV1SpecBuilder}. @param podSpecFromTemplate PodSpec from {@link AzKubernetesV1PodTemplate}.
mergeInitContainers
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
Apache-2.0
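The partitioning logic above is a name-keyed three-way split: template-only, pod-spec-only, and present-in-both. Stripped of the Kubernetes types, the shape is easier to see. A sketch over plain strings (the container names are hypothetical):

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class NamePartitionDemo {
  public static void main(String[] args) {
    List<String> template = List.of("init-a", "init-b");
    List<String> podSpec = List.of("init-b", "init-c");
    Set<String> podSpecNames = Set.copyOf(podSpec);
    Set<String> templateNames = Set.copyOf(template);
    List<String> templateOnly = template.stream()
        .filter(n -> !podSpecNames.contains(n)).collect(Collectors.toList());
    List<String> podSpecOnly = podSpec.stream()
        .filter(n -> !templateNames.contains(n)).collect(Collectors.toList());
    List<String> inBoth = template.stream()
        .filter(podSpecNames::contains).collect(Collectors.toList());
    System.out.println(templateOnly + " " + podSpecOnly + " " + inBoth);
    // [init-a] [init-c] [init-b]; "init-b" is the one merged field by field.
  }
}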
private static List<V1Container> getMergedInitContainers(
    List<V1Container> templateAlsoInitContainers,
    Map<String, V1Container> podSpecAlsoInitContainers) {
  final List<V1Container> mergedInitContainers = new ArrayList<>();
  for (V1Container templateInitContainer : templateAlsoInitContainers) {
    V1Container podSpecInitContainer =
        podSpecAlsoInitContainers.get(templateInitContainer.getName());
    mergeTemplateAndPodSpecContainer(templateInitContainer, podSpecInitContainer);
    // Add the modified templateInitContainer with overrides from the corresponding
    // podSpecInitContainer.
    mergedInitContainers.add(templateInitContainer);
  }
  return mergedInitContainers;
}
This method combines the templateAlsoInitContainers with their corresponding podSpecAlsoInitContainers. @param templateAlsoInitContainers List of all initContainers from the pod-spec template which are also part of the dynamically generated pod-spec. @param podSpecAlsoInitContainers Map of all initContainers from the dynamically generated pod-spec which are also part of the pod-spec template. @return List of InitContainers produced by merging templateAlsoInitContainers and podSpecAlsoInitContainers.
getMergedInitContainers
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
Apache-2.0
private static void mergeTemplateAndPodSpecContainer(V1Container templateContainer,
    V1Container podSpecContainer) {
  // Add env from podSpecContainer to templateContainer.
  final List<V1EnvVar> podSpecInitContainerEnv = podSpecContainer.getEnv();
  if (null != podSpecInitContainerEnv && !podSpecInitContainerEnv.isEmpty()) {
    podSpecInitContainerEnv.forEach(templateContainer::addEnvItem);
  }
  // Override name from the podSpecContainer to templateContainer.
  if (null != podSpecContainer.getName()) {
    templateContainer.setName(podSpecContainer.getName());
  }
  // Override ImagePullPolicy from the corresponding podSpecContainer.
  if (null != podSpecContainer.getImagePullPolicy()) {
    templateContainer.setImagePullPolicy(podSpecContainer.getImagePullPolicy());
  }
  // Override Image from the corresponding podSpecContainer.
  if (null != podSpecContainer.getImage()) {
    templateContainer.setImage(podSpecContainer.getImage());
  }
  // Override Resources from the corresponding podSpecContainer.
  if (null != podSpecContainer.getResources()) {
    templateContainer.setResources(podSpecContainer.getResources());
  }
  // Merge volume mounts, with podSpecContainerVolumeMounts overriding
  // templateContainerVolumeMounts.
  final List<V1VolumeMount> templateContainerVolumeMounts = templateContainer.getVolumeMounts();
  final List<V1VolumeMount> podSpecContainerVolumeMounts = podSpecContainer.getVolumeMounts();
  final List<V1VolumeMount> mergedContainerVolumeMounts = getMergedContainerVolumeMounts(
      templateContainerVolumeMounts, podSpecContainerVolumeMounts);
  templateContainer.setVolumeMounts(mergedContainerVolumeMounts);
}
This method combines the templateContainer and podSpecContainer such that: 1) Skeleton of templateContainer is utilized. 2) Environment variables from podSpecContainer are added to corresponding templateContainer. 3) ImagePullPolicy, Image, and VolumeMounts are overridden from podSpecContainer to templateContainer. @param templateContainer Container from the pod-spec template @param podSpecContainer Container from the dynamically generated pod-spec
mergeTemplateAndPodSpecContainer
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
Apache-2.0
private static List<V1VolumeMount> getMergedContainerVolumeMounts(
    final List<V1VolumeMount> templateContainerVolumeMounts,
    final List<V1VolumeMount> podSpecContainerVolumeMounts) {
  final List<V1VolumeMount> allContainerVolumeMounts = new ArrayList<>();
  if (null != templateContainerVolumeMounts && null != podSpecContainerVolumeMounts) {
    allContainerVolumeMounts.addAll(podSpecContainerVolumeMounts);
    templateContainerVolumeMounts
        .stream()
        .filter(templateInitContainerVolumeMount -> podSpecContainerVolumeMounts.stream()
            .map(V1VolumeMount::getName)
            .noneMatch(name -> name.equals(templateInitContainerVolumeMount.getName()))
        ).forEach(vm -> allContainerVolumeMounts.add(vm));
  } else if (null != templateContainerVolumeMounts) {
    allContainerVolumeMounts.addAll(templateContainerVolumeMounts);
  } else if (null != podSpecContainerVolumeMounts) {
    allContainerVolumeMounts.addAll(podSpecContainerVolumeMounts);
  }
  return allContainerVolumeMounts;
}
This method merges VolumeMounts such that: 1) All VolumeMounts from the podSpecContainerVolumeMounts are added. 2) Only those VolumeMounts of templateContainerVolumeMounts are added which are not already part of podSpecContainerVolumeMounts. @param templateContainerVolumeMounts List of VolumeMounts from the templateContainer. @param podSpecContainerVolumeMounts List of VolumeMounts from the podSpecContainer. @return List of VolumeMounts after merging templateContainerVolumeMounts and podSpecContainerVolumeMounts.
getMergedContainerVolumeMounts
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/container/models/PodTemplateMergeUtils.java
Apache-2.0
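The volume-mount merge is "pod-spec wins by name": everything from the pod spec is kept, and a template mount survives only if no pod-spec mount shares its name. The same rule on a simple name/path pair, without the null handling of the original (uses a Java record for brevity; all names and paths are hypothetical):

import java.util.ArrayList;
import java.util.List;

public class MountMergeDemo {
  record Mount(String name, String path) {}

  static List<Mount> merge(List<Mount> template, List<Mount> podSpec) {
    List<Mount> all = new ArrayList<>(podSpec); // pod-spec mounts always win
    for (Mount t : template) {
      boolean shadowed = podSpec.stream().anyMatch(p -> p.name().equals(t.name()));
      if (!shadowed) {
        all.add(t);
      }
    }
    return all;
  }

  public static void main(String[] args) {
    List<Mount> template = List.of(new Mount("conf", "/tmpl/conf"), new Mount("logs", "/logs"));
    List<Mount> podSpec = List.of(new Mount("conf", "/azkaban/conf"));
    System.out.println(merge(template, podSpec));
    // [Mount[name=conf, path=/azkaban/conf], Mount[name=logs, path=/logs]]
  }
}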