code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Loads a job's output properties file into a {@link Props} object. The file is
 * expected to contain a single JSON object whose entries are flattened into
 * string key/value pairs.
 *
 * @param file output properties file
 * @return the parsed Props; an empty Props if the file is missing, empty, or unparsable
 */
public static Props loadOutputFileProps(final File file) {
  LOGGER.info("output properties file=" + file.getAbsolutePath());
  // try-with-resources replaces the manual close via IOUtils.closeQuietly.
  try (final InputStream reader = new BufferedInputStream(new FileInputStream(file))) {
    final Props outputProps = new Props();
    final String content = Streams.asString(reader).trim();
    if (!content.isEmpty()) {
      @SuppressWarnings("unchecked") // JSONUtils returns a raw Map for a JSON object
      final Map<String, Object> propMap =
          (Map<String, Object>) JSONUtils.parseJSONFromString(content);
      for (final Map.Entry<String, Object> entry : propMap.entrySet()) {
        outputProps.put(entry.getKey(), entry.getValue().toString());
      }
    }
    return outputProps;
  } catch (final FileNotFoundException e) {
    LOGGER.info(String.format("File[%s] wasn't found, returning empty props.", file));
    return new Props();
  } catch (final Exception e) {
    // Deliberately fail soft: a bad output file should not fail the flow.
    LOGGER.error(
        "Exception thrown when trying to load output file props. Returning empty Props instead of failing. Is this really the best thing to do?",
        e);
    return new Props();
  }
}
Load output file into a Props object @param file output properties file @return Props object
loadOutputFileProps
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/FileIOUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/FileIOUtils.java
Apache-2.0
/**
 * Creates a temporary output-properties file inside the given working directory.
 *
 * @param prefix file name prefix
 * @param suffix file name suffix
 * @param workingDir directory in which the temp file is created
 * @return handle to the newly created file
 */
public static File createOutputPropsFile(final String prefix,
    final String suffix, final String workingDir) {
  final File directory = new File(workingDir);
  try {
    return File.createTempFile(prefix, suffix, directory);
  } catch (final IOException e) {
    throw new RuntimeException("Failed to create temp output property file ", e);
  }
}
Create Temp File in a working directory @param prefix file prefix @param suffix file suffix @param workingDir working directory @return File handle
createOutputPropsFile
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/FileIOUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/FileIOUtils.java
Apache-2.0
/**
 * Gets the Azkaban Kafka log4j appender for a given Azkaban flow.
 *
 * @param props Azkaban props
 * @param layout log4j layout
 * @param execId Azkaban execution id
 * @param name Azkaban flow id
 * @return the KafkaLog4jAppender configured with the flow-level logging topic
 */
public static KafkaLog4jAppender getAzkabanFlowKafkaLog4jAppender(final Props props,
    final Layout layout, final String execId, final String name) {
  // Flow-level logging has no job attempt, so pass null for that argument.
  final String noJobAttempt = null;
  return getAzkabanKafkaLog4jAppender(props, layout, execId, name, noJobAttempt,
      ConfigurationKeys.AZKABAN_FLOW_LOGGING_KAFKA_TOPIC);
}
Get Azkaban Kafka Log4j Appender for given Azkaban flow. @param props Azkaban props @param layout Log4j layout @param execId Azkaban exec id @param name Azkaban flow id @return KafkaLog4jAppender
getAzkabanFlowKafkaLog4jAppender
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/KafkaLog4jUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/KafkaLog4jUtils.java
Apache-2.0
/**
 * Gets the Azkaban Kafka log4j appender for a given Azkaban job.
 *
 * @param props Azkaban props
 * @param layout log4j layout
 * @param execId Azkaban execution id
 * @param name Azkaban job's nested id
 * @param jobAttempt Azkaban job attempt
 * @return the KafkaLog4jAppender configured with the job-level logging topic
 */
public static KafkaLog4jAppender getAzkabanJobKafkaLog4jAppender(final Props props,
    final Layout layout, final String execId, final String name, final String jobAttempt) {
  // Delegates to the shared builder with the job-level Kafka topic key.
  return getAzkabanKafkaLog4jAppender(props, layout, execId, name, jobAttempt,
      ConfigurationKeys.AZKABAN_JOB_LOGGING_KAFKA_TOPIC);
}
Get Azkaban Kafka Log4j Appender for given Azkaban job. @param props Azkaban props @param layout Log4j layout @param execId Azkaban exec id @param name Azkaban job's nested id @param jobAttempt Azkaban job attempt @return KafkaLog4jAppender
getAzkabanJobKafkaLog4jAppender
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/KafkaLog4jUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/KafkaLog4jUtils.java
Apache-2.0
/**
 * Includes OS cache and free swap.
 *
 * @return the total free memory size of the OS; 0 if there is an error or the
 *     OS doesn't support this memory check.
 */
long getOsTotalFreeMemorySize() {
  // MEM_KEYS lists the meminfo entries (e.g. MemFree, Cached) to sum up.
  return getAggregatedFreeMemorySize(MEM_KEYS);
}
Includes OS cache and free swap. @return the total free memory size of the OS. 0 if there is an error or the OS doesn't support this memory check.
getOsTotalFreeMemorySize
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/OsMemoryUtil.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/OsMemoryUtil.java
Apache-2.0
/**
 * Sums the free-memory values found on the given meminfo lines for the given
 * set of keys.
 *
 * @param lines text lines from the procinfo file
 * @param memKeysToCombine meminfo keys whose values are summed
 * @return the total size of free memory in kB; 0 if any line fails to parse or
 *     the number of matched keys does not equal the number of expected keys
 */
long getOsTotalFreeMemorySizeFromStrings(final List<String> lines,
    final Set<String> memKeysToCombine) {
  long sumKb = 0;
  int matchedKeys = 0;
  for (final String infoLine : lines) {
    for (final String key : memKeysToCombine) {
      if (!infoLine.startsWith(key)) {
        continue;
      }
      matchedKeys++;
      final long sizeKb = parseMemoryLine(infoLine);
      // A zero here means the line failed to parse; treat the whole read as bad.
      if (sizeKb == 0) {
        return 0;
      }
      sumKb += sizeKb;
    }
  }
  final int expected = memKeysToCombine.size();
  if (matchedKeys != expected) {
    final String errMsg = String
        .format("Expect %d keys in the meminfo file. Got %d. content: %s",
            expected, matchedKeys, lines);
    logger.error(errMsg);
    sumKb = 0;
  }
  return sumKb;
}
@param lines text lines from the procinfo file @return the total size of free memory in kB. 0 if there is an error.
getOsTotalFreeMemorySizeFromStrings
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/OsMemoryUtil.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/OsMemoryUtil.java
Apache-2.0
/**
 * Parses one meminfo line, e.g. {@code "MemFree: 59400536 kB"}.
 * Package private to make unit testing easier; otherwise it could be private.
 *
 * @param line the text for a memory usage statistic we are interested in
 * @return size of the memory in kB; 0 if the line cannot be parsed
 */
long parseMemoryLine(final String line) {
  final int idx1 = line.indexOf(":");
  final int idx2 = line.lastIndexOf("kB");
  // Guard against malformed lines: without this, substring() would throw
  // StringIndexOutOfBoundsException instead of reporting a parse failure.
  if (idx1 < 0 || idx2 < idx1 + 2) {
    logger.error("Failed to parse the meminfo file. Line: " + line);
    return 0;
  }
  final String sizeString = line.substring(idx1 + 1, idx2 - 1).trim();
  try {
    return Long.parseLong(sizeString);
  } catch (final NumberFormatException e) {
    final String err = "Failed to parse the meminfo file. Line: " + line;
    logger.error(err);
    return 0;
  }
}
Example file: $ cat /proc/meminfo MemTotal: 65894008 kB MemFree: 59400536 kB Buffers: 409348 kB Cached: 4290236 kB SwapCached: 0 kB Make the method package private to make unit testing easier. Otherwise it can be made private. @param line the text for a memory usage statistics we are interested in @return size of the memory. unit kB. 0 if there is an error.
parseMemoryLine
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/OsMemoryUtil.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/OsMemoryUtil.java
Apache-2.0
/**
 * Formats the event, first folding any attached stack trace into the message
 * when the message is a plain String; other message types pass through
 * unchanged.
 */
@Override
public String format(final LoggingEvent event) {
  final boolean isPlainString = event.getMessage() instanceof String;
  return isPlainString
      ? super.format(appendStackTraceToEvent(event))
      : super.format(event);
}
When we use the log4j Kafka appender, it seems that the appender simply does not log the stack trace anywhere Seeing as the stack trace is a very important piece of information, we create our own PatternLayout class that appends the stack trace to the log message that reported it, so that all the information regarding that error can be found one in place.
format
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/PatternLayoutEscaped.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/PatternLayoutEscaped.java
Apache-2.0
private LoggingEvent appendStackTraceToEvent(final LoggingEvent event) { String message = event.getMessage().toString(); // If there is a stack trace available, print it out if (event.getThrowableInformation() != null) { final String[] s = event.getThrowableStrRep(); for (final String line : s) { message += "\n" + line; } } message = message .replace("\\", "\\\\") .replace("\n", "\\n") .replace("\"", "\\\"") .replace("\t", "\\t"); final Throwable throwable = event.getThrowableInformation() == null ? null : event.getThrowableInformation().getThrowable(); return new LoggingEvent(event.getFQNOfLoggerClass(), event.getLogger(), event.getTimeStamp(), event.getLevel(), message, throwable); }
Create a copy of event, but append a stack trace to the message (if it exists). Then it escapes the backslashes, tabs, newlines and quotes in its message as we are sending it as JSON and we don't want any corruption of the JSON object.
appendStackTraceToEvent
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/PatternLayoutEscaped.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/PatternLayoutEscaped.java
Apache-2.0
/**
 * Helper function to build a valid URI.
 *
 * @param host host name
 * @param port host port
 * @param path extra path after host
 * @param isHttp whether http or https should be used as the scheme
 * @param params extra query parameters
 * @return the URI built from the inputs
 * @throws IOException if the assembled parts do not form a valid URI
 */
public static URI buildUri(final String host, final int port, final String path,
    final boolean isHttp, final Pair<String, String>... params) throws IOException {
  final URIBuilder builder = new URIBuilder();
  builder.setScheme(isHttp ? "http" : "https").setHost(host).setPort(port);
  if (path != null && !path.isEmpty()) {
    builder.setPath(path);
  }
  if (params != null) {
    for (final Pair<String, String> param : params) {
      builder.setParameter(param.getFirst(), param.getSecond());
    }
  }
  try {
    return builder.build();
  } catch (final URISyntaxException e) {
    // Surface URI construction problems as IOException, per this client's contract.
    throw new IOException(e);
  }
}
helper function to build a valid URI. @param host host name. @param port host port. @param path extra path after host. @param isHttp indicates if whether Http or HTTPS should be used. @param params extra query parameters. @return the URI built from the inputs.
buildUri
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
Apache-2.0
/**
 * Helper function to fill the request's posting body with url-encoded form
 * parameters. A null request or empty parameter list is passed through
 * untouched.
 *
 * @param request the request to complete (may be null)
 * @param params form params to post, optional
 * @return the same request instance, with its entity set when params were given
 * @throws UnsupportedEncodingException if UTF-8 encoding is unavailable
 */
protected static HttpEntityEnclosingRequestBase completeRequest(
    final HttpEntityEnclosingRequestBase request,
    final List<Pair<String, String>> params) throws UnsupportedEncodingException {
  if (request == null || params == null || params.isEmpty()) {
    return request;
  }
  final List<NameValuePair> formParams = params.stream()
      .map(p -> new BasicNameValuePair(p.getFirst(), p.getSecond()))
      .collect(Collectors.toList());
  request.setEntity(new UrlEncodedFormEntity(formParams, "UTF-8"));
  return request;
}
helper function to fill the request with header entries and posting body .
completeRequest
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
Apache-2.0
public T httpPost(final URI uri, Optional<Integer> httpTimeout, final List<Pair<String, String>> params) throws IOException { // shortcut if the passed url is invalid. if (null == uri) { logger.error(" unable to perform httpPost as the passed uri is null."); return null; } final HttpPost post = new HttpPost(uri); return this.sendAndReturn(completeRequest(post, params), httpTimeout); }
function to perform a Post http request. @param uri the URI of the request. @param params the form params to be posted, optional. @return the response object type of which is specified by user. @throws UnsupportedEncodingException, IOException
httpPost
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
Apache-2.0
/**
 * Returns the HttpClient used for all http requests within this class.
 * Child classes may override this to customize the client, e.g. to provide a
 * TLS (https) enabled client.
 *
 * @param httpTimeout optional timeout applied to both connect and socket waits
 * @return an http client instance built from default settings
 */
protected CloseableHttpClient createHttpClient(final Optional<Integer> httpTimeout) {
  final HttpClientBuilder clientBuilder = HttpClientBuilder.create();
  if (httpTimeout.isPresent()) {
    // The same timeout value is used for both the connect and socket limits.
    final int timeoutMs = httpTimeout.get();
    clientBuilder.setDefaultRequestConfig(RequestConfig.custom()
        .setConnectTimeout(timeoutMs)
        .setSocketTimeout(timeoutMs)
        .build());
  }
  return clientBuilder.build();
}
For returning a HttpClient that will be used for any http requests within this class. This can be overridden by child classes to customize client, for example, for providing a TLS (https) enabled client. @return an http client instance from default settings.
createHttpClient
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
Apache-2.0
/**
 * Dispatches the request and passes back the parsed response.
 *
 * @param request the fully-formed request to execute
 * @param httpTimeout optional timeout forwarded to {@code createHttpClient}
 * @return the response parsed by {@code parseResponse}
 * @throws IOException on transport failure
 */
protected T sendAndReturn(final HttpUriRequest request,
    final Optional<Integer> httpTimeout) throws IOException {
  // A fresh client is created per call and closed by try-with-resources.
  try (final CloseableHttpClient client = this.createHttpClient(httpTimeout)) {
    return this.parseResponse(client.execute(request));
  }
}
function to dispatch the request and pass back the response.
sendAndReturn
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/RestfulApiClient.java
Apache-2.0
/**
 * Initializes the job-callback manager if it is enabled in the configuration.
 *
 * @param logger the logger object of the calling class; must not be null
 * @param props Azkaban properties; must not be null
 */
public static void configureJobCallback(@Nonnull final Logger logger,
    @Nonnull final Props props) {
  requireNonNull(logger, "Logger must not be null");
  requireNonNull(props, "Properties can't be null");
  // Callbacks default to enabled unless explicitly turned off.
  final boolean callbackEnabled = props.getBoolean(
      Constants.ConfigurationKeys.AZKABAN_EXECUTOR_JOBCALLBACK_ENABLED, true);
  logger.info("Job callback enabled? " + callbackEnabled);
  if (!callbackEnabled) {
    return;
  }
  JobCallbackManager.initialize(props);
}
Method to initialize jobcallback manager if it is enabled. @param logger : the logger object of calling class. @param props : Azkaban properties
configureJobCallback
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/ServerUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/ServerUtils.java
Apache-2.0
/**
 * Pretty-formats a VersionSet as a readable JSON-style string with the quote
 * characters stripped.
 *
 * @param versionSet the versionSet to render
 * @return readable versionSet in JSON format, without quotes
 */
public static String getVersionSetJsonString(final VersionSet versionSet) {
  final Map<String, String> imageToVersionStringMap = new HashMap<>();
  for (final String imageType : versionSet.getImageToVersionMap().keySet()) {
    imageToVersionStringMap.put(imageType,
        versionSet.getImageToVersionMap().get(imageType).getVersion());
  }
  // replace(CharSequence) does a literal substitution; replaceAll would
  // needlessly compile the quote as a regex.
  return JSONUtils.toJSON(imageToVersionStringMap, true).replace("\"", "");
}
Pretty format VersionSet @param versionSet the versionSet @return Readable versionSet in JSON format.
getVersionSetJsonString
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/ServerUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/ServerUtils.java
Apache-2.0
/**
 * Swaps the primary queue with the secondary queue. The previous primary
 * queue is dropped (becoming eligible for GC) and a fresh empty list takes
 * over as the secondary queue.
 */
public synchronized void swap() {
  this.primaryQueue = this.secondaryQueue;
  this.secondaryQueue = new ArrayList<>();
}
Swaps primaryQueue with secondary queue. The previous primary queue will be released.
swap
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/SwapQueue.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/SwapQueue.java
Apache-2.0
/**
 * @return the combined number of elements across the secondary and primary
 *     queues.
 */
public synchronized int getSize() {
  return this.secondaryQueue.size() + this.primaryQueue.size();
}
Returns both the secondary and primary size
getSize
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/SwapQueue.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/SwapQueue.java
Apache-2.0
/**
 * Returns an iterator over the primary queue only; elements still in the
 * secondary queue are not visible until {@code swap()} promotes them.
 */
@Override
public synchronized Iterator<T> iterator() {
  return this.primaryQueue.iterator();
}
Returns iterator over the primary queue.
iterator
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/SwapQueue.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/SwapQueue.java
Apache-2.0
public boolean canSystemGrantMemory(final long xmx) { final long freeMemSize = this.util.getOsTotalFreeMemorySize(); if (freeMemSize == 0) { // Fail open. // On the platforms that don't support the mem info file, the returned size will be 0. return true; } if (freeMemSize - xmx < LOW_MEM_THRESHOLD) { logger.info(String.format( "Free memory amount minus Xmx (%d - %d kb) is less than low mem threshold (%d kb), " + "memory request declined.", freeMemSize, xmx, LOW_MEM_THRESHOLD)); return false; } return true; }
@param xmx Xmx for the process @return true if the system can satisfy the memory request Given Xmx value (in kb) used by java process, determine if system can satisfy the memory request.
canSystemGrantMemory
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/SystemMemoryInfo.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/SystemMemoryInfo.java
Apache-2.0
public boolean isFreePhysicalMemoryAbove(final long memKb) { final long freeMemSize = this.util.getOsFreePhysicalMemorySize(); if (freeMemSize == 0) { // Fail open. // On the platforms that don't support the mem info file, the returned size will be 0. return true; } return freeMemSize - memKb > 0; }
@param memKb represents a memory value in kb @return true if available physical memory is greater than memKb Verifies if the currently available physical memory is greater than a given value.
isFreePhysicalMemoryAbove
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/SystemMemoryInfo.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/SystemMemoryInfo.java
Apache-2.0
/**
 * Filters nulls out of a set of Dependency objects.
 * E.g. input {null, obj1, obj2} yields output {obj1, obj2}.
 *
 * @param dependencies set of Dependency objects that may contain nulls
 * @return a new set containing only the non-null dependencies
 */
public static Set<Dependency> filterNullFromDeps(Set<Dependency> dependencies) {
  final Set<Dependency> nonNullDeps = dependencies.stream()
      .filter(dep -> dep != null)
      .collect(Collectors.toSet());
  return nonNullDeps;
}
Helper method to filer out null from the set containing Dependency objects. Eg: If input set: {null, obj1, obj2 }, then output set: {obj1, obj2} @param dependencies Set of Dependency objects from which null has to be filtered out. @return Set of Dependency without any null in it.
filterNullFromDeps
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/ThinArchiveUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/ThinArchiveUtils.java
Apache-2.0
public static List<ApplicationReport> getAllAliveAppReportsByExecID(final YarnClient yarnClient, final String flowExecID, final Logger log) throws IOException, YarnException { // format: tagName:tagValue Set<String> searchTags = ImmutableSet.of(AZKABAN_FLOW_EXEC_ID + ":" + flowExecID); log.info(String.format("Searching for alive yarn application reports with tag %s", searchTags)); return yarnClient.getApplications(null, YARN_APPLICATION_ALIVE_STATES, searchTags); }
Use the yarnClient to query the unfinished yarn applications for 1 flow execution @param yarnClient the yarnClient already connects to the cluster @param flowExecID the azkaban flow execution id whose yarn applications needs to be killed @return the set of all to-be-killed (alive) yarn applications' IDs @throws IOException for RPC issue @throws YarnException for YARN server issue
getAllAliveAppReportsByExecID
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/YarnUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/YarnUtils.java
Apache-2.0
/**
 * Uses the yarnClient to query the unfinished yarn applications for a set of
 * flow execution IDs (the union of yarn applications tagged with any of the
 * flow execution IDs).
 *
 * @param yarnClient the yarnClient already connected to the cluster
 * @param flowExecIDs azkaban flow execution ids to search for
 * @param log logger for progress messages
 * @return reports of all alive yarn applications tagged with any given execution id
 * @throws IOException for RPC issues
 * @throws YarnException for YARN server issues
 */
public static List<ApplicationReport> getAllAliveAppReportsByExecIDs(final YarnClient yarnClient,
    final Set<Integer> flowExecIDs, final Logger log) throws IOException, YarnException {
  if (flowExecIDs.isEmpty()) {
    // No executions means nothing to search for.
    return Collections.emptyList();
  }
  final Set<String> execIdTags = flowExecIDs.stream()
      .map(execId -> AZKABAN_FLOW_EXEC_ID + ":" + execId)
      .collect(Collectors.toSet());
  log.info(String.format("Searching for alive yarn application reports with tags %s", execIdTags));
  return yarnClient.getApplications(null, YARN_APPLICATION_ALIVE_STATES, execIdTags);
}
Use the yarnClient to query the unfinished yarn applications using a set of flow execution IDs (the union of yarn applications tagged with any of the flow execution IDs)
getAllAliveAppReportsByExecIDs
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/YarnUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/YarnUtils.java
Apache-2.0
/**
 * Creates, initializes and starts a YarnClient connected to the Yarn cluster
 * (resource manager), using the resources passed in with props.
 *
 * @param props properties providing the directory of "yarn-site.xml", if any
 * @param log logger for progress messages
 * @return a started YarnClient
 */
public static YarnClient createYarnClient(Props props, Logger log) {
  final YarnConfiguration yarnConf = new YarnConfiguration();
  if (props.containsKey(YARN_CONF_DIRECTORY_PROPERTY)) {
    log.info("Job yarn conf dir: " + props.get(YARN_CONF_DIRECTORY_PROPERTY));
    // Layer the job-specific yarn-site.xml over the default configuration.
    yarnConf.addResource(
        new Path(props.get(YARN_CONF_DIRECTORY_PROPERTY) + "/" + YARN_CONF_FILENAME));
  }
  yarnConf.setLong(YARN_APP_TIMEOUT_PROPERTY_NAME, YARN_APP_TIMEOUT_IN_MILLIONSECONDS);
  final YarnClient client = YarnClient.createYarnClient();
  client.init(yarnConf);
  client.start();
  return client;
}
Create, initialize and start a YarnClient connecting to the Yarn Cluster (resource manager), using the resources passed in with props. @param props the properties to create a YarnClient, the path to the "yarn-site.xml" to be used @param log
createYarnClient
java
azkaban/azkaban
azkaban-common/src/main/java/azkaban/utils/YarnUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/utils/YarnUtils.java
Apache-2.0
/**
 * Fluent setter for the container name this checkpoint spec applies to.
 *
 * @param containerName the container name to set
 * @return this object, for call chaining
 */
public V1beta2VerticalPodAutoscalerCheckpointSpec containerName(String containerName) {
  this.containerName = containerName;
  return this;
}
Specification of the checkpoint. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
containerName
java
azkaban/azkaban
azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerCheckpointSpec.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerCheckpointSpec.java
Apache-2.0
/**
 * Fluent setter for the bucket weights of the CPU-consumption histogram
 * checkpoint.
 *
 * @param bucketWeights the bucket weights to set
 * @return this object, for call chaining
 */
public V1beta2VerticalPodAutoscalerCheckpointStatusCpuHistogram bucketWeights(Object bucketWeights) {
  this.bucketWeights = bucketWeights;
  return this;
}
Checkpoint of histogram for consumption of CPU.
bucketWeights
java
azkaban/azkaban
azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerCheckpointStatusCpuHistogram.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerCheckpointStatusCpuHistogram.java
Apache-2.0
/**
 * Fluent setter for the bucket weights of the memory-consumption histogram
 * checkpoint.
 *
 * @param bucketWeights the bucket weights to set
 * @return this object, for call chaining
 */
public V1beta2VerticalPodAutoscalerCheckpointStatusMemoryHistogram bucketWeights(Object bucketWeights) {
  this.bucketWeights = bucketWeights;
  return this;
}
Checkpoint of histogram for consumption of memory.
bucketWeights
java
azkaban/azkaban
azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerCheckpointStatusMemoryHistogram.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerCheckpointStatusMemoryHistogram.java
Apache-2.0
/**
 * Fluent setter for the apiVersion of the controller referenced by this
 * target ref (e.g. the Deployment or StatefulSet the autoscaler controls).
 *
 * @param apiVersion the apiVersion to set
 * @return this object, for call chaining
 */
public V1beta2VerticalPodAutoscalerSpecTargetRef apiVersion(String apiVersion) {
  this.apiVersion = apiVersion;
  return this;
}
TargetRef points to the controller managing the set of pods for the autoscaler to control - e.g. Deployment, StatefulSet. VerticalPodAutoscaler can be targeted at controller implementing scale subresource (the pod set is retrieved from the controller&#39;s ScaleStatus) or some well known controllers (e.g. for DaemonSet the pod set is read from the controller&#39;s spec). If VerticalPodAutoscaler cannot use specified target it will report ConfigUnsupported condition. Note that VerticalPodAutoscaler does not require full implementation of scale subresource - it will not use it to modify the replica count. The only thing retrieved is a label selector matching pods grouped by the target resource.
apiVersion
java
azkaban/azkaban
azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerSpecTargetRef.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerSpecTargetRef.java
Apache-2.0
/**
 * Fluent setter for the last transition time of this autoscaler status
 * condition.
 *
 * @param lastTransitionTime the transition timestamp to set
 * @return this object, for call chaining
 */
public V1beta2VerticalPodAutoscalerStatusConditions lastTransitionTime(OffsetDateTime lastTransitionTime) {
  this.lastTransitionTime = lastTransitionTime;
  return this;
}
VerticalPodAutoscalerCondition describes the state of a VerticalPodAutoscaler at a certain point.
lastTransitionTime
java
azkaban/azkaban
azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerStatusConditions.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerStatusConditions.java
Apache-2.0
/**
 * Fluent setter for the name of the container this resource recommendation
 * applies to.
 *
 * @param containerName the container name to set
 * @return this object, for call chaining
 */
public V1beta2VerticalPodAutoscalerStatusRecommendationContainerRecommendations containerName(String containerName) {
  this.containerName = containerName;
  return this;
}
RecommendedContainerResources is the recommendation of resources computed by autoscaler for a specific container. Respects the container resource policy if present in the spec. In particular the recommendation is not produced for containers with &#x60;ContainerScalingMode&#x60; set to &#39;Off&#39;.
containerName
java
azkaban/azkaban
azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerStatusRecommendationContainerRecommendations.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1beta2VerticalPodAutoscalerStatusRecommendationContainerRecommendations.java
Apache-2.0
/**
 * Fluent setter for the name of the Vertical Pod Autoscaler recommender this
 * selector points to.
 *
 * @param name the recommender name to set
 * @return this object, for call chaining
 */
public V1VerticalPodAutoscalerSpecRecommenders name(String name) {
  this.name = name;
  return this;
}
VerticalPodAutoscalerRecommenderSelector points to a specific Vertical Pod Autoscaler recommender. In the future it might pass parameters to the recommender.
name
java
azkaban/azkaban
azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1VerticalPodAutoscalerSpecRecommenders.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/io/kubernetes/autoscaling/models/V1VerticalPodAutoscalerSpecRecommenders.java
Apache-2.0
/**
 * Verifies that an alternate implementation of the AzkabanEventReporter is
 * instantiated when configured via props.
 */
@Test
public void testCreateAzkabanEventReporter() {
  final Props props = new Props();
  props.put(AZKABAN_EVENT_REPORTING_ENABLED, "true");
  props.put(AZKABAN_EVENT_REPORTING_CLASS_PARAM, "azkaban.AzkabanEventReporterTest1");
  final AzkabanCommonModule module = new AzkabanCommonModule(props);
  final AzkabanEventReporter reporter = module.createAzkabanEventReporter();
  assertThat(reporter).isNotNull().isInstanceOf(AzkabanEventReporterTest1.class);
}
Verify that alternate implementation of the <code>AzkabanEventReporter</code> is initialized.
testCreateAzkabanEventReporter
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/AzkabanCommonModuleTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/AzkabanCommonModuleTest.java
Apache-2.0
/**
 * Verifies that an IllegalArgumentException is thrown when required
 * properties for the configured reporter class are missing.
 */
@Test
public void testAzkabanEventReporterInvalidProperties() {
  final Props props = new Props();
  props.put(AZKABAN_EVENT_REPORTING_ENABLED, "true");
  props.put(AZKABAN_EVENT_REPORTING_CLASS_PARAM,
      "azkaban.execapp.reporter.AzkabanKafkaAvroEventReporter");
  final AzkabanCommonModule module = new AzkabanCommonModule(props);
  assertThatIllegalArgumentException()
      .isThrownBy(module::createAzkabanEventReporter);
}
Verify that <code>IllegalArgumentException</code> is thrown when required properties are missing.
testAzkabanEventReporterInvalidProperties
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/AzkabanCommonModuleTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/AzkabanCommonModuleTest.java
Apache-2.0
/**
 * Verifies that a RuntimeException is thrown when no valid constructor is
 * found in the event reporter implementation.
 */
@Test
public void testAzkabanEventReporterInvalidConstructor() {
  final Props props = new Props();
  props.put(AZKABAN_EVENT_REPORTING_ENABLED, "true");
  props.put(AZKABAN_EVENT_REPORTING_CLASS_PARAM, "azkaban.execapp.AzkabanEventReporterTest3");
  // 'final' added for consistency with the sibling tests in this class.
  final AzkabanCommonModule azkabanCommonModule = new AzkabanCommonModule(props);
  assertThatExceptionOfType(RuntimeException.class)
      .isThrownBy(() -> azkabanCommonModule.createAzkabanEventReporter());
}
Verify that a <code>RuntimeException</code> is thrown when valid constructor is not found in the event reporter implementation.
testAzkabanEventReporterInvalidConstructor
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/AzkabanCommonModuleTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/AzkabanCommonModuleTest.java
Apache-2.0
/**
 * Sanity check: a single cluster directory containing a cluster.properties
 * file registers a cluster whose id equals the directory name.
 */
@Test
public void testLoadingSingleCluster() throws IOException {
  final File clusterDir = this.testDir.newFolder("single-cluster");
  final File clusterConfig = new File(clusterDir, ClusterLoader.CLUSTER_CONF_FILE);
  try (final Writer configWriter = new OutputStreamWriter(
      new FileOutputStream(clusterConfig), StandardCharsets.UTF_8)) {
    configWriter.write(
        "hadoop.security.manager.class=azkaban.security.HadoopSecurityManager_H_2_0\n"
            + "A=a\n"
            + "B=b\n");
  }
  final ClusterRegistry registry = new ClusterRegistry();
  ClusterLoader.loadCluster(clusterDir, registry);
  final Cluster loaded = registry.getCluster(clusterDir.getName());
  Assert.assertEquals(clusterDir.getName(), loaded.clusterId);
}
Sanity check when a single cluster is configured.
testLoadingSingleCluster
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/cluster/ClusterLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/cluster/ClusterLoaderTest.java
Apache-2.0
/**
 * Verifies that a FileNotFoundException is thrown when a single cluster
 * directory exists but its cluster.properties file is missing.
 */
@Test (expected = FileNotFoundException.class)
public void testLoadingSingleClusterWithMissingClusterConfig() throws IOException {
  // Folder is created but no cluster config file is written into it.
  final File clusterDir = this.testDir.newFolder("single-cluster-no-config");
  final ClusterRegistry clusterRegistry = new ClusterRegistry();
  ClusterLoader.loadCluster(clusterDir, clusterRegistry);
}
Verify an exception is thrown properly when a single cluster is configured but with its cluster.properties file missing.
testLoadingSingleClusterWithMissingClusterConfig
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/cluster/ClusterLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/cluster/ClusterLoaderTest.java
Apache-2.0
/**
 * Sanity check when multiple clusters are configured under the "clusters"
 * test resource directory.
 */
@Test
public void testLoadingMultipleClusters() throws IOException {
  final File clustersDir = new File(
      getClass().getClassLoader().getResource("clusters").getFile());
  final ClusterRegistry clusterRegistry = new ClusterRegistry();
  // The ClusterLoader constructor performs the loading as a side effect;
  // no reference to the loader needs to be kept.
  new ClusterLoader(clustersDir, clusterRegistry);
  final Cluster defaultCluster = clusterRegistry.getCluster("default");
  Assert.assertEquals("default", defaultCluster.clusterId);
  final Cluster another = clusterRegistry.getCluster("another");
  Assert.assertEquals("another", another.clusterId);
}
Sanity check when multiple clusters are configured.
testLoadingMultipleClusters
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/cluster/ClusterLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/cluster/ClusterLoaderTest.java
Apache-2.0
/**
 * Loads the HadoopSecurityManager class configured for a cluster through the
 * cluster's dedicated HadoopSecurityManagerClassLoader instance.
 */
@Test
public void testGetClusterSecurityManager() throws ClassNotFoundException {
  final String fakeHsmClassName = "org.hello.world.HelloWorld";
  final File hsmJar =
      new File(getClass().getClassLoader().getResource("helloworld.jar").getFile());
  final Props clusterProps = new Props();
  clusterProps.put(Cluster.LIBRARY_PATH_PREFIX + "hadoopsecuritymanager",
      hsmJar.getParentFile().getPath());
  clusterProps.put(Cluster.HADOOP_SECURITY_MANAGER_DEPENDENCY_COMPONENTS,
      "hadoopsecuritymanager");
  clusterProps.put(Cluster.HADOOP_SECURITY_MANAGER_CLASS_PROP, fakeHsmClassName);
  final Cluster cluster = new Cluster("default", clusterProps);
  final HadoopSecurityManagerClassLoader hsmClassLoader =
      cluster.getSecurityManagerClassLoader();
  hsmClassLoader.loadClass(fakeHsmClassName);
}
Test loading of the HadoopSecurityManager class figured for a given cluster from its dedicated HadoopSecurityManagerClassLoader instance.
testGetClusterSecurityManager
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/cluster/ClusterTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/cluster/ClusterTest.java
Apache-2.0
@Test public void testClassAvailableInHadoopSecurityManagerClassLoader() throws MalformedURLException, ClassNotFoundException { final ClassLoader currentClassLoader = getClass().getClassLoader(); final File helloworldJar = new File(currentClassLoader.getResource(SAMPLE_JAR).getFile()); final URL helloworlURL = helloworldJar.toURI().toURL(); final ClassLoader hadoopSecurityManagerClassLoader = new HadoopSecurityManagerClassLoader( new URL[]{helloworlURL}, currentClassLoader, "testCluster"); final Class clazz = hadoopSecurityManagerClassLoader.loadClass( "org.hello.world.HelloWorld"); Assert.assertEquals(hadoopSecurityManagerClassLoader, clazz.getClassLoader()); }
Test class loading of a class that is available only in the HadoopSecurityManagerClassLoader. The class is provided in 'helloworld.jar'.
testClassAvailableInHadoopSecurityManagerClassLoader
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/cluster/HadoopSecurityManagerClassLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/cluster/HadoopSecurityManagerClassLoaderTest.java
Apache-2.0
@Test public void testPropsClass() throws ClassNotFoundException { ClassLoader currentClassLoader = getClass().getClassLoader(); HadoopSecurityManagerClassLoader hadoopSecurityManagerClassLoader = new HadoopSecurityManagerClassLoader( new URL[] {}, currentClassLoader, "testCluster"); // make azkaban.utils.Props class available to the HadoopSecurityManagerClassLoader hadoopSecurityManagerClassLoader.addURL(Props.class); Class clazz = hadoopSecurityManagerClassLoader.loadClass(Props.class.getName()); Assert.assertEquals(currentClassLoader, clazz.getClassLoader()); }
Check {@link azkaban.utils.Props} is always loaded by its parent classloader.
testPropsClass
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/cluster/HadoopSecurityManagerClassLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/cluster/HadoopSecurityManagerClassLoaderTest.java
Apache-2.0
@Test public void testGetNewConnectionAfterClose() throws Exception { this.connection.setAutoCommit(false); /** * See {@link org.apache.commons.dbcp2.PoolableConnectionFactory#passivateObject}. * If the connection disables auto commit, when we close it, connection will be reset enabling auto commit, * and returned to connection pool. */ DbUtils.closeQuietly(this.connection); final Connection newConnection = this.h2DataSource.getConnection(); Assert.assertEquals(newConnection.getAutoCommit(), true); DbUtils.closeQuietly(newConnection); }
{@link AzkabanDataSource#getConnection} fetches a new connection object, rather than the one obtained above, if we don't close it.
testGetNewConnectionAfterClose
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/database/AzkabanConnectionPoolTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/database/AzkabanConnectionPoolTest.java
Apache-2.0
@Test public void testFlowParamForDispatchMethod() throws Exception { initializeContainerizedDispatchImpl(); this.containerizedDispatchManager.getContainerRampUpCriteria().setRampUp(0); this.containerizedDispatchManager.getContainerJobTypeCriteria().updateAllowList(ImmutableSet.of("ALL")); DispatchMethod dispatchMethod = this.containerizedDispatchManager.getDispatchMethod(this.flow6); Assert.assertEquals(DispatchMethod.CONTAINERIZED, dispatchMethod); DispatchMethod dispatchMethodFor7 = this.containerizedDispatchManager.getDispatchMethod(this.flow7); Assert.assertEquals(DispatchMethod.POLL, dispatchMethodFor7); }
This test case verifies that if the dispatch method is marked for containerization in a flow parameter then it should be respected first. If not, then it should follow the rest of the criteria. @throws Exception
testFlowParamForDispatchMethod
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ContainerizedDispatchManagerTest.java
Apache-2.0
@Test public void jobLinkUrlBasedOnSparkHistoryServerUrlForUnroutedJobs() throws Exception { mockStatic(AuthenticationUtils.class); final HttpURLConnection connection = mock(HttpURLConnection.class); when(connection.getInputStream()).thenReturn( new ByteArrayInputStream("Failed to read the application".getBytes("UTF-8")) ); // mock AuthenticationUtils so that RM job link is no longer valid when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString())) .thenReturn(connection); // create a flow that contains one job that was never routed final ExecutableNode node = createExecutableNode("testJob", "spark", null); final ExecutableFlow flow = createSingleNodeFlow(node); // populate azkaban web server properties final Props azkProps = new Props(); final String resourceManagerUrl = "http://localhost:8088/cluster/app/application_${application.id}"; azkProps.put(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL, resourceManagerUrl); final String historyServerUrl = "http://localhost:19888/jobhistory/job/job_${application.id}"; azkProps.put(ConfigurationKeys.HISTORY_SERVER_JOB_URL, historyServerUrl); final String sparkHistoryServerUrl = "http://localhost:18080/history/application_${application.id}/1/jobs"; azkProps.put(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL, sparkHistoryServerUrl); azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath"); azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest"); // final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps) final String applicationId = "123456789"; final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl( flow, node.getId(), applicationId, azkProps); final String expectedJobLinkUrl = sparkHistoryServerUrl.replace( ExecutionControllerUtils.OLD_APPLICATION_ID, applicationId); Assert.assertEquals(expectedJobLinkUrl, jobLinkUrl); }
Verify for a Spark job that is not routed to any cluster, its job link url is based on Spark History Server URL when the RM job link is invalid.
jobLinkUrlBasedOnSparkHistoryServerUrlForUnroutedJobs
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
Apache-2.0
@Test public void jobLinkUrlBasedOnResourceManagerUrlForUnroutedJobs() throws Exception { mockStatic(AuthenticationUtils.class); final HttpURLConnection connection = mock(HttpURLConnection.class); when(connection.getInputStream()).thenReturn( new ByteArrayInputStream("SUCCESS".getBytes("UTF-8")) ); // mock AuthenticationUtils so that RM request to validate job link succeeds when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString())) .thenReturn(connection); // create a flow that contains one node that was never routed, having no any cluster info final ExecutableNode node = createExecutableNode("testJob", "spark", null); final ExecutableFlow flow = createSingleNodeFlow(node); // populate azkaban web server properties final Props azkProps = new Props(); final String resourceManagerUrl = "http://localhost:8088/cluster/app/application_${application.id}"; azkProps.put(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL, resourceManagerUrl); final String historyServerUrl = "http://localhost:19888/jobhistory/job/job_${application.id}"; azkProps.put(ConfigurationKeys.HISTORY_SERVER_JOB_URL, historyServerUrl); final String sparkHistoryServerUrl = "http://localhost:18080/history/application_${application.id}/1/jobs"; azkProps.put(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL, sparkHistoryServerUrl); azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath"); azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest"); // final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps) final String applicationId = "123456789"; final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl( flow, node.getId(), applicationId, azkProps); Assert.assertEquals(resourceManagerUrl.replace(ExecutionControllerUtils.OLD_APPLICATION_ID, applicationId), jobLinkUrl); }
Verify for a given job that is not routed to any cluster, a job link url based on Resource Manager URL is returned when the RM job link is still valid.
jobLinkUrlBasedOnResourceManagerUrlForUnroutedJobs
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
Apache-2.0
@Test public void noJobLinkUrlForUnroutedJobWhenResourceManagerConnectionFails() throws Exception { mockStatic(AuthenticationUtils.class); // mock AuthenticationUtils so that RM request to validate job link fails when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString())) .thenThrow(new Exception("Connection failed")); // create a flow that contains one node that was never routed, having no any cluster info final ExecutableNode node = createExecutableNode("testJob", "spark", null); final ExecutableFlow flow = createSingleNodeFlow(node); // populate azkaban web server properties final Props azkProps = new Props(); final String resourceManagerUrl = "http://localhost:8088/cluster/app/application_${application.id}"; azkProps.put(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL, resourceManagerUrl); final String historyServerUrl = "http://localhost:19888/jobhistory/job/job_${application.id}"; azkProps.put(ConfigurationKeys.HISTORY_SERVER_JOB_URL, historyServerUrl); final String sparkHistoryServerUrl = "http://localhost:18080/history/application_${application.id}/1/jobs"; azkProps.put(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL, sparkHistoryServerUrl); azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath"); azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest"); // final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps) final String applicationId = "123456789"; final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl( flow, node.getId(), applicationId, azkProps); Assert.assertEquals(null, jobLinkUrl); }
Verify for a given job that is not routed to any cluster, no job link URL is returned when the connection to RM to validate the RM job link fails.
noJobLinkUrlForUnroutedJobWhenResourceManagerConnectionFails
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
Apache-2.0
@Test public void jobLinkUrlBasedOnSparkHistoryServerUrlForRoutedJobs() throws Exception { mockStatic(AuthenticationUtils.class); final HttpURLConnection connection = mock(HttpURLConnection.class); when(connection.getInputStream()).thenReturn( new ByteArrayInputStream("Failed to read the application".getBytes("UTF-8")) ); // mock AuthenticationUtils so that RM job link is no longer valid when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString())) .thenReturn(connection); // create a flow that contains one job that was routed to a test cluster final String resourceManagerUrl = "http://localhost:8088/cluster/app/application_${application.id}"; final String historyServerUrl = "http://localhost:19888/jobhistory/job/job_${application.id}"; final String sparkHistoryServerUrl = "http://localhost:18080/history/application_${application.id}/1/jobs"; final String hadoopClusterUrl = "http://localhost:8088"; final ClusterInfo cluster = new ClusterInfo("testCluster", hadoopClusterUrl, resourceManagerUrl, historyServerUrl, sparkHistoryServerUrl); final ExecutableNode node = createExecutableNode("testJob", "spark", cluster); final ExecutableFlow flow = createSingleNodeFlow(node); // populate azkaban web server properties final Props azkProps = new Props(); azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath"); azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest"); // final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps) final String applicationId = "123456789"; final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl( flow, node.getId(), applicationId, azkProps); final String expectedJobLinkUrl = sparkHistoryServerUrl.replace(ExecutionControllerUtils.NEW_APPLICATION_ID, applicationId); Assert.assertEquals(expectedJobLinkUrl, jobLinkUrl); }
Verify for a given Spark job routed to a cluster previously, its job link url is based on Spark History Server URL when the RM job link is invalid.
jobLinkUrlBasedOnSparkHistoryServerUrlForRoutedJobs
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
Apache-2.0
@Test public void noJobLinkUrlForRoutedJobsWhenMissingFullClusterInfo() throws Exception { mockStatic(AuthenticationUtils.class); final HttpURLConnection connection = mock(HttpURLConnection.class); when(connection.getInputStream()).thenReturn( new ByteArrayInputStream("Failed to read the application".getBytes("UTF-8")) ); // mock AuthenticationUtils so that RM job link is no longer valid when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString())) .thenReturn(connection); final String resourceManagerUrl = "http://localhost:8088/cluster/app/application_${application.id}"; final String sparkHistoryServerUrl = "http://localhost:18080/history/application_${application.id}/1/jobs"; final String hadoopClusterUrl = "http://localhost:8088"; // create a cluster that is missing History Server URL final ClusterInfo cluster = new ClusterInfo("testCluster", hadoopClusterUrl, resourceManagerUrl, null, sparkHistoryServerUrl); final ExecutableNode node = createExecutableNode("testJob", "spark", cluster); // create a flow that contains one job that was routed to a test cluster final ExecutableFlow flow = createSingleNodeFlow(node); // populate azkaban web server properties final Props azkProps = new Props(); azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath"); azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest"); // final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps) final String applicationId = "123456789"; final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl( flow, node.getId(), applicationId, azkProps); Assert.assertNull(jobLinkUrl); }
Verify for a given job that is routed to a cluster previously, no job link url is returned if any of the Resource Manager URL, Spark History Server URL, or History Server URL is missing.
noJobLinkUrlForRoutedJobsWhenMissingFullClusterInfo
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
Apache-2.0
@Test public void jobLinkUrlBasedOnResourceManagerUrlForRoutedJobs() throws Exception { mockStatic(AuthenticationUtils.class); final HttpURLConnection connection = mock(HttpURLConnection.class); when(connection.getInputStream()).thenReturn( new ByteArrayInputStream("SUCCESS".getBytes("UTF-8")) ); // mock AuthenticationUtils so that RM request to validate job link succeeds when(AuthenticationUtils.loginAuthenticatedURL(any(URL.class), anyString(), anyString())) .thenReturn(connection); // create a flow that contains one job that was routed to a test cluster final String resourceManagerUrl = "http://localhost:8088/cluster/app/application_${application.id}"; final String historyServerUrl = "http://localhost:19888/jobhistory/job/job_${application.id}"; final String sparkHistoryServerUrl = "http://localhost:18080/history/application_${application.id}/1/jobs"; final String hadoopClusterUrl = "http://localhost:8088"; final ClusterInfo cluster = new ClusterInfo("testCluster", hadoopClusterUrl, resourceManagerUrl, historyServerUrl, sparkHistoryServerUrl); final ExecutableNode node = createExecutableNode("testJob", "spark", cluster); final ExecutableFlow flow = createSingleNodeFlow(node); // populate azkaban web server properties final Props azkProps = new Props(); azkProps.put(ConfigurationKeys.AZKABAN_KEYTAB_PATH, "/fakepath"); azkProps.put(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL, "azkatest"); // final ExecutableFlow exFlow, final String jobId, final String applicationId, final Props azkProps) final String applicationId = "123456789"; final String jobLinkUrl = ExecutionControllerUtils.createJobLinkUrl( flow, node.getId(), applicationId, azkProps); final String expectedJobLinkUrl = resourceManagerUrl.replace( ExecutionControllerUtils.NEW_APPLICATION_ID, applicationId); Assert.assertEquals(expectedJobLinkUrl, jobLinkUrl); }
Verify for a given job that is routed to a cluster previously, a job link url based on Resource Manager URL is returned when the RM job link is still valid.
jobLinkUrlBasedOnResourceManagerUrlForRoutedJobs
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionControllerUtilsTest.java
Apache-2.0
@Test public void testLockSuccessSelectAndUpdateExecutionWithLockingWithoutBatch() throws Exception { when(mysqlNamedLock .getLock(any(DatabaseTransOperator.class), any(String.class), any(Integer.class))) .thenReturn(true); when(mysqlNamedLock.releaseLock(any(DatabaseTransOperator.class), any(String.class))) .thenReturn(true); final long currentTime = System.currentTimeMillis(); final ExecutableFlow flow1 = submitNewFlow("exectest1", "exec1", currentTime, ExecutionOptions.DEFAULT_FLOW_PRIORITY, Status.READY, DispatchMethod.CONTAINERIZED); assertThat( this.executionFlowDao.selectAndUpdateExecutionWithLocking(false, 2, Status.READY, DispatchMethod.CONTAINERIZED).size()) .isEqualTo(1); Set<Integer> expectedSet = new HashSet<>(); expectedSet.add(flow1.getExecutionId()); assertThat(this.executionFlowDao.selectAndUpdateExecutionWithLocking(false, 2, Status.READY, DispatchMethod.CONTAINERIZED)) .isEqualTo(expectedSet); }
This test method is written to verify that selectAndUpdateExecutionWithLocking is working as expected when batch select is disabled and the execution status to select is READY. @throws Exception
testLockSuccessSelectAndUpdateExecutionWithLockingWithoutBatch
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
Apache-2.0
@Test public void testUpdateExecutableFlowNullSLAOptions() throws Exception { final ExecutableFlow flow = createTestFlow(); this.executionFlowDao.uploadExecutableFlow(flow); final ExecutableFlow fetchFlow = this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId()); // set null sla option fetchFlow.getExecutionOptions().setSlaOptions(null); // Try updating flow try { this.executionFlowDao.updateExecutableFlow(fetchFlow); } catch (ExecutorManagerException e) { assert e.getMessage().contains("NPE"); } // Fetch flow again, the status must be READY not PREPARING as NPE is handled properly when //flow object is serialized. final ExecutableFlow readyFlow = this.executionFlowDao.fetchExecutableFlow(fetchFlow.getExecutionId()); assertThat(readyFlow.getStatus()).isEqualTo(Status.READY); }
Test the resiliency of ExecutableFlow when the SLA option is set to NULL. Make sure that the serialization of the flow object does not break and the flow proceeds to the next valid state.
testUpdateExecutableFlowNullSLAOptions
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
Apache-2.0
@Test public void testUpdateExecutionFlowVersionSet() throws Exception { final ExecutableFlow flow = createTestFlow(); this.executionFlowDao.uploadExecutableFlow(flow); flow.setVersionSet(createVersionSet()); this.executionFlowDao.updateExecutableFlow(flow); final ExecutableFlow fetchFlow = this.executionFlowDao.fetchExecutableFlow(flow.getExecutionId()); Assert.assertTrue(fetchFlow.getVersionSet() != null); assertThat(flow.getVersionSet().getImageToVersionMap()).isEqualTo(fetchFlow.getVersionSet().getImageToVersionMap()); }
Test when an executable flow sets its version set field, the information can be retrieved after updateExecutableFlow and fetchExecutableFlow @throws Exception
testUpdateExecutionFlowVersionSet
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
Apache-2.0
private VersionSet createVersionSet(){ final String testJsonString1 = "{\"azkaban-base\":{\"version\":\"7.0.4\",\"path\":\"path1\"," + "\"state\":\"ACTIVE\"},\"azkaban-config\":{\"version\":\"9.1.1\",\"path\":\"path2\"," + "\"state\":\"ACTIVE\"},\"spark\":{\"version\":\"8.0\",\"path\":\"path3\"," + "\"state\":\"ACTIVE\"}}"; final String testMd5Hex1 = "43966138aebfdc4438520cc5cd2aefa8"; return new VersionSet(testJsonString1, testMd5Hex1, 1); }
Create a version set from scratch @return a new version set
createVersionSet
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutionFlowDaoTest.java
Apache-2.0
@AfterClass public static void stop() throws Exception { tlsEnabledServer.stop(); }
This method is used to stop the TLS-enabled Jetty server. @throws Exception
stop
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorApiClientTest.java
Apache-2.0
@Before public void setUp() throws Exception { this.props = new Props(); this.props.put(ConfigurationKeys.AZKABAN_EXECUTOR_MAX_FAILURE_COUNT, 2); this.props.put(ConfigurationKeys.AZKABAN_ADMIN_ALERT_EMAIL, AZ_ADMIN_ALERT_EMAIL); this.loader = mock(ExecutorLoader.class); this.mailAlerter = mock(Emailer.class); this.alerterHolder = new AlerterHolder(this.props, (Emailer) this.mailAlerter); this.apiGateway = mock(ExecutorApiGateway.class); this.executorHealthChecker = new ExecutorHealthChecker(this.props, this.loader, this .apiGateway, this.alerterHolder); this.flow1 = TestUtils.createTestExecutableFlow("exectest1", "exec1", DispatchMethod.POLL); this.flow1.getExecutionOptions().setFailureEmails(Arrays.asList(FLOW_ADMIN_EMAIL.split(","))); this.flow1.setExecutionId(EXECUTION_ID_11); this.flow1.setStatus(Status.RUNNING); this.flow2 = TestUtils.createTestExecutableFlow("exectest1", "exec2", DispatchMethod.POLL); this.flow2.setExecutionId(EXECUTION_ID_12); this.flow2.setStatus(Status.RUNNING); this.executor1 = new Executor(1, "localhost", 12345, true); this.executor2 = new Executor(2, "localhost", 5678, true); when(this.loader.fetchActiveFlows(any())).thenReturn(this.activeFlows); }
Test case for executor health checker.
setUp
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
@Test public void checkExecutorHealthAlive() throws Exception { this.activeFlows.put(EXECUTION_ID_11, new Pair<>( new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1)); when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenReturn(ImmutableMap.of(ConnectorParams .STATUS_PARAM, ConnectorParams.RESPONSE_ALIVE)); this.executorHealthChecker.checkExecutorHealth(); assertThat(this.flow1.getStatus()).isEqualTo(Status.RUNNING); verifyZeroInteractions(this.mailAlerter); }
Test running flow is not finalized and alert email is not sent when executor is alive.
checkExecutorHealthAlive
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
@Test public void checkExecutorHealthExecutorIdRemoved() throws Exception { this.activeFlows.put(EXECUTION_ID_11, new Pair<>( new ExecutionReference(EXECUTION_ID_11, null, DispatchMethod.POLL), this.flow1)); when(this.loader.fetchExecutableFlow(EXECUTION_ID_11)).thenReturn(this.flow1); this.executorHealthChecker.checkExecutorHealth(); verify(this.loader).updateExecutableFlow(this.flow1); assertThat(this.flow1.getStatus()).isEqualTo(Status.FAILED); }
Test running flow is finalized when its executor is removed from DB.
checkExecutorHealthExecutorIdRemoved
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
@Test public void checkExecutorHealthConsecutiveFailures() throws Exception { // By default mocked methods will return an empty collection. // Therefore underlying call to apiGateway.callWithExecutionId returns an empty Map for all // invocations of executorHealthChecker.checkExecutorHealth() in this test. this.activeFlows.put(EXECUTION_ID_11, new Pair<>( new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1)); // Failed to ping executor. Failure count (=1) < MAX_FAILURE_COUNT (=2). Do not alert. this.executorHealthChecker.checkExecutorHealth(); verify(this.apiGateway).callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000)); verifyZeroInteractions(this.mailAlerter); // Pinged executor successfully. Failure count (=0) < MAX_FAILURE_COUNT (=2). Do not alert. when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenReturn(ImmutableMap.of(ConnectorParams .STATUS_PARAM, ConnectorParams.RESPONSE_ALIVE)); this.executorHealthChecker.checkExecutorHealth(); verifyZeroInteractions(this.mailAlerter); // Failed to ping executor. Failure count (=1) < MAX_FAILURE_COUNT (=2). Do not alert. when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenReturn(null); this.executorHealthChecker.checkExecutorHealth(); verifyZeroInteractions(this.mailAlerter); // Failed to ping executor again. Failure count (=2) = MAX_FAILURE_COUNT (=2). Alert AZ admin. 
when(this.loader.fetchExecutableFlow(flow1.getExecutionId())).thenReturn(flow1); this.executorHealthChecker.checkExecutorHealth(); verify(this.mailAlerter, times(1)).alertOnFailedExecutorHealthCheck(eq(this.executor1), eq(Arrays.asList(this.flow1)), any(ExecutorManagerException.class), eq(Arrays.asList(AZ_ADMIN_ALERT_EMAIL.split(",")))); // Verify remediation tasks are performed for unreachable executors. // Flow should be finalized with alerts sent over email. assertThat(this.flow1.getStatus()).isEqualTo(Status.FAILED); String expectedReason = "Executor was unreachable, executor-id: 1, executor-host: localhost, " + "executor-port: 12345"; verify(this.mailAlerter, times(1)).alertOnError(eq(flow1), eq(expectedReason)); }
Test alert emails are sent when there are consecutive failures to contact the executor.
checkExecutorHealthConsecutiveFailures
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
@Test public void testCheckExecutorHealthWrapperExceptionHandling() throws Exception { this.activeFlows.put(EXECUTION_ID_11, new Pair<>( new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1)); when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenThrow(new RuntimeException("test " + "exception")); // this will throw, causing the test to fail in case the error is not caught correctly this.executorHealthChecker.checkExecutorHealthQuietly(); verifyZeroInteractions(this.mailAlerter); }
Test that the wrapper routine swallows any exceptions reported by underlying health checker.
testCheckExecutorHealthWrapperExceptionHandling
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
@Test public void testFailureDuringExecutorPing() throws Exception { this.activeFlows.put(EXECUTION_ID_11, new Pair<>( new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1)); this.activeFlows.put(EXECUTION_ID_12, new Pair<>( new ExecutionReference(EXECUTION_ID_12, this.executor2, DispatchMethod.POLL), this.flow2)); // Throw a runtime exception for both executors. when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenThrow(new RuntimeException("test " + "exception")); when(this.apiGateway.callWithExecutionId(this.executor2.getHost(), this.executor2.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000))).thenThrow(new RuntimeException("test " + "exception")); this.executorHealthChecker.checkExecutorHealth(); // Verify ping API is called for both executors. Implying that runtime exception for one of the // executors did not prevent the check on other executor. verify(this.apiGateway).callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000)); verify(this.apiGateway).callWithExecutionId(this.executor2.getHost(), this.executor2.getPort(), ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000)); verifyZeroInteractions(this.mailAlerter); }
Test that runtime exceptions from the Ping API for one executor don't prevent healthchecks on other executors.
testFailureDuringExecutorPing
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
/**
 * Verifies that a runtime exception raised while sending the executor-unreachable alert
 * email does not block cleanup of the flows running on that executor: once the failure
 * threshold is reached the flow is still persisted and finalized as FAILED.
 */
@Test
public void testFailureDuringAlerting() throws Exception {
  this.activeFlows.clear();
  this.activeFlows.put(EXECUTION_ID_11, new Pair<>(
      new ExecutionReference(EXECUTION_ID_11, this.executor1, DispatchMethod.POLL), this.flow1));
  // Force a failure of the executor ping API
  ExecutorManagerException healthcheckException = new ExecutorManagerException("test exception");
  when(this.apiGateway.callWithExecutionId(this.executor1.getHost(), this.executor1.getPort(),
      ConnectorParams.PING_ACTION, null, null, null, Optional.of(5000)))
      .thenThrow(healthcheckException);
  // Force an unchecked exception when sending alert emails for the healthcheck failure
  // Note that we can't use this.alerterHolder.get("email") in the when() as mockito
  // doesn't like nested mocks.
  doThrow(new RuntimeException("test runtime exception"))
      .when(this.mailAlerter)
      .alertOnFailedExecutorHealthCheck(
          this.executor1,
          Arrays.asList(this.flow1),
          healthcheckException,
          Arrays.asList(AZ_ADMIN_ALERT_EMAIL.split(",")));
  when(this.loader.fetchExecutableFlow(EXECUTION_ID_11)).thenReturn(this.flow1);
  // Run the health check until the configured max failure count is reached; the final
  // check triggers the (failing) alert followed by the executor cleanup.
  for (int failureCount = 0;
      failureCount < props.getInt(ConfigurationKeys.AZKABAN_EXECUTOR_MAX_FAILURE_COUNT);
      failureCount++) {
    this.executorHealthChecker.checkExecutorHealth();
  }
  // Confirm that cleanup for the executor is attempted despite failure to send emails.
  // verify() can't be called on executorHealthCheck.cleanUpForMissingExecutor as it's not being
  // mocked. Directly checking the flow update through the mocked 'loader' is a suitable proxy
  // for this.
  verify(this.loader).updateExecutableFlow(this.flow1);
  assertThat(this.flow1.getStatus()).isEqualTo(Status.FAILED);
}
Test that any failures while sending alerts for unreachable executors don't prevent the finalization(cleanup) of flows running on that executor.
testFailureDuringAlerting
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
/**
 * An exception while finalizing one flow must not stop finalization of the next flow
 * assigned to the same executor: both DB fetches are attempted even though each throws.
 */
@Test
public void testFailureDuringFinalization() throws Exception {
  final int[] executionIds = {EXECUTION_ID_11, EXECUTION_ID_12};
  final ExecutableFlow[] flows = {this.flow1, this.flow2};
  for (int i = 0; i < executionIds.length; i++) {
    // Both executions live on executor1; every DB fetch fails.
    this.activeFlows.put(executionIds[i], new Pair<>(
        new ExecutionReference(executionIds[i], this.executor1, DispatchMethod.POLL), flows[i]));
    when(this.loader.fetchExecutableFlow(executionIds[i]))
        .thenThrow(new RuntimeException("test runtime exception"));
  }
  this.executorHealthChecker.finalizeFlows(ImmutableList.of(this.flow1, flow2),
      "test finalize reason");
  // Both flows were attempted despite the first one failing.
  verify(this.loader).fetchExecutableFlow(flow1.getExecutionId());
  verify(this.loader).fetchExecutableFlow(flow2.getExecutionId());
}
Test that exceptions during flow finalization do not block finalization of subsequent flow for an executor.
testFailureDuringFinalization
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorHealthCheckerTest.java
Apache-2.0
/**
 * Dispatch failover scenario:
 * 1. Executor 1 throws an exception when trying to dispatch to it.
 * 2. ExecutorManager should try the next executor.
 * 3. Executor 2 accepts the dispatched execution.
 */
@Test
public void testDispatchException() throws Exception {
  testSetUpForRunningFlows();
  this.manager.start();
  final ExecutableFlow flow1 = TestUtils
      .createTestExecutableFlow("exectest1", "exec1", DispatchMethod.PUSH);
  doReturn(flow1).when(this.executorLoader).fetchExecutableFlow(-1);
  mockFlowDoesNotExist();
  // First dispatch attempt fails; the retry returns successfully.
  when(this.apiGateway.callWithExecutable(any(), any(), eq(ConnectorParams.EXECUTE_ACTION)))
      .thenThrow(new ExecutorManagerException("Mocked dispatch exception"))
      .thenReturn(null);
  this.manager.submitExecutableFlow(flow1, this.user.getUserId());
  waitFlowFinished(flow1);
  // Both executors received a dispatch attempt, i.e. failover happened.
  verify(this.apiGateway)
      .callWithExecutable(flow1, this.manager.fetchExecutor(1), ConnectorParams.EXECUTE_ACTION);
  verify(this.apiGateway)
      .callWithExecutable(flow1, this.manager.fetchExecutor(2), ConnectorParams.EXECUTE_ACTION);
  // The failed attempt must have unassigned the executor exactly once.
  verify(this.executorLoader, Mockito.times(1)).unassignExecutor(-1);
}
1. Executor 1 throws an exception when trying to dispatch to it 2. ExecutorManager should try next executor 3. Executor 2 accepts the dispatched execution
testDispatchException
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/ExecutorManagerTest.java
Apache-2.0
/** With no store registered, the manager must report its absence as {@code null}. */
@Test
public void getKeyStoreNull() {
  Assert.assertNull(keyStoreManager.getKeyStore());
}
Since the KeyStore is not set, it must be null
getKeyStoreNull
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/KeyStoreManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/KeyStoreManagerTest.java
Apache-2.0
/** After registering a store, the manager must hand back that very same instance. */
@Test
public void getKeyStoreNotNull() {
  this.keyStore = Mockito.mock(KeyStore.class);
  keyStoreManager.setKeyStore(this.keyStore);
  final KeyStore fetched = keyStoreManager.getKeyStore();
  Assert.assertNotNull(fetched);
  Assert.assertEquals(this.keyStore, fetched);
}
KeyStore is set, it must not be null, and must be same as input when retrieved.
getKeyStoreNotNull
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/KeyStoreManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/KeyStoreManagerTest.java
Apache-2.0
/**
 * When the status-update call fails AND the assigned executor no longer exists in the DB,
 * the execution must be finalized immediately.
 */
@Test
public void updateExecutionsUpdateCallFailsExecutorDoesntExist() throws Exception {
  mockUpdateCallFails();
  // Executor lookup finds nothing for any id.
  when(this.executorLoader.fetchExecutor(anyInt())).thenReturn(null);
  // Freeze the clock so time-based thresholds are deterministic.
  final long frozenNow = System.currentTimeMillis();
  DateTimeUtils.setCurrentMillisFixed(frozenNow);
  this.updater.updateExecutions();
  verify(this.executionFinalizer).finalizeFlow(
      this.execution, "Not running on the assigned executor (any more)", null);
}
Should finalize execution if executor doesn't exist in the DB.
updateExecutionsUpdateCallFailsExecutorDoesntExist
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/RunningExecutionsUpdaterTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/RunningExecutionsUpdaterTest.java
Apache-2.0
/**
 * Verifies that cpu/memory requested through flow parameters takes precedence over the
 * system configuration, clamped to the configured min/max bounds, and that the derived
 * resource limits track the (possibly clamped) requests.
 */
@Test
public void testCPUAndMemoryRequestedInFlowParam() throws Exception {
  // User requested cpu and memory that are below max allowed cpu and memory and exceed min
  // allowed cpu and memory
  final Map<String, String> flowParam = new HashMap<>();
  final String cpuRequestedInFlowParam = "3";
  final String memoryRequestedInFlowParam = "6Gi";
  flowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST, cpuRequestedInFlowParam);
  flowParam
      .put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST, memoryRequestedInFlowParam);
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(flowParam, null)
      .equals(cpuRequestedInFlowParam));
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(flowParam,
      null).equals(memoryRequestedInFlowParam));
  // cpu and memory limit are determined dynamically based on requested cpu and memory
  String expectedCPULimit = this.kubernetesContainerizedImpl
      .getResourceLimitFromResourceRequest(cpuRequestedInFlowParam, CPU_REQUESTED_IN_PROPS,
          CPU_LIMIT_MULTIPLIER);
  String expectedMemoryLimit = this.kubernetesContainerizedImpl
      .getResourceLimitFromResourceRequest(memoryRequestedInFlowParam,
          MEMORY_REQUESTED_IN_PROPS, MEMORY_LIMIT_MULTIPLIER);
  Assert.assertTrue(expectedCPULimit.equals(cpuRequestedInFlowParam));
  Assert.assertTrue(expectedMemoryLimit.equals(memoryRequestedInFlowParam));
  // User requested cpu and memory that exceed max allowed cpu and memory
  final String greaterThanMaxCPURequestedInFlowParam = "5";
  final String greaterThanMaxMemoryRequestedInFlowParam = "80Gi";
  flowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST,
      greaterThanMaxCPURequestedInFlowParam);
  flowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST,
      greaterThanMaxMemoryRequestedInFlowParam);
  // Over-max requests are clamped down to the configured maxima.
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(flowParam, null)
      .equals(MAX_ALLOWED_CPU));
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(flowParam,
      null).equals(MAX_ALLOWED_MEMORY));
  // User requested cpu and memory that are below min allowed cpu and memory
  final String lessThanMinCPURequestedInFlowParam = "1m";
  final String lessThanMinMemoryRequestedInFlowParam = "1Mi";
  flowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST,
      lessThanMinCPURequestedInFlowParam);
  flowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST,
      lessThanMinMemoryRequestedInFlowParam);
  // Under-min requests are clamped up to the configured minima.
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(flowParam, null)
      .equals(MIN_ALLOWED_CPU));
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(flowParam,
      null).equals(MIN_ALLOWED_MEMORY));
  // cpu and memory limit are determined dynamically based on requested cpu and memory
  expectedCPULimit = this.kubernetesContainerizedImpl
      .getResourceLimitFromResourceRequest(MAX_ALLOWED_CPU, CPU_REQUESTED_IN_PROPS,
          CPU_LIMIT_MULTIPLIER);
  expectedMemoryLimit = this.kubernetesContainerizedImpl
      .getResourceLimitFromResourceRequest(MAX_ALLOWED_MEMORY, MEMORY_REQUESTED_IN_PROPS,
          MEMORY_LIMIT_MULTIPLIER);
  Assert.assertTrue(expectedCPULimit.equals(MAX_ALLOWED_CPU));
  Assert.assertTrue(expectedMemoryLimit.equals(MAX_ALLOWED_MEMORY));
  // User requested memory of different unit, e.g. Ti, Mi
  final String MemoryRequestedInFlowParam1 = "7600Mi";
  flowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST,
      MemoryRequestedInFlowParam1);
  // 7600 Mi = 7.6 Gi is smaller than max allowed memory and higher than min allowed memory, user
  // requested memory should be used
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(flowParam,
      null).equals(MemoryRequestedInFlowParam1));
  final String MemoryRequestedInFlowParam2 = "0.1Ti";
  flowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST,
      MemoryRequestedInFlowParam2);
  // 0.1 Ti = 100 Gi > max allowed memory 32 Gi, user requested memory is replaced by max
  // allowed memory
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(flowParam,
      null).equals(MAX_ALLOWED_MEMORY));
  // the memory request set by config should be used to get limit
  expectedMemoryLimit = this.kubernetesContainerizedImpl
      .getResourceLimitFromResourceRequest(MEMORY_REQUESTED_IN_PROPS, MEMORY_REQUESTED_IN_PROPS,
          MEMORY_LIMIT_MULTIPLIER);
  Assert.assertTrue(expectedMemoryLimit.equals(MEMORY_REQUESTED_IN_PROPS));
}
This test is used to verify that if cpu and memory for a flow container is requested from flow parameter then that is given more precedence over system configuration, the constraints are max allowed cpu and memory set in config. @throws Exception
testCPUAndMemoryRequestedInFlowParam
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
/**
 * Verifies how VPA recommender output combines with flow parameters when determining the
 * container cpu/memory requests: recommendations are clamped to the configured min/max
 * bounds, and the larger of (recommendation, flow param) wins.
 */
@Test
public void testCPUAndMemoryRequestedFromVPARecommender() throws Exception {
  // Fully ramp up VPA so VPA is allowed to give resource recommendations to all flows.
  this.kubernetesContainerizedImpl.setVPARampUp(100);
  final Map<String, String> emptyFlowParam = new HashMap<>();
  final Map<String, String> defaultFlowParam = new HashMap<>();
  final String cpuRequestedInFlowParam = "3";
  final String memoryRequestedInFlowParam = "6Gi";
  defaultFlowParam.put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST,
      cpuRequestedInFlowParam);
  defaultFlowParam
      .put(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST, memoryRequestedInFlowParam);
  // flow resource recommendation cannot be less than min allowed resource limits
  final FlowResourceRecommendation tooSmallFlowResourceRecommendation =
      new FlowResourceRecommendation(1, 1, "flow", "1m", "1Mi", null);
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(emptyFlowParam,
      tooSmallFlowResourceRecommendation.getCpuRecommendation()).equals(MIN_ALLOWED_CPU));
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(emptyFlowParam,
      tooSmallFlowResourceRecommendation.getMemoryRecommendation()).equals(MIN_ALLOWED_MEMORY));
  // flow resource recommendation cannot be greater than max allowed resource limits
  final FlowResourceRecommendation tooLargeFlowResourceRecommendation =
      new FlowResourceRecommendation(1, 1, "flow", "20", "1Ti", null);
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(emptyFlowParam,
      tooLargeFlowResourceRecommendation.getCpuRecommendation()).equals(MAX_ALLOWED_CPU));
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(emptyFlowParam,
      tooLargeFlowResourceRecommendation.getMemoryRecommendation()).equals(MAX_ALLOWED_MEMORY));
  // max of flow resource recommendation and flow param will be taken
  final FlowResourceRecommendation lessThanFlowParamFlowResourceRecommendation =
      new FlowResourceRecommendation(1, 1, "flow", "2", "5Gi", null);
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(defaultFlowParam,
      lessThanFlowParamFlowResourceRecommendation.getCpuRecommendation())
      .equals(cpuRequestedInFlowParam));
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(
      defaultFlowParam, lessThanFlowParamFlowResourceRecommendation.getMemoryRecommendation())
      .equals(memoryRequestedInFlowParam));
  // max of flow resource recommendation and flow param will be taken
  final FlowResourceRecommendation greaterThanFlowParamFlowResourceRecommendation =
      new FlowResourceRecommendation(1, 1, "flow", "4", "7Gi", null);
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(defaultFlowParam,
      greaterThanFlowParamFlowResourceRecommendation.getCpuRecommendation())
      .equals(greaterThanFlowParamFlowResourceRecommendation.getCpuRecommendation()));
  Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(
      defaultFlowParam, greaterThanFlowParamFlowResourceRecommendation.getMemoryRecommendation())
      .equals(greaterThanFlowParamFlowResourceRecommendation.getMemoryRecommendation()));
  // Reset it back to 0 for other unit tests.
  this.kubernetesContainerizedImpl.setVPARampUp(0);
}
This test is used to verify that if CPU and memory for a flow container is not requested in flow param but is obtained from VPA recommender. @throws Exception
testCPUAndMemoryRequestedFromVPARecommender
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
@Test public void testCPUAndMemoryRequestedFromProperties() throws Exception { final Map<String, String> flowParam = new HashMap<>(); Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerCPURequest(flowParam, null).equals(CPU_REQUESTED_IN_PROPS)); Assert.assertTrue(this.kubernetesContainerizedImpl.getFlowContainerMemoryRequest(flowParam, null).equals(MEMORY_REQUESTED_IN_PROPS)); // cpu and memory limit are determined dynamically based on requested cpu and memory final String expectedCPULimit = this.kubernetesContainerizedImpl .getResourceLimitFromResourceRequest(CPU_REQUESTED_IN_PROPS, CPU_REQUESTED_IN_PROPS, CPU_LIMIT_MULTIPLIER); Assert.assertTrue(expectedCPULimit.equals(CPU_REQUESTED_IN_PROPS)); final String expectedMemoryLimit = this.kubernetesContainerizedImpl .getResourceLimitFromResourceRequest(MEMORY_REQUESTED_IN_PROPS, MEMORY_REQUESTED_IN_PROPS, MEMORY_LIMIT_MULTIPLIER); Assert.assertTrue(expectedMemoryLimit.equals(MEMORY_REQUESTED_IN_PROPS)); }
This test is used to verify that if CPU and memory for a flow container is not requested either in flow param or VPA recommender but defined in system configuration then that is used. @throws Exception
testCPUAndMemoryRequestedFromProperties
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
/**
 * Verifies that pod environment variables are populated from flow parameters carrying the
 * pod-env-var prefix: prefixed entries are copied (key upper-cased), others are ignored.
 */
@Test
public void testPodEnvVariablesFromFlowParam() throws Exception {
  final String azkabanBaseVersion = "azkaban-base.version";
  final String azkabanConfigVersion = "azkaban-config.version";
  final Map<String, String> flowParam = new HashMap<>();
  flowParam.put(FlowParameters.FLOW_PARAM_POD_ENV_VAR + azkabanBaseVersion, "1.0.0");
  flowParam.put(FlowParameters.FLOW_PARAM_POD_ENV_VAR + azkabanConfigVersion, "0.1.0");
  // Not prefixed with FLOW_PARAM_POD_ENV_VAR, so it must not become an env variable.
  flowParam.put("any.other.param", "test");
  final Map<String, String> envVariables = new HashMap<>();
  this.kubernetesContainerizedImpl.setupPodEnvVariables(envVariables, flowParam);
  // Use JUnit assertions rather than the `assert` keyword: bare asserts are silently
  // skipped unless the JVM runs with -ea, which would let regressions pass unnoticed.
  Assert.assertEquals(2, envVariables.size());
  Assert.assertEquals("1.0.0", envVariables.get(azkabanBaseVersion.toUpperCase()));
  Assert.assertEquals("0.1.0", envVariables.get(azkabanConfigVersion.toUpperCase()));
}
This test is used to verify that if env variables are set correctly for pod which are set from flow param. @throws Exception
testPodEnvVariablesFromFlowParam
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
/**
 * Verifies that a PREPARING containerized flow exposes the execution id, the flow status
 * and the full version set (serialized as JSON) in the metadata produced by the
 * flow-status-change event listener.
 */
@Test
public void testPreparingFlowEvent() throws Exception {
  final ExecutableFlow flow = createFlowWithMultipleJobtypes();
  flow.setExecutionId(2);
  when(this.executorLoader.fetchExecutableFlow(flow.getExecutionId())).thenReturn(flow);
  when(imageRampupManager.getVersionByImageTypes(any(), any(Set.class), any(Set.class)))
      .thenReturn(getVersionMap());
  final TreeSet<String> jobTypes = ContainerImplUtils.getJobTypesForFlow(flow);
  final Map<String, String> flowParam = new HashMap<>(); // empty map
  // The version set must cover the base image, the config image and every job type.
  final Set<String> allImageTypes = new TreeSet<>();
  allImageTypes.add(KubernetesContainerizedImpl.DEFAULT_AZKABAN_BASE_IMAGE_NAME);
  allImageTypes.add(KubernetesContainerizedImpl.DEFAULT_AZKABAN_CONFIG_IMAGE_NAME);
  allImageTypes.addAll(jobTypes);
  final VersionSet versionSet = this.kubernetesContainerizedImpl
      .fetchVersionSet(flow.getExecutionId(), flowParam, allImageTypes, flow);
  flow.setStatus(Status.PREPARING);
  flow.setVersionSet(versionSet);
  // Test event reported from a pod
  final Map<String, String> metaData = flowStatusChangeEventListener.getFlowMetaData(flow);
  Assert.assertTrue(metaData.get(EXECUTION_ID).equals("2"));
  Assert.assertTrue(metaData.get(FLOW_STATUS).equals("PREPARING"));
  // Deserialize the JSON version set and compare the image -> version entries.
  final String versionSetJsonString = metaData.get(VERSION_SET);
  final Map<String, String> imageToVersionMap =
      new ObjectMapper().readValue(versionSetJsonString,
          new TypeReference<HashMap<String, String>>() {
          });
  assertThat(imageToVersionMap.keySet()).isEqualTo(versionSet.getImageToVersionMap().keySet());
  assertThat(imageToVersionMap.get("spark")).isEqualTo(versionSet.getImageToVersionMap()
      .get("spark").getVersion());
  assertThat(imageToVersionMap.get(KubernetesContainerizedImpl.DEFAULT_AZKABAN_BASE_IMAGE_NAME))
      .isEqualTo(versionSet.getImageToVersionMap()
          .get(KubernetesContainerizedImpl.DEFAULT_AZKABAN_BASE_IMAGE_NAME).getVersion());
}
Test a preparing flow to be executed in a container, verifying that the execution id, version set, and flow status can be processed by a PodEventListener. @throws Exception
testPreparingFlowEvent
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
@Test public void testFlowPropertyAndParamsMerge() throws Exception { final ExecutableFlow flow = createTestFlow(); flow.setExecutionId(3); final Props flowProps = new Props(); flowProps.put("param.override.image.version", "1.2.3"); flowProps.put("regular.param", "4.5.6"); // Should be filtered out. when(this.projectLoader.fetchProjectProperty( flow.getProjectId(), flow.getVersion(), Constants.PARAM_OVERRIDE_FILE)).thenReturn(flowProps); final ExecutionOptions executionOptions = new ExecutionOptions(); flow.setExecutionOptions(executionOptions); final Map<String, String> flowParams = flow.getExecutionOptions().getFlowParameters(); Assert.assertEquals(0, flowParams.size()); // Merge the flow props and flow params flow.setFlowParamsFromProps( FlowLoaderUtils.loadPropsForExecutableFlow(this.projectLoader, flow)); final Map<String, String> mergedFlowPropsAndParams = flow.getExecutionOptions().getFlowParameters(); Assert.assertEquals(1, mergedFlowPropsAndParams.size()); Assert.assertTrue(mergedFlowPropsAndParams.containsKey("image.version")); Assert.assertEquals("1.2.3", mergedFlowPropsAndParams.get("image.version")); }
Test merging of flow properties and flow params. @throws Exception
testFlowPropertyAndParamsMerge
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
/**
 * Exercises merging when both a param-override property and a pre-set flow param define
 * "image.version". Per the assertions below, the property-file value ("1.2.3") is the one
 * present after the merge.
 */
@Test
public void testFlowPropertyAndParamsMergeWithOverwrite() throws Exception {
  final ExecutableFlow flow = createTestFlow();
  flow.setExecutionId(3);
  final Props flowProps = new Props();
  flowProps.put("param.override.image.version", "1.2.3");
  flowProps.put("regular.param", "4.5.6"); // Should be filtered out.
  when(this.projectLoader.fetchProjectProperty(
      flow.getProjectId(), flow.getVersion(), Constants.PARAM_OVERRIDE_FILE))
      .thenReturn(flowProps);
  final ExecutionOptions executionOptions = new ExecutionOptions();
  flow.setExecutionOptions(executionOptions);
  final Map<String, String> flowParams = flow.getExecutionOptions().getFlowParameters();
  Assert.assertEquals(0, flowParams.size());
  // flow params take priority.
  // NOTE(review): the assertion at the bottom expects "1.2.3" (the property value), so in
  // this scenario the param-override property actually replaces this flow param; the
  // comment above appears to contradict the asserted behavior — confirm intent.
  flowParams.put("image.version", "2.3.4");
  // Merge the flow props and flow params
  flow.setFlowParamsFromProps(
      FlowLoaderUtils.loadPropsForExecutableFlow(this.projectLoader, flow));
  final Map<String, String> mergedFlowPropsAndParams =
      flow.getExecutionOptions().getFlowParameters();
  Assert.assertEquals(1, mergedFlowPropsAndParams.size());
  Assert.assertTrue(mergedFlowPropsAndParams.containsKey("image.version"));
  Assert.assertEquals("1.2.3", mergedFlowPropsAndParams.get("image.version"));
}
Test merging of flow properties and flow params where both define the same key; per the assertions, the param-override flow property ("1.2.3") ends up overriding the pre-set flow param ("2.3.4"). @throws Exception
testFlowPropertyAndParamsMergeWithOverwrite
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
@Test public void testFlowPropertyAndParamsMergeNull() throws Exception { final ExecutableFlow flow = createTestFlow(); flow.setExecutionId(3); when(this.projectLoader.fetchProjectProperty( flow.getProjectId(), flow.getVersion(), Constants.PARAM_OVERRIDE_FILE)).thenReturn(null); final ExecutionOptions executionOptions = new ExecutionOptions(); flow.setExecutionOptions(executionOptions); final Map<String, String> flowParams = flow.getExecutionOptions().getFlowParameters(); Assert.assertEquals(0, flowParams.size()); // flow params take priority. flowParams.put("image.version", "2.3.4"); // Merge the flow props and flow params flow.setFlowParamsFromProps( FlowLoaderUtils.loadPropsForExecutableFlow(this.projectLoader, flow)); final Map<String, String> mergedFlowPropsAndParams = flow.getExecutionOptions().getFlowParameters(); Assert.assertEquals(1, mergedFlowPropsAndParams.size()); Assert.assertTrue(mergedFlowPropsAndParams.containsKey("image.version")); Assert.assertEquals("2.3.4", mergedFlowPropsAndParams.get("image.version")); }
Test merging of flow properties and flow params where props in null. @throws Exception
testFlowPropertyAndParamsMergeNull
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
/**
 * Builds the fixed image-name to version fixtures used when mocking the rampup manager.
 *
 * @return a sorted map of image name to {@link VersionInfo}
 */
private Map<String, VersionInfo> getVersionMap() {
  final Map<String, VersionInfo> versions = new TreeMap<>();
  // Job-type images.
  versions.put("spark", new VersionInfo("8.0", "path3", State.ACTIVE));
  versions.put("kafkaPush", new VersionInfo("7.1", "path4", State.ACTIVE));
  versions.put(DEPENDENCY1, new VersionInfo("6.4", "path5", State.ACTIVE));
  // Platform images.
  versions.put(KubernetesContainerizedImpl.DEFAULT_AZKABAN_BASE_IMAGE_NAME,
      new VersionInfo("7.0.4", "path1", State.ACTIVE));
  versions.put(KubernetesContainerizedImpl.DEFAULT_AZKABAN_CONFIG_IMAGE_NAME,
      new VersionInfo("9.1.1", "path2", State.ACTIVE));
  return versions;
}
Creates a version map, which contains key value pairs of image name and corresponding version number @return a version set map
getVersionMap
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
/**
 * Creates a Guice injector for Azkaban event reporter instantiation, backed by an
 * in-memory H2 database so the common module can be wired without external services.
 *
 * @param props base properties to augment with reporting and DB settings
 * @return the configured injector
 */
private Injector getInjector(final Props props) {
  props.put("database.type", "h2");
  props.put("h2.path", "h2");
  props.put(AZKABAN_EVENT_REPORTING_ENABLED, "true");
  props.put(AZKABAN_EVENT_REPORTING_CLASS_PARAM, "azkaban.project.AzkabanEventReporterTest");
  return Guice.createInjector(new AzkabanCommonModule(props));
}
Creates a Guice injector for Azkaban event reporter instantiation @param props @return
getInjector
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/KubernetesContainerizedImplTest.java
Apache-2.0
/**
 * Prints the event log at debug verbosity: one DEBUG line per pod listing its recorded
 * {@link AzPodStatus} sequence.
 *
 * @param statusLogMap pod name to recorded status queue; must not be null
 */
public static void logDebugStatusMap(ConcurrentMap<String, Queue<AzPodStatus>> statusLogMap) {
  requireNonNull(statusLogMap, "status log map must not be null");
  statusLogMap.forEach((podName, statuses) -> {
    final StringBuilder line = new StringBuilder(podName + ": ");
    for (final AzPodStatus status : statuses) {
      line.append(status.toString() + ", ");
    }
    logger.debug(line.toString());
  });
}
Print the event log at debug verbosity. @param statusLogMap
logDebugStatusMap
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/watch/KubernetesWatchTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/watch/KubernetesWatchTest.java
Apache-2.0
/**
 * Logs each raw pod watch event and the {@link AzPodStatus} extracted from it.
 */
@Override
public void onEvent(Response<V1Pod> watchEvent) {
  final V1Pod pod = watchEvent.object;
  logger.debug(String.format("%s : %s, %s, %s", watchEvent.type,
      pod.getMetadata().getName(), pod.getStatus().getMessage(), pod.getStatus().getPhase()));
  final AzPodStatus derivedStatus =
      AzPodStatusExtractor.getAzPodStatusFromEvent(watchEvent).getAzPodStatus();
  logger.debug("AZ_POD_STATUS: " + derivedStatus);
}
An implementation of {@link @RawPodWatchListener} that extracts the {@link AzPodStatus} for each pod watch event.
onEvent
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/executor/container/watch/KubernetesWatchTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/executor/container/watch/KubernetesWatchTest.java
Apache-2.0
/**
 * Gets the specified image type versions from the active rampups and asserts that the
 * versions resolved for a given flow are deterministic.
 */
@Test
public void testFetchVersionByImageTypesCase1() throws Exception {
  final String jsonInput = JSONUtils.readJsonFileAsString("image_management/image_type_rampups"
      + ".json");
  final Map<String, List<ImageRampup>> imageTypeRampups = convertToRampupMap(jsonInput);
  final String jsonImageTypeNewAndRampupVersion = JSONUtils.readJsonFileAsString(
      "image_management" + "/all_image_types_new_and_rampup_version.json");
  final List<ImageVersionDTO> newAndRampupImageVersionDTOs = converterUtils.convertToDTOs(
      jsonImageTypeNewAndRampupVersion, ImageVersionDTO.class);
  final List<ImageVersion> newAndRampupImageVersions =
      this.imageVersionConverter.convertToDataModels(newAndRampupImageVersionDTOs);
  final Set<String> imageTypes = new TreeSet<>();
  imageTypes.add("spark_job");
  imageTypes.add("hive_job");
  imageTypes.add("azkaban_core");
  imageTypes.add("azkaban_config");
  imageTypes.add("azkaban_exec");
  when(this.imageRampupDao.getRampupByImageTypes(any(Set.class))).thenReturn(imageTypeRampups);
  when(this.imageVersionDao.findImageVersions(any(ImageMetadataRequest.class)))
      .thenReturn(newAndRampupImageVersions);
  // Assert that for a flow, versions are always deterministic and remain the same
  final ExecutableFlow flow = TestUtils
      .createTestExecutableFlow("exectest1", "exec1", DispatchMethod.CONTAINERIZED);
  Map<String, VersionInfo> imageTypeVersionMap = this.imageRampupManger
      .getVersionByImageTypes(flow, imageTypes, new HashSet<>());
  Assert.assertEquals("3.6.5", imageTypeVersionMap.get("azkaban_config").getVersion());
  Assert.assertEquals("3.6.2", imageTypeVersionMap.get("azkaban_core").getVersion());
  Assert.assertEquals("1.8.2", imageTypeVersionMap.get("azkaban_exec").getVersion());
  Assert.assertEquals("2.1.3", imageTypeVersionMap.get("hive_job").getVersion());
  Assert.assertEquals("1.1.2", imageTypeVersionMap.get("spark_job").getVersion());
}
The test is for getting the specified image type version from the active rampups. @throws Exception
testFetchVersionByImageTypesCase1
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
Apache-2.0
/**
 * Versions come from active rampups where one exists; for the remaining image
 * types (pig_job, hadoop_job) the latest active image version is used.
 */
@Test
public void testFetchVersionByImageTypesCase2() throws Exception {
  // Active rampups keyed by image type, loaded from a JSON fixture.
  final String jsonImageTypeRampups = JSONUtils.readJsonFileAsString("image_management/"
      + "image_type_rampups.json");
  final Map<String, List<ImageRampup>> imageTypeRampups = convertToRampupMap(jsonImageTypeRampups);
  final Set<String> imageTypes = new TreeSet<>();
  imageTypes.add("spark_job");
  imageTypes.add("hive_job");
  imageTypes.add("azkaban_core");
  imageTypes.add("azkaban_config");
  imageTypes.add("azkaban_exec");
  imageTypes.add("pig_job");
  imageTypes.add("hadoop_job");
  // Latest active versions for all image types, loaded from a JSON fixture.
  final String jsonImageTypeActiveVersion = JSONUtils.readJsonFileAsString("image_management"
      + "/all_image_types_active_version.json");
  final List<ImageVersionDTO> activeImageVersionDTOs = converterUtils.convertToDTOs(
      jsonImageTypeActiveVersion, ImageVersionDTO.class);
  final List<ImageVersion> activeImageVersions =
      this.imageVersionConverter.convertToDataModels(activeImageVersionDTOs);
  // New and ramping-up versions for all image types.
  final String jsonImageTypeNewAndRampupVersion = JSONUtils.readJsonFileAsString(
      "image_management" + "/all_image_types_new_and_rampup_version.json");
  final List<ImageVersionDTO> newAndRampupImageVersionDTOs = converterUtils.convertToDTOs(
      jsonImageTypeNewAndRampupVersion, ImageVersionDTO.class);
  final List<ImageVersion> newAndRampupImageVersions =
      this.imageVersionConverter.convertToDataModels(newAndRampupImageVersionDTOs);
  when(this.imageRampupDao.getRampupByImageTypes(any(Set.class))).thenReturn(imageTypeRampups);
  when(this.imageVersionDao.findImageVersions(any(ImageMetadataRequest.class)))
      .thenReturn(newAndRampupImageVersions);
  when(this.imageVersionDao.getActiveVersionByImageTypes(any(Set.class)))
      .thenReturn(activeImageVersions);
  final Map<String, VersionInfo> imageTypeVersionMap = this.imageRampupManger
      .getVersionByImageTypes(null, imageTypes, new HashSet<>());
  Assert.assertNotNull(imageTypeVersionMap);
  // Below image type versions are obtained from active ramp up. Version is selected
  // randomly based on rampup percentage, so only presence is asserted.
  Assert.assertNotNull(imageTypeVersionMap.get("azkaban_config"));
  Assert.assertNotNull(imageTypeVersionMap.get("azkaban_core"));
  Assert.assertNotNull(imageTypeVersionMap.get("azkaban_exec"));
  Assert.assertNotNull(imageTypeVersionMap.get("hive_job"));
  Assert.assertNotNull(imageTypeVersionMap.get("spark_job"));
  // Below two image types have no rampup; they come from the latest active image version.
  Assert.assertNotNull(imageTypeVersionMap.get("pig_job"));
  Assert.assertEquals("4.1.2", imageTypeVersionMap.get("pig_job").getVersion());
  Assert.assertNotNull(imageTypeVersionMap.get("hadoop_job"));
  Assert.assertEquals("5.1.5", imageTypeVersionMap.get("hadoop_job").getVersion());
}
This test is for getting the specified image types' versions from rampups as well as from the active image version. The image types for which an active rampup is present get the version from rampups. For the remaining images it gets the latest active version from image versions. @throws Exception
testFetchVersionByImageTypesCase2
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
Apache-2.0
@Test(expected = ImageMgmtException.class) public void testFetchVersionByImageTypesFailureCase() throws Exception { final String jsonImageTypeRampups = JSONUtils.readJsonFileAsString("image_management/" + "image_type_rampups.json"); final Map<String, List<ImageRampup>> imageTypeRampups = convertToRampupMap(jsonImageTypeRampups); final Set<String> imageTypes = new TreeSet<>(); imageTypes.add("spark_job"); imageTypes.add("hive_job"); imageTypes.add("azkaban_core"); imageTypes.add("azkaban_config"); imageTypes.add("azkaban_exec"); imageTypes.add("pig_job"); imageTypes.add("hadoop_job"); imageTypes.add("kabootar_job"); imageTypes.add("wormhole_job"); final String jsonImageTypeActiveVersion = JSONUtils.readJsonFileAsString("image_management" + "/image_type_active_version.json"); final List<ImageVersionDTO> activeImageVersionDTOs = converterUtils.convertToDTOs( jsonImageTypeActiveVersion, ImageVersionDTO.class); final List<ImageVersion> activeImageVersions = this.imageVersionConverter.convertToDataModels(activeImageVersionDTOs); final String jsonImageTypeNewAndRampupVersion = JSONUtils.readJsonFileAsString( "image_management" + "/all_image_types_new_and_rampup_version.json"); final List<ImageVersionDTO> newAndRampupImageVersionDTOs = converterUtils.convertToDTOs( jsonImageTypeNewAndRampupVersion, ImageVersionDTO.class); final List<ImageVersion> newAndRampupImageVersions = this.imageVersionConverter.convertToDataModels(newAndRampupImageVersionDTOs); when(this.imageRampupDao.getRampupByImageTypes(any(Set.class))).thenReturn(imageTypeRampups); when(this.imageVersionDao.findImageVersions(any(ImageMetadataRequest.class))).thenReturn(newAndRampupImageVersions); when(this.imageVersionDao.getActiveVersionByImageTypes(any(Set.class))) .thenReturn(activeImageVersions); final Set<String> overlaySet = new HashSet<>(); overlaySet.add("kabootar_job"); final Map<String, VersionInfo> imageTypeVersionMap = this.imageRampupManger .getVersionByImageTypes(null, imageTypes, new HashSet<>()); 
Assert.assertNotNull(imageTypeVersionMap); // Below image type versions are obtained from active ramp up. Version is selected randomly // based on rampup percentage. Assert.assertNotNull(imageTypeVersionMap.get("azkaban_config")); Assert.assertNotNull(imageTypeVersionMap.get("azkaban_core")); Assert.assertNotNull(imageTypeVersionMap.get("azkaban_exec")); Assert.assertNotNull(imageTypeVersionMap.get("hive_job")); Assert.assertNotNull(imageTypeVersionMap.get("spark_job")); Assert.assertNotNull(imageTypeVersionMap.get("pig_job")); // Below two image types are from based on active image version Assert.assertEquals("4.1.2", imageTypeVersionMap.get("pig_job").getVersion()); Assert.assertNotNull(imageTypeVersionMap.get("hadoop_job")); Assert.assertEquals("5.1.5", imageTypeVersionMap.get("hadoop_job").getVersion()); }
For the given image types, some of the versions come from active rampups and some are based on the active image version. But there are some image types with neither an active rampup nor an active image version, so the call throws an exception. @throws Exception
testFetchVersionByImageTypesFailureCase
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
Apache-2.0
/**
 * Success test: a version is resolved for every registered image type, either
 * from active rampups or from the latest active image version.
 */
@Test
public void testFetchAllImageTypesVersion() throws Exception {
  final String jsonImageTypeRampups = JSONUtils.readJsonFileAsString("image_management/"
      + "image_type_rampups.json");
  final Map<String, List<ImageRampup>> imageTypeRampups = convertToRampupMap(jsonImageTypeRampups);
  // Every registered image type, loaded from a JSON fixture.
  final String jsonAllImageTypes = JSONUtils.readJsonFileAsString("image_management/"
      + "all_image_types.json");
  final List<ImageTypeDTO> allImageTypeDTOs = converterUtils.convertToDTOs(jsonAllImageTypes,
      ImageTypeDTO.class);
  final List<ImageType> allImageTypes =
      this.imageTypeConverter.convertToDataModels(allImageTypeDTOs);
  final String jsonImageTypeActiveVersion = JSONUtils.readJsonFileAsString("image_management"
      + "/all_image_types_active_version.json");
  final List<ImageVersionDTO> activeImageVersionDTOs = converterUtils.convertToDTOs(
      jsonImageTypeActiveVersion, ImageVersionDTO.class);
  final List<ImageVersion> activeImageVersions =
      this.imageVersionConverter.convertToDataModels(activeImageVersionDTOs);
  final String jsonImageTypeNewAndRampupVersion = JSONUtils.readJsonFileAsString(
      "image_management" + "/all_image_types_new_and_rampup_version.json");
  final List<ImageVersionDTO> newAndRampupImageVersionDTOs = converterUtils.convertToDTOs(
      jsonImageTypeNewAndRampupVersion, ImageVersionDTO.class);
  final List<ImageVersion> newAndRampupImageVersions =
      this.imageVersionConverter.convertToDataModels(newAndRampupImageVersionDTOs);
  when(this.imageRampupDao.getRampupForAllImageTypes()).thenReturn(imageTypeRampups);
  when(this.imageTypeDao.getAllImageTypes()).thenReturn(allImageTypes);
  when(this.imageVersionDao.findImageVersions(any(ImageMetadataRequest.class)))
      .thenReturn(newAndRampupImageVersions);
  when(this.imageVersionDao.getActiveVersionByImageTypes(any(Set.class)))
      .thenReturn(activeImageVersions);
  final Map<String, VersionInfo> imageTypeVersionMap = this.imageRampupManger
      .getVersionForAllImageTypes(null);
  Assert.assertNotNull(imageTypeVersionMap);
  // Below image type versions are obtained from latest active version.
  Assert.assertNotNull(imageTypeVersionMap.get("azkaban_config"));
  Assert.assertNotNull(imageTypeVersionMap.get("azkaban_core"));
  Assert.assertNotNull(imageTypeVersionMap.get("azkaban_exec"));
  Assert.assertNotNull(imageTypeVersionMap.get("hive_job"));
  Assert.assertNotNull(imageTypeVersionMap.get("spark_job"));
  Assert.assertEquals("3.6.7", imageTypeVersionMap.get("azkaban_config").getVersion());
  Assert.assertEquals("3.6.3", imageTypeVersionMap.get("azkaban_core").getVersion());
  Assert.assertEquals("1.8.3", imageTypeVersionMap.get("azkaban_exec").getVersion());
  Assert.assertEquals("2.1.4", imageTypeVersionMap.get("hive_job").getVersion());
  Assert.assertEquals("1.1.3", imageTypeVersionMap.get("spark_job").getVersion());
  // Below image types are resolved from the active image version fixture.
  Assert.assertNotNull(imageTypeVersionMap.get("pig_job"));
  Assert.assertEquals("4.1.2", imageTypeVersionMap.get("pig_job").getVersion());
  Assert.assertNotNull(imageTypeVersionMap.get("hadoop_job"));
  Assert.assertEquals("5.1.5", imageTypeVersionMap.get("hadoop_job").getVersion());
  Assert.assertNotNull(imageTypeVersionMap.get("kafka_push_job"));
  Assert.assertEquals("3.1.2", imageTypeVersionMap.get("kafka_push_job").getVersion());
  Assert.assertNotNull(imageTypeVersionMap.get("wormhole_job"));
  Assert.assertEquals("1.1.8", imageTypeVersionMap.get("wormhole_job").getVersion());
  Assert.assertNotNull(imageTypeVersionMap.get("kabootar_job"));
  Assert.assertEquals("1.1.4", imageTypeVersionMap.get("kabootar_job").getVersion());
}
This test is a success test for getting version for all the available image types. The versions are from active image version. @throws Exception
testFetchAllImageTypesVersion
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/imagemgmt/rampup/ImageRampupManagerImplTest.java
Apache-2.0
/**
 * Exercises the full lifecycle of the VersionSetLoader through a Mockito spy:
 * miss lookups, insert-on-get, cached re-get, lookup by md5 and by id, delete,
 * fetch-all, and re-insert (verifying the autoincrement id advances).
 */
@Test
public void test() throws IOException {
  VersionSetLoader loaderSpy = Mockito.spy(this.loader);
  String testJsonString1 = "{\"key1\":{\"version\":\"value1\",\"path\":\"path1\","
      + "\"state\":\"ACTIVE\"},\"key2\":{\"version\":\"value2\",\"path\":\"path2\","
      + "\"state\":\"ACTIVE\"},\"key3\":{\"version\":\"value3\",\"path\":\"path3\","
      + "\"state\":\"ACTIVE\"}}";
  String testMd5Hex1 = "43966138aebfdc4438520cc5cd2aefa8";
  // Assert that it doesn't exist before (lookup by md5)
  Optional<VersionSet> versionSet = loaderSpy.getVersionSet("43966138aebfdc4438520cc5cd2aefa8");
  Assert.assertFalse(versionSet.isPresent());
  // Assert that it doesn't exist before (lookup by id)
  versionSet = loaderSpy.getVersionSetById(1);
  Assert.assertFalse(versionSet.isPresent());
  // Try to get versionSetId which internally inserts if it doesn't exist
  int versionSetId = loaderSpy.getVersionSet(testMd5Hex1, testJsonString1).get().getVersionSetId();
  Assert.assertEquals(1, versionSetId);
  Mockito.verify(loaderSpy, Mockito.times(1))
      .insertAndGetVersionSet(Mockito.anyString(), Mockito.anyString());
  // Try to get versionSetId again which will return from the local copy
  // (no second insert must happen)
  versionSetId = loaderSpy.getVersionSet(testMd5Hex1, testJsonString1).get().getVersionSetId();
  Assert.assertEquals(1, versionSetId);
  Mockito.verify(loaderSpy, Mockito.times(1))
      .insertAndGetVersionSet(Mockito.anyString(), Mockito.anyString());
  // Try getting it again using md5
  versionSet = loaderSpy.getVersionSet("43966138aebfdc4438520cc5cd2aefa8");
  Assert.assertTrue(versionSet.isPresent());
  Assert.assertEquals(1, versionSet.get().getVersionSetId());
  // Try getting it again using Id
  versionSet = loaderSpy.getVersionSetById(1);
  Assert.assertTrue(versionSet.isPresent());
  Assert.assertEquals(1, versionSet.get().getVersionSetId());
  // Try to remove
  boolean removed = loaderSpy.deleteVersionSet("43966138aebfdc4438520cc5cd2aefa8");
  Assert.assertTrue(removed);
  // Assert that it doesn't exist now
  versionSet = loaderSpy.getVersionSet("43966138aebfdc4438520cc5cd2aefa8");
  Assert.assertFalse(versionSet.isPresent());
  // Verify that fetch doesn't return anything
  List<VersionSet> versionSets = loaderSpy.fetchAllVersionSets();
  Assert.assertTrue(versionSets.isEmpty());
  // Try to get versionSetId which internally inserts if it doesn't exist
  versionSetId = loaderSpy.getVersionSet(testMd5Hex1, testJsonString1).get().getVersionSetId();
  // This time Id will be 2 due to autoincrement field
  Assert.assertEquals(2, versionSetId);
  // Twice total number of invocations of the insertAndGetVersionSet method
  Mockito.verify(loaderSpy, Mockito.times(2))
      .insertAndGetVersionSet(Mockito.anyString(), Mockito.anyString());
  // Verify that fetch returns one record
  versionSets = loaderSpy.fetchAllVersionSets();
  Assert.assertEquals(1, versionSets.size());
}
This test executes various methods and verify various cases.
test
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/imagemgmt/version/JdbcVersionSetLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/imagemgmt/version/JdbcVersionSetLoaderTest.java
Apache-2.0
/**
 * Verifies that a class available only on the JobClassLoader's own classpath
 * (inside helloworld.jar) is loaded by the JobClassLoader itself rather than
 * delegated to the parent.
 */
@Test
public void testClassAvailableInJobClassLoader()
    throws MalformedURLException, ClassNotFoundException {
  final ClassLoader parentLoader = getClass().getClassLoader();
  final File sampleJar = new File(parentLoader.getResource(SAMPLE_JAR).getFile());
  final URL sampleJarUrl = sampleJar.toURI().toURL();
  final ClassLoader jobLoader =
      new JobClassLoader(new URL[]{sampleJarUrl}, parentLoader, "testJob");
  final Class clazz = jobLoader.loadClass("org.hello.world.HelloWorld");
  Assert.assertEquals(jobLoader, clazz.getClassLoader());
}
Test class loading of a class that is available only in the JobClassloader. The class is provided in 'helloworld.jar'.
testClassAvailableInJobClassLoader
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobExecutor/JobClassLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobExecutor/JobClassLoaderTest.java
Apache-2.0
@Test public void testPropsClass() throws ClassNotFoundException { ClassLoader currentClassLoader = getClass().getClassLoader(); JobClassLoader jobClassLoader = new JobClassLoader( new URL[] {}, currentClassLoader, "testJob"); // make azkaban.utils.Props class available to the JobClassLoader jobClassLoader.addURL(Props.class); Class clazz = jobClassLoader.loadClass(Props.class.getName()); Assert.assertEquals(currentClassLoader, clazz.getClassLoader()); }
Check {@link azkaban.utils.Props} is already loaded by its parent classloader.
testPropsClass
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobExecutor/JobClassLoaderTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobExecutor/JobClassLoaderTest.java
Apache-2.0
@Test public void testOneUnixCommandWithProxyUserInsteadOfSubmitUser() throws Exception { // Initialize the Props this.props.removeLocal(CommonJobProperties.SUBMIT_USER); this.props.put(JobProperties.USER_TO_PROXY, "test_user"); this.props.put(ProcessJob.COMMAND, "ls -al"); this.job.run(); }
this job should run fine if the props contain user.to.proxy
testOneUnixCommandWithProxyUserInsteadOfSubmitUser
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobExecutor/ProcessJobTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobExecutor/ProcessJobTest.java
Apache-2.0
@Test(expected = RuntimeException.class) public void testOneUnixCommandWithRootUser() throws Exception { // Initialize the Props this.props.removeLocal(CommonJobProperties.SUBMIT_USER); this.props.put(JobProperties.USER_TO_PROXY, "root"); this.props.put("execute.as.user", "true"); this.props.put(ProcessJob.COMMAND, "ls -al"); this.job.run(); }
this job should fail because it sets user.to.proxy = root which is black listed
testOneUnixCommandWithRootUser
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobExecutor/ProcessJobTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobExecutor/ProcessJobTest.java
Apache-2.0
/**
 * Tests that the common and common private plugin properties are loaded
 * correctly into the JobTypePluginSet.
 *
 * Cleanup vs. the original: a leftover debug System.out.println of the props
 * was removed.
 */
@Test
public void testCommonPluginProps() throws Exception {
  final JobTypePluginSet pluginSet = this.manager.getJobTypePluginSet();

  // Common public job props shared by all jobtypes.
  final Props props = pluginSet.getCommonPluginJobProps();
  assertEquals("commonprop1", props.getString("commonprop1"));
  assertEquals("commonprop2", props.getString("commonprop2"));
  assertEquals("commonprop3", props.getString("commonprop3"));

  // Common private (load-time) props.
  final Props priv = pluginSet.getCommonPluginLoadProps();
  assertEquals("commonprivate1", priv.getString("commonprivate1"));
  assertEquals("commonprivate2", priv.getString("commonprivate2"));
  assertEquals("commonprivate3", priv.getString("commonprivate3"));
  assertEquals("azkaban.jobtype.FakeJavaJob2",
      priv.getString("default.proxyusers.jobtype.classes"));
}
Tests that the common and common private properties are loaded correctly
testCommonPluginProps
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerTest.java
Apache-2.0
@Test public void testLoadedClasses() throws Exception { final JobTypePluginSet pluginSet = this.manager.getJobTypePluginSet(); final Props props = pluginSet.getCommonPluginJobProps(); System.out.println(props.toString()); assertEquals("commonprop1", props.getString("commonprop1")); assertEquals("commonprop2", props.getString("commonprop2")); assertEquals("commonprop3", props.getString("commonprop3")); assertNull(props.get("commonprivate1")); final Props priv = pluginSet.getCommonPluginLoadProps(); assertEquals("commonprivate1", priv.getString("commonprivate1")); assertEquals("commonprivate2", priv.getString("commonprivate2")); assertEquals("commonprivate3", priv.getString("commonprivate3")); // Testing the anothertestjobtype assertEquals("azkaban.jobtype.FakeJavaJob", pluginSet.getPluginClassName("anothertestjob")); final Props ajobProps = pluginSet.getPluginJobProps("anothertestjob"); final Props aloadProps = pluginSet.getPluginLoaderProps("anothertestjob"); // Loader props assertEquals("lib/*", aloadProps.get("jobtype.classpath")); assertEquals("azkaban.jobtype.FakeJavaJob", aloadProps.get("jobtype.class")); assertEquals("commonprivate1", aloadProps.get("commonprivate1")); assertEquals("commonprivate2", aloadProps.get("commonprivate2")); assertEquals("commonprivate3", aloadProps.get("commonprivate3")); // Job props assertEquals("commonprop1", ajobProps.get("commonprop1")); assertEquals("commonprop2", ajobProps.get("commonprop2")); assertEquals("commonprop3", ajobProps.get("commonprop3")); assertNull(ajobProps.get("commonprivate1")); assertEquals("azkaban.jobtype.FakeJavaJob2", pluginSet.getPluginClassName("testjob")); final Props tjobProps = pluginSet.getPluginJobProps("testjob"); final Props tloadProps = pluginSet.getPluginLoaderProps("testjob"); // Loader props assertNull(tloadProps.get("jobtype.classpath")); assertEquals("azkaban.jobtype.FakeJavaJob2", tloadProps.get("jobtype.class")); assertEquals("commonprivate1", tloadProps.get("commonprivate1")); 
assertEquals("commonprivate2", tloadProps.get("commonprivate2")); assertEquals("private3", tloadProps.get("commonprivate3")); assertEquals("0", tloadProps.get("testprivate")); // Job props assertEquals("commonprop1", tjobProps.get("commonprop1")); assertEquals("commonprop2", tjobProps.get("commonprop2")); assertEquals("1", tjobProps.get("pluginprops1")); assertEquals("2", tjobProps.get("pluginprops2")); assertEquals("3", tjobProps.get("pluginprops3")); assertEquals("pluginprops", tjobProps.get("commonprop3")); // Testing that the private properties aren't shared with the public ones assertNull(tjobProps.get("commonprivate1")); assertNull(tjobProps.get("testprivate")); }
Tests that the proper classes were loaded and that the common and the load properties are properly loaded.
testLoadedClasses
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerTest.java
Apache-2.0
/**
 * Builds a job of a jobtype configured with a JobPropsProcessor and verifies the
 * processor ran: the injected marker property must appear in the job props while
 * private props stay hidden.
 */
@Test
public void testJobPropsProcessor() throws Exception {
  final Props inputProps = new Props();
  inputProps.put("type", "testjobwithpropsprocessor");
  final Job job =
      this.manager.buildJobExecutor("testjobwithpropsprocessor", inputProps, this.logger);
  assertTrue(job instanceof FakeJavaJob);

  final Props resolvedProps = ((FakeJavaJob) job).getJobProps();
  assertEquals("commonprop1", resolvedProps.get("commonprop1"));
  assertEquals("commonprop2", resolvedProps.get("commonprop2"));
  assertEquals("commonprop3", resolvedProps.get("commonprop3"));
  assertNull(resolvedProps.get("commonprivate1"));
  // The processor injects this marker property during job construction.
  assertEquals(TestJobPropsProcessor.INJECTED_ADDITION_PROP,
      resolvedProps.get(TestJobPropsProcessor.INJECTED_ADDITION_PROP));
}
Configure a {@link JobPropsProcessor} for a jobtype plugin and verify the JobPropsProcessor are invoked correctly for jobs of that type.
testJobPropsProcessor
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerTest.java
Apache-2.0
/**
 * JobTypeManager setup for a job WITHOUT 'job.dependency.components': the hadoop
 * component is routed onto the job's classpath/native libs, hive is not.
 */
@Test
public void testJobTypeManagerJobSetupWithoutJobComponentDependency() {
  final JobTypeManager manager = new JobTypeManager(this.testPluginDirPath, null,
      this.getClass().getClassLoader(), this.clusterRouter, null);
  Props jobProps = new Props();
  jobProps.put("type", "anothertestjob");
  jobProps.put("propB", "b");
  final JobParams jobParams = manager.createJobParams("anothertestjob", jobProps, LOG);
  // The job's context classloader must be the Hadoop security manager's loader.
  Assert.assertTrue(
      jobParams.contextClassLoader instanceof HadoopSecurityManagerClassLoader);
  final Job job = JobTypeManager.createJob("anothertestjob", jobParams, LOG);
  jobProps = ((FakeJavaJob) job).getJobProps();
  final String clusterClassPath = jobProps
      .getString(CommonJobProperties.TARGET_CLUSTER_CLASSPATH);
  // No hive dependency was declared, so hive entries must be absent.
  Assert.assertFalse(clusterClassPath.contains("hive"));
  Assert.assertTrue(clusterClassPath.contains("hadoop"));
  final String jvmArgs = jobProps.getString(CommonJobProperties.TARGET_CLUSTER_NATIVE_LIB);
  Assert.assertFalse(jvmArgs.contains("hive"));
  Assert.assertTrue(jvmArgs.contains("hadoop"));
}
Unit test of JobTypeManager's setup for jobs without 'job.dependency.components' specified.
testJobTypeManagerJobSetupWithoutJobComponentDependency
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
Apache-2.0
/**
 * JobTypeManager setup for a job that declares 'hive' in
 * 'job.dependency.components': both hive and hadoop entries must be routed onto
 * the job's cluster classpath and native-library settings.
 */
@Test
public void testJobTypeManagerJobSetupWithJobComponentDependency() {
  final JobTypeManager jobTypeManager = new JobTypeManager(this.testPluginDirPath, null,
      this.getClass().getClassLoader(), this.clusterRouter, null);
  Props props = new Props();
  props.put("type", "anothertestjob");
  props.put("propB", "b");
  props.put(CommonJobProperties.JOB_CLUSTER_COMPONENTS_DEPENDENCIES, "hive");

  final JobParams params = jobTypeManager.createJobParams("anothertestjob", props, LOG);
  Assert.assertTrue(params.contextClassLoader instanceof HadoopSecurityManagerClassLoader);

  final Job job = JobTypeManager.createJob("anothertestjob", params, LOG);
  props = ((FakeJavaJob) job).getJobProps();

  final String clusterClassPath =
      props.getString(CommonJobProperties.TARGET_CLUSTER_CLASSPATH);
  Assert.assertTrue(clusterClassPath.contains("hive"));
  Assert.assertTrue(clusterClassPath.contains("hadoop"));
  final String nativeLibArgs =
      props.getString(CommonJobProperties.TARGET_CLUSTER_NATIVE_LIB);
  Assert.assertTrue(nativeLibArgs.contains("hadoop"));
  Assert.assertTrue(nativeLibArgs.contains("hive"));
}
Unit test of JobTypeManager's setup for jobs with 'job.dependency.components' specified.
testJobTypeManagerJobSetupWithJobComponentDependency
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
Apache-2.0
@Test public void testJobRunnerObservesClusterSpecificParams() { final JobTypeManager manager = new JobTypeManager(this.testPluginDirPath, null, this.getClass().getClassLoader(), this.clusterRouter, null); Props jobProps = new Props(); jobProps.put("type", "anothertestjob"); manager.createJobParams("anothertestjob", jobProps, LOG); // verify the jobProps also observe the parameters injected in JobTypeManager jobProps = PropsUtils.resolveProps(jobProps); Assert.assertEquals("valB", jobProps.getString("PropA")); }
Verify that cluster-specific params can be observed by JobRunner through the passed-in parameter, `jobParams` to JobTypeManager.createJobParams() call.
testJobRunnerObservesClusterSpecificParams
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
Apache-2.0
/**
 * Plugin load props listed in the override list are overridden from
 * cluster-specific params; without an override list they are left untouched.
 */
@Test
public void testPluginLoadPropsOverrideByClusterSpecificParams() {
  final JobTypeManager manager = new JobTypeManager(this.testPluginDirPath, null,
      this.getClass().getClassLoader(), this.clusterRouter, null);
  Props jobProps = new Props();
  jobProps.put("type", "testjob");
  jobProps.put(PIG_HOME, "/user/pig/path");
  jobProps.put(CommonJobProperties.JOB_CLUSTER_COMPONENTS_DEPENDENCIES, "hadoop");
  final JobParams jobParams = manager.createJobParams("testjob", jobProps, LOG);
  Assert.assertTrue(
      jobParams.contextClassLoader instanceof HadoopSecurityManagerClassLoader);
  final Job job = JobTypeManager.createJob("testjob", jobParams, LOG);
  Props sysProps = ((FakeJavaJob2) job).getSysProps();
  // Without an override list, the cluster param PropA must not appear in sys props.
  Assert.assertEquals(null, sysProps.get("PropA"));
  // Create JobTypeManager again but with pluginOverrideProps this time.
  final String pluginLoadOverrideProps = "PropC,PropB";
  final JobTypeManager manager1 = new JobTypeManager(this.testPluginDirPath, null,
      this.getClass().getClassLoader(), this.clusterRouter, pluginLoadOverrideProps);
  final JobParams jobParams1 = manager1.createJobParams("testjob", jobProps, LOG);
  final Job job1 = JobTypeManager.createJob("testjob", jobParams1, LOG);
  sysProps = ((FakeJavaJob2) job1).getSysProps();
  // The listed props are now overridden with the cluster-specific values.
  Assert.assertEquals("valC", sysProps.get("PropC"));
  Assert.assertEquals("valB", sysProps.get("PropB"));
}
Verify that overridden plugin load props are indeed overridden.
testPluginLoadPropsOverrideByClusterSpecificParams
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
Apache-2.0
@Test public void testPluginLoadPropsOverrideByClusterSpecificParamsNegative() { final Props jobProps = new Props(); jobProps.put("type", "testjob"); jobProps.put(PIG_HOME, "/user/pig/path"); jobProps.put(CommonJobProperties.JOB_CLUSTER_COMPONENTS_DEPENDENCIES, "hadoop"); // Create JobTypeManager with pluginOverrideProps, however, add propD which // is not a valid property in cluster props. final String pluginLoadOverrideProps = "PropC,PropD"; final JobTypeManager manager = new JobTypeManager(this.testPluginDirPath, null, this.getClass().getClassLoader(), this.clusterRouter, pluginLoadOverrideProps); final JobParams jobParams = manager.createJobParams("testjob", jobProps, LOG); final Job job = JobTypeManager.createJob("testjob", jobParams, LOG); final Props sysProps = ((FakeJavaJob2) job).getSysProps(); Assert.assertEquals("valC", sysProps.get("PropC")); Assert.assertEquals(null, sysProps.get("PropD")); }
Verify that overridden plugin load props are applied and that a property missing from the cluster props is ignored.
testPluginLoadPropsOverrideByClusterSpecificParamsNegative
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/JobTypeManagerWithDynamicClusterTest.java
Apache-2.0
/**
 * Injects the marker property into the job props so tests can detect that this
 * processor ran; returns the same (mutated) Props instance.
 */
@Override
public Props process(final Props jobProps) {
  jobProps.put(INJECTED_ADDITION_PROP, INJECTED_ADDITION_PROP);
  return jobProps;
}
A test implementation of {@link JobPropsProcessor} that injects a new property into jobs.
process
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/jobtype/TestJobPropsProcessor.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/jobtype/TestJobPropsProcessor.java
Apache-2.0
/**
 * Tests the enable/disable toggles and the isAvailable status flag of
 * MetricReportManager.
 */
@Test
public void managerStatusTest() {
  assertNotNull("Singleton Failed to instantiate", this.manager);
  assertTrue("Failed to enable metric manager", MetricReportManager.isAvailable());
  // Toggle off and verify the status flag follows.
  this.manager.disableManager();
  assertFalse("Failed to disable metric manager", MetricReportManager.isAvailable());
  // Toggle back on and verify again.
  this.manager.enableManager();
  assertTrue("Failed to enable metric manager", MetricReportManager.isAvailable());
}
Test enable disable and status methods
managerStatusTest
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/metric/MetricManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/metric/MetricManagerTest.java
Apache-2.0
/**
 * Tests adding a metric and retrieving it by name from the manager.
 *
 * Fix vs. the original: JUnit's assertEquals signature is
 * (message, expected, actual); the original calls passed the actual value in
 * the expected slot, which yields misleading failure messages. Argument order
 * corrected; pass/fail behavior is unchanged.
 */
@Test
public void managerMetricMaintenanceTest() {
  assertEquals("Failed to add metric", 1, this.manager.getAllMetrics().size());
  assertTrue("Failed to add metric", this.manager.getAllMetrics().contains(this.metric));
  assertEquals("Failed to get metric by Name",
      this.metric, this.manager.getMetricFromName("FakeMetric"));
}
Test adding and accessing metric methods
managerMetricMaintenanceTest
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/metric/MetricManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/metric/MetricManagerTest.java
Apache-2.0
@Test public void testParseStartupDependencies() throws Exception { Set<Dependency> finalDependencies = new HashSet<>(); try { // Get a raw json which contains null along with other valid dependencies String rawJson = getRawJSONDepsNullAndABC(); finalDependencies = parseStartupDependencies(rawJson); } catch (NullPointerException npe) { // If NullPointerException is thrown by parseStartupDependencies() it means the // method tried to create a Dependency object without checking for null. // Fail the test since it should always check for null before creating Dependency // object. fail("Received NullPointerException while parsing startup dependencies"); } // The Set of dependencies returned by parseStartupDependencies() should be without null // and same as Set of dependencies created by the test for valid dependencies in the // raw json. assertEquals(finalDependencies, getDepSetABC()); }
Test parsing of startup dependencies functionality. Even if json containing startup dependencies for a project contains a 'null' in the list, parseStartupDependencies should check for it and remove it from the final set of dependencies. @throws Exception
testParseStartupDependencies
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/project/ThinArchiveUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/project/ThinArchiveUtilsTest.java
Apache-2.0
/**
 * writeStartupDependencies() must filter out null entries before writing; the
 * written file is re-read raw to verify no null reached disk, and the parsed
 * set must equal the valid dependencies only.
 */
@Test
public void testWriteStartupDependencies() throws Exception {
  // Pass Set containing null and valid dependencies to writeStartupDependencies()
  writeStartupDependencies(startupDepsJsonFile, depSetABCAndNull);
  // Read the file that was written above to check if it contains null
  String rawJson = FileUtils.readFileToString(startupDepsJsonFile);
  List<Map<String, String>> rawParseResult =
      ((HashMap<String, List<Map<String, String>>>) JSONUtils.parseJSONFromString(rawJson))
      .get("dependencies");
  if (rawParseResult == null) {
    // Fail the test if the file does not contain the "dependencies" key
    fail("Could not find 'dependencies' key in startup-dependencies.json file.");
  }
  Set<Dependency> dependenciesFromFile = parseStartupDependencies(startupDepsJsonFile);
  for (Map<String, String> rawDependency : rawParseResult) {
    if (rawDependency != null) {
      dependenciesFromFile.add(new Dependency(rawDependency));
    } else {
      // Fail the test if the file contains a null entry
      fail("Found a null entry in startup-dependencies.json file.");
    }
  }
  // The parsed set must contain no null and equal the valid A/B/C dependencies.
  assertEquals(dependenciesFromFile, getDepSetABC());
}
Test writing of startup dependencies to file. When Set of dependencies containing 'null' along with valid Dependency objects is passed to writeStartupDependencies(), the method should filter out null before writing the dependencies to the file. After writing to file, the test reads the file to check if any null is present. @throws Exception
testWriteStartupDependencies
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/project/ThinArchiveUtilsTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/project/ThinArchiveUtilsTest.java
Apache-2.0