code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Verifies that {@link XmlValidatorManager} loads zero validators when no xml
 * configuration file is present in the properties.
 */
@Test
public void testNoValidatorsDir() {
  final Props props = new Props(this.baseProps);
  final XmlValidatorManager manager = new XmlValidatorManager(props);
  // JUnit's assertEquals signature is (message, expected, actual): the expected
  // value (0) must come before the actual size, otherwise failure messages are
  // reported backwards.
  assertEquals(
      "XmlValidatorManager should contain 0 validator when no xml configuration "
          + "file is present.",
      0, manager.getValidatorsInfo().size());
}
Test that no validator directory exists when there is no xml configuration.
testNoValidatorsDir
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/project/validator/XmlValidatorManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/project/validator/XmlValidatorManagerTest.java
Apache-2.0
/**
 * The xml config file names a validator class that cannot be loaded, so
 * constructing an {@link XmlValidatorManager} must fail with
 * {@link ValidatorManagerException}.
 */
@Test(expected = ValidatorManagerException.class)
public void testValidatorDoesNotExist() {
  final URL pluginDir = Resources.getResource("project/testValidators");
  final URL xmlConfig = Resources.getResource("test-conf/azkaban-validators-test1.xml");
  final Props props = new Props(this.baseProps);
  props.put(ValidatorConfigs.VALIDATOR_PLUGIN_DIR, pluginDir.getPath());
  props.put(ValidatorConfigs.XML_FILE_PARAM, xmlConfig.getPath());
  // Construction triggers loading of the configured (missing) validator class.
  new XmlValidatorManager(props);
}
Test that if the xml config file specifies a validator classname that does not exist, XmlValidatorManager should throw an exception.
testValidatorDoesNotExist
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/project/validator/XmlValidatorManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/project/validator/XmlValidatorManagerTest.java
Apache-2.0
/**
 * Asserts field-by-field equality of two {@link SlaOption} instances: type,
 * flow/job names, trigger duration, alert/kill actions, and alert emails.
 */
private void compare(SlaOption option1, SlaOption option2) {
  // Identity of the SLA rule.
  assertThat(option1.getType()).isEqualTo(option2.getType());
  assertThat(option1.getFlowName()).isEqualTo(option2.getFlowName());
  assertThat(option1.getJobName()).isEqualTo(option2.getJobName());
  // Trigger condition and configured actions.
  assertThat(option1.getDuration()).isEqualTo(option2.getDuration());
  assertThat(option1.hasAlert()).isEqualTo(option2.hasAlert());
  assertThat(option1.hasKill()).isEqualTo(option2.hasKill());
  assertThat(option1.getEmails()).isEqualTo(option2.getEmails());
}
Compare if two {@link SlaOption} are the same
compare
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/sla/SlaOptionTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/sla/SlaOptionTest.java
Apache-2.0
@Test public void periodTimerTest() { // get a new timechecker, start from now, repeat every minute. should // evaluate to false now, and true a minute later. final long baseTimeInMilliSeconds = 1000; final ReadablePeriod period = TimeUtils.parsePeriodString("10s"); DateTimeUtils.setCurrentMillisFixed(baseTimeInMilliSeconds); final BasicTimeChecker timeChecker = new BasicTimeChecker("BasicTimeChecker_1", baseTimeInMilliSeconds, DateTimeZone.UTC, true, true, period, null); final Condition cond = getCondition(timeChecker); assertFalse(cond.isMet()); DateTimeUtils.setCurrentMillisFixed(baseTimeInMilliSeconds + 11 * 1000); assertTrue(cond.isMet()); cond.resetCheckers(); assertFalse(cond.isMet()); DateTimeUtils.setCurrentMillisFixed(baseTimeInMilliSeconds + 22 * 1000); assertTrue(cond.isMet()); DateTimeUtils.setCurrentMillisSystem(); }
This test manipulates global state (time) in org.joda.time.DateTimeUtils. Thus this test cannot run in parallel with tests that do the same.
periodTimerTest
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
Apache-2.0
@Test public void testPSTtoPDTunderUTC() { // Use a Date that is before the date in cron expression (i.e. Before March 8 2020) final DateTime pastTime = new DateTime(2020, 1, 1, 00, 00, 0, DateTimeZone.UTC); // Cron expression for 10:30 am UTC on March 8 2020 final String cronExpression = "0 30 10 8 3 ? 2020"; final BasicTimeChecker timeChecker = new BasicTimeChecker("BasicTimeChecker_1", pastTime.getMillis(), DateTimeZone.UTC, true, true, null, cronExpression); System.out.println("getNextCheckTime = " + timeChecker.getNextCheckTime()); final Condition cond = getCondition(timeChecker); final DateTime spring2020UTC = new DateTime(2020, 3, 8, 10, 30, 0, DateTimeZone.UTC); final DateTime spring2020PDT = new DateTime(2020, 3, 8, 3, 30, 0, DateTimeZone.forID("America/Los_Angeles")); assertTrue(cond.getNextCheckTime() == spring2020UTC.getMillis()); assertTrue(cond.getNextCheckTime() == spring2020PDT.getMillis()); }
Test when PST-->PDT happens in 2020. -8:00 -> -7:00 See details why confusion happens during this change: https://en.wikipedia.org/wiki/Pacific_Time_Zone This test demonstrates that if the cron is under UTC settings, When daylight saving change occurs, 2:30 will be changed to 3:30 at that day.
testPSTtoPDTunderUTC
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
Apache-2.0
@Test public void testPSTtoPDTdst2() { // Use a Date that is before the date in cron expression (i.e. Before March 8 2020) final DateTime pastTime = new DateTime(2020, 1, 1, 00, 00, 0, DateTimeZone.UTC); final String cronExpression = "0 30 2 8,9 3 ? 2020"; final BasicTimeChecker timeChecker = new BasicTimeChecker("BasicTimeChecker_1", pastTime.getMillis(), DateTimeZone.forID("America/Los_Angeles"), true, true, null, cronExpression); System.out.println("getNextCheckTime = " + timeChecker.getNextCheckTime()); final Condition cond = getCondition(timeChecker); final DateTime aTime = new DateTime(2020, 3, 9, 2, 30, 0, DateTimeZone.forID("America/Los_Angeles")); assertTrue(cond.getNextCheckTime() == aTime.getMillis()); }
Test when PST-->PDT happens in 2020. -8:00 -> -7:00 See details why confusion happens during this change: https://en.wikipedia.org/wiki/Pacific_Time_Zone This test demonstrates that 2:30 AM will not happen during the daylight saving day on Cron settings under PDT/PST. Since we let the cron triggered both at March 8th, and 9th, it will execute at March 9th.
testPSTtoPDTdst2
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
Apache-2.0
@Test public void testPDTtoPSTdst1() { // Use a Date that is before the date in cron expression (i.e. Before Nov 1 2020) final DateTime pastTime = new DateTime(2020, 1, 1, 00, 00, 0, DateTimeZone.UTC); // 9:00 UTC == 1:00 PST (difference is 8 hours) final String cronExpression = "0 0 1 4,5 11 ? 2029"; final BasicTimeChecker timeChecker = new BasicTimeChecker("BasicTimeChecker_1", pastTime.getMillis(), DateTimeZone.forID("America/Los_Angeles"), true, true, null, cronExpression); System.out.println("getNextCheckTime = " + timeChecker.getNextCheckTime()); final Condition cond = getCondition(timeChecker); final DateTime winter2020 = new DateTime(2029, 11, 4, 9, 0, 0, DateTimeZone.UTC); final DateTime winter2020_2 = new DateTime(2029, 11, 4, 1, 0, 0, DateTimeZone.forID("America/Los_Angeles")); final DateTime winter2020_3 = new DateTime(2029, 11, 4, 2, 0, 0, DateTimeZone.forID("America/Los_Angeles")); assertTrue(cond.getNextCheckTime() == winter2020.getMillis()); // Both 1 and 2 o'clock can not pass the test. Based on milliseconds we got, // winter2020_2.getMillis() == 11/1/2020, 1:00:00 AM GMT-7:00 DST // winter2020_3.getMillis() == 11/1/2020, 2:00:00 AM GMT-8:00 // Both time doesn't match the second 1:00 AM assertFalse(cond.getNextCheckTime() == winter2020_2.getMillis()); assertFalse(cond.getNextCheckTime() == winter2020_3.getMillis()); }
Test when PDT-->PST happens in 2020. -7:00 -> -8:00 See details why confusion happens during this change: https://en.wikipedia.org/wiki/Pacific_Time_Zone This test cronDayLightPacificWinter1 is in order to compare against the cronDayLightPacificWinter2. In this Test, we let job run at 1:00 at Nov.1st, 2020. We know that we will have two 1:00 at that day. The test shows that the first 1:00 is skipped at that day. Schedule will still be executed once on that day.
testPDTtoPSTdst1
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
Apache-2.0
@Test public void testPDTtoPSTdst2() { // Use a Date that is before the date in cron expression (i.e. Before Nov 1 2020) final DateTime pastTime = new DateTime(2020, 1, 1, 00, 00, 0, DateTimeZone.UTC); // 7:59 UTC == 0:59 PDT (difference is 7 hours) final String cronExpression = "0 59 0 4,5 11 ? 2029"; final BasicTimeChecker timeChecker = new BasicTimeChecker("BasicTimeChecker_1", pastTime.getMillis(), DateTimeZone.forID("America/Los_Angeles"), true, true, null, cronExpression); System.out.println("getNextCheckTime = " + timeChecker.getNextCheckTime()); final Condition cond = getCondition(timeChecker); // 7:59 UTC == 0:59 PDT (difference is 7 hours) final DateTime winter2020 = new DateTime(2029, 11, 4, 7, 59, 0, DateTimeZone.UTC); final DateTime winter2020_2 = new DateTime(2029, 11, 4, 0, 59, 0, DateTimeZone.forID("America/Los_Angeles")); // Local time remains the same. assertTrue(cond.getNextCheckTime() == winter2020.getMillis()); assertTrue(cond.getNextCheckTime() == winter2020_2.getMillis()); }
Test when PDT-->PST happens in 2020. -7:00 -> -8:00 See details why confusion happens during this change: https://en.wikipedia.org/wiki/Pacific_Time_Zone This test cronDayLightPacificWinter2 is in order to be compared against the cronDayLightPacificWinter1. In this Test, we let job run at 0:59 at Nov.1st, 2020. it shows that it is 7:59 UTC The test shows 7:59 UTC jump to 9:00 UTC.
testPDTtoPSTdst2
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/trigger/builtin/BasicTimeCheckerTest.java
Apache-2.0
/**
 * The UPLOADPROJECTS permission bit must survive a round-trip through the int
 * flag representation, and must not be switched on as a side effect of setting
 * any of the other permission types.
 */
@Test
public void testUploadProjectFlag() throws Exception {
  final Permission permission = new Permission();
  permission.addPermission(new Type[]{Type.UPLOADPROJECTS});

  // Round-trip through the int flag representation.
  final Permission roundTripped = new Permission(permission.toFlags());
  assertThat(roundTripped.isPermissionSet(Type.UPLOADPROJECTS)).isTrue();
  assertThat(permission.equals(roundTripped)).isTrue();

  // Clear the bit, then set every other permission type.
  permission.removePermissions(new Type[]{Type.UPLOADPROJECTS});
  permission.addPermission(new Type[]{
      Type.READ, Type.WRITE, Type.EXECUTE, Type.METRICS,
      Type.SCHEDULE, Type.CREATEPROJECTS});
  // UPLOADPROJECTS must remain unset.
  assertThat(permission.isPermissionSet(Type.UPLOADPROJECTS)).isFalse();
}
Verify that the binary bit for UPLOADPROJECTS is not turned on by setting the other permissions.
testUploadProjectFlag
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/user/PermissionTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/user/PermissionTest.java
Apache-2.0
@Test public void testFilePropNotSet() throws Exception { final Props props = new Props(this.baseProps); // Should throw try { final XmlUserManager manager = new XmlUserManager(props, () -> this.fileWatcher); } catch (final UndefinedPropertyException e) { return; } fail("XmlUserManager should throw an exception when the file property isn't set"); }
Testing for when the xml path isn't set in properties.
testFilePropNotSet
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/user/XmlUserManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/user/XmlUserManagerTest.java
Apache-2.0
/**
 * Tests auto-reload of the user XML: after the password entry is modified on
 * disk, the manager should eventually reject the old password and accept the
 * new one. Uses a mocked file watcher (see setupMocks) plus Awaitility polling.
 */
@Test
public void testAutoReload() throws Exception {
  final Props props = new Props(this.baseProps);
  final Path filePath = getFilePath("testAutoReload");
  props.put(XmlUserManager.XML_FILE_PARAM, filePath.toString());
  // Latch released below to let the mocked watcher deliver the file event.
  final CountDownLatch managerLoaded = setupMocks(filePath);
  final UserManager manager = new XmlUserManager(props, () -> this.fileWatcher);
  // Get the user8 from existing XML with password == password8
  User user8 = manager.getUser("user8", "password8");
  // Modify the password for user8
  // TODO : djaiswal : Find a better way to modify XML
  final List<String> lines = new ArrayList<>();
  for (final String line : Files.readAllLines(filePath)) {
    if (line.contains("password8")) {
      lines.add(line.replace("password8", "passwordModified"));
    } else {
      lines.add(line);
    }
  }
  // Make sure the file gets reverted back.
  // Update the file
  Files.write(filePath, lines);
  managerLoaded.countDown();
  // Wait until login fails with the old password
  Awaitility.await().atMost(60L, TimeUnit.SECONDS).
      pollInterval(100L, TimeUnit.MILLISECONDS).until(
      () -> {
        User user;
        try {
          user = manager.getUser("user8", "password8");
        } catch (final UserManagerException e) {
          user = null;
        }
        return user == null;
      });
  // Assert that login succeeds with the modified password
  user8 = manager.getUser("user8", "passwordModified");
  assertEquals("user8", user8.getUserId());
  // Confirm the mocked watcher was actually polled for events at least 3 times.
  Mockito.verify(this.fileWatcher, Mockito.timeout(10_000L).atLeast(3)).take();
}
Test auto reload of user XML
testAutoReload
java
azkaban/azkaban
azkaban-common/src/test/java/azkaban/user/XmlUserManagerTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/test/java/azkaban/user/XmlUserManagerTest.java
Apache-2.0
public <T> T query(final String baseQuery, final ResultSetHandler<T> resultHandler, final Object... params) throws SQLException { try { return this.queryRunner.query(baseQuery, resultHandler, params); } catch (final SQLException ex) { // todo kunkun-tang: Retry logics should be implemented here. logger.error("query failed", ex); if (this.dbMetrics != null) { this.dbMetrics.markDBFailQuery(); } throw ex; } }
Executes the given Azkaban related SELECT SQL operations. It will call {@link AzkabanDataSource#getConnection()} inside queryrunner.query. @param baseQuery The SQL query statement to execute. @param resultHandler The handler used to create the result object @param params Initialize the PreparedStatement's IN parameters @param <T> The type of object that the query handler returns @return The object returned by the handler.
query
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
Apache-2.0
/**
 * Executes the given INSERT/UPDATE/DELETE statement, retrying up to
 * {@code AzDBUtil.MAX_RETRIES_ON_DEADLOCK} times when a MySQL lock deadlock is
 * reported. Any other failure (or a non-MySQL datasource) fails immediately.
 *
 * @param updateClause sql statement to execute
 * @param params values bound to the statement's IN parameters
 * @return the number of rows updated
 * @throws SQLException when the statement ultimately fails
 */
public int update(final String updateClause, final Object... params) throws SQLException {
  int retryCount = 0;
  SQLException exception;
  String errorMsg =
      "Update failed: Reached maximum number of retries: " + AzDBUtil.MAX_RETRIES_ON_DEADLOCK;
  do {
    try {
      return this.queryRunner.update(updateClause, params);
    } catch (final SQLException ex) {
      exception = ex;
      // Only MySQL deadlocks are retried; any other error breaks out immediately.
      if (this.queryRunner.getDataSource() instanceof MySQLDataSource
          && ex.getErrorCode() == MySQLDataSource.MYSQL_ER_LOCK_DEADLOCK) {
        retryCount++;
        logger.warn("Deadlock detected when trying to execute: " + updateClause
            + " with values: " + Arrays.toString(params));
        try {
          // Back off before the next attempt.
          Thread.sleep(AzDBUtil.RETRY_WAIT_TIME);
        } catch (final InterruptedException e) {
          // NOTE(review): the interrupt flag is not restored here — consider
          // Thread.currentThread().interrupt(); confirm intended behavior.
          logger.info("Sleep during DB operation retry interrupted.");
        }
      } else {
        errorMsg = "update failed";
        break;
      }
    }
  } while (retryCount < AzDBUtil.MAX_RETRIES_ON_DEADLOCK);
  // Retries exhausted or a non-retryable error occurred: record metric and rethrow.
  logger.error(errorMsg, exception);
  if (this.dbMetrics != null) {
    this.dbMetrics.markDBFailUpdate();
  }
  throw exception;
}
Executes the given AZ related INSERT, UPDATE, or DELETE SQL statement. it will call {@link AzkabanDataSource#getConnection()} inside queryrunner.update. @param updateClause sql statements to execute @param params Initialize the PreparedStatement's IN parameters @return The number of rows updated.
update
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
Apache-2.0
/**
 * Executes a batched INSERT/UPDATE/DELETE: one statement template run once per
 * parameter set. On failure the db-fail-update metric is marked (when metrics
 * are configured) and the exception is rethrown.
 *
 * @param sqlCommand the SQL statement template
 * @param params one Object[] of bind values per batch entry
 * @return update counts, one per batch entry
 * @throws SQLException when the batch fails
 */
public int[] batch(final String sqlCommand, final Object[]... params) throws SQLException {
  try {
    return this.queryRunner.batch(sqlCommand, params);
  } catch (final SQLException ex) {
    if (this.dbMetrics != null) {
      this.dbMetrics.markDBFailUpdate();
    }
    logger.error("batch operation failed", ex);
    throw ex;
  }
}
Execute a batch operation @param sqlCommand sqlCommand template @param params parameters @return result
batch
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
Apache-2.0
/**
 * @return the {@link AzkabanDataSource} wrapped in this database operator.
 */
public AzkabanDataSource getDataSource() {
  // NOTE(review): the cast assumes the runner was constructed with an
  // AzkabanDataSource — confirm at the constructor.
  return (AzkabanDataSource) this.queryRunner.getDataSource();
}
@return datasource wrapped in the database operator.
getDataSource
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseOperator.java
Apache-2.0
public long getLastInsertId() throws SQLException { // A default connection: autocommit = true. long num = -1; try { num = ((Number) this.queryRunner .query(this.conn, "SELECT LAST_INSERT_ID();", new ScalarHandler<>(1))) .longValue(); } catch (final SQLException ex) { logger.error("can not get last insertion ID"); throw ex; } return num; }
returns the last id from a previous insert statement. Note that last insert and this operation should use the same connection. @return the last inserted id in mysql per connection.
getLastInsertId
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
Apache-2.0
public <T> T query(final String querySql, final ResultSetHandler<T> resultHandler, final Object... params) throws SQLException { try { return this.queryRunner.query(this.conn, querySql, resultHandler, params); } catch (final SQLException ex) { //RETRY Logic should be implemented here if needed. throw ex; } finally { // Note: CAN NOT CLOSE CONNECTION HERE. } }
@param querySql @param resultHandler @param params @param <T> @return @throws SQLException
query
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
Apache-2.0
public int update(final String updateClause, final Object... params) throws SQLException { try { return this.queryRunner.update(this.conn, updateClause, params); } catch (final SQLException ex) { //RETRY Logic should be implemented here if needed. throw ex; } finally { // Note: CAN NOT CLOSE CONNECTION HERE. } }
@param updateClause @param params @return @throws SQLException
update
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
Apache-2.0
/**
 * @return the JDBC connection associated with this operator. Its lifecycle is
 *     managed by the enclosing transaction — callers must not close it.
 */
public Connection getConnection() {
  return this.conn;
}
@return the JDBC connection associated with this operator.
getConnection
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DatabaseTransOperator.java
Apache-2.0
/**
 * Marks one DB connection event on the meter.
 *
 * <p>Thread-safety: no explicit synchronization is needed because (1) dropwizard
 * metrics handles concurrency internally, and (2) mark() is a plain counter
 * increment with no compound state that could race.
 */
void markDBConnection() {
  this.dbConnectionMeter.mark();
}
Mark the occurrence of an DB query event.
markDBConnection
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/DBMetrics.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/DBMetrics.java
Apache-2.0
/**
 * Overrides {@link BasicDataSource#getConnection()} to add retry logic: keeps
 * trying to obtain a writable (non-read-only) connection, sleeping 15 seconds
 * between attempts. Deliberately not synchronized so the healthy path stays fast.
 */
@Override
public Connection getConnection() throws SQLException {
  this.dbMetrics.markDBConnection();
  final long startMs = System.currentTimeMillis();
  Connection connection = null;
  int retryAttempt = 1;
  // NOTE(review): starting at 1 with a strict '<' bound gives at most
  // MAX_DB_RETRY_COUNT - 1 attempts — confirm that is intended.
  while (retryAttempt < AzDBUtil.MAX_DB_RETRY_COUNT) {
    try {
      /*
       * When the DB connection cannot be fetched (e.g., network issue) or cannot
       * be validated, {@link BasicDataSource} throws a SQLException and resets
       * its internal dataSource to null; createDataSource() then builds a new
       * one. Each attempt can hang ~75 seconds (hard coded, not tunable).
       */
      connection = createDataSource().getConnection();
      /*
       * A null or read-only connection is treated as failure: during a
       * master->slave failover the old master is set read-only, so we must keep
       * looking for a write-enabled connection.
       */
      if (connection == null || isReadOnly(connection)) {
        throw new SQLException("Failed to find DB connection Or connection is read only. ");
      } else {
        // Record how long it took to obtain a usable DB connection.
        this.dbMetrics.setDBConnectionTime(System.currentTimeMillis() - startMs);
        return connection;
      }
    } catch (final SQLException ex) {
      /*
       * Invalidate the (possibly half-open) connection so it is rebuilt later.
       * If the remote IP is unreachable, this path may itself block for a while.
       */
      this.dbMetrics.markDBFailConnection();
      try {
        invalidateConnection(connection);
      } catch (final Exception e) {
        logger.error("can not invalidate connection.", e);
      }
      logger.error("Failed to find write-enabled DB connection. Wait 15 seconds and retry."
          + " No.Attempt = " + retryAttempt, ex);
      /*
       * When the database is completely down, connection fetching fails
       * immediately, so sleep 15 seconds before retrying.
       */
      sleep(1000L * 15);
      retryAttempt++;
    }
  }
  // NOTE(review): after exhausting retries this can return null — callers must
  // be prepared for that; confirm whether throwing would be preferable.
  return connection;
}
This method overrides {@link BasicDataSource#getConnection()}, in order to have retry logics. We don't make the call synchronized in order to guarantee normal cases performance.
getConnection
java
azkaban/azkaban
azkaban-db/src/main/java/azkaban/db/MySQLDataSource.java
https://github.com/azkaban/azkaban/blob/master/azkaban-db/src/main/java/azkaban/db/MySQLDataSource.java
Apache-2.0
/**
 * Serializes {@code obj} as JSON onto the servlet response body and sets the
 * JSON content type.
 *
 * @param resp response whose output stream receives the JSON payload
 * @param obj object to serialize
 * @throws IOException if serialization or writing to the stream fails
 */
public static void writeJSON(final HttpServletResponse resp, final Object obj)
    throws IOException {
  resp.setContentType(JSON_MIME_TYPE);
  final ObjectMapper mapper = new ObjectMapper();
  final OutputStream stream = resp.getOutputStream();
  mapper.writeValue(stream, obj);
  // Flush so the payload is pushed to the client even if the container does not
  // flush the response stream on completion.
  stream.flush();
}
Write an Object to HttpResponse in Json format. @param resp @param obj @throws IOException
writeJSON
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/common/ServletUtils.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/common/ServletUtils.java
Apache-2.0
public static Path getCurrentDir() { // AZ_HOME must provide a correct path, if not then azHome is set to current working dir. final String azHome = Optional.ofNullable(System.getenv(Constants.AZ_HOME)).orElse(""); return Paths.get(azHome).toAbsolutePath(); }
Returns the base directory for containerized flow preparation, derived from the AZ_HOME environment variable. AZ_HOME must provide a correct path; if it is not set, the current working directory is used.
getCurrentDir
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/ContainerizedFlowPreparer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/ContainerizedFlowPreparer.java
Apache-2.0
@VisibleForTesting void setResourceUtilization() { final FlowRunnerProxy flowRunnerProxy = this.flowRunner.getProxy(); final String cpuRequest = System .getenv(Constants.ContainerizedDispatchManagerProperties.ENV_CPU_REQUEST); final String memoryRequest = System .getenv(Constants.ContainerizedDispatchManagerProperties.ENV_MEMORY_REQUEST); // cpuRequest and memoryRequest are converted to Quantity object to get the parsed value from // the human readable string. Example: // 1KiB will be converted to 1024 bytes // 500m will be converted to 0.500 if (null != cpuRequest) { final Quantity cpuRequestQuantity = new Quantity(cpuRequest); flowRunnerProxy.setCpuUtilization(cpuRequestQuantity.getNumber().doubleValue()); } if (null != memoryRequest) { final Quantity memoryRequestQuantity = new Quantity(memoryRequest); try { flowRunnerProxy.setMemoryUtilization(memoryRequestQuantity.getNumber().longValueExact()); } catch (ArithmeticException e) { logger.info("Unable to set Memory Utilization", e); } } }
This method reads the CPU and MEMORY REQUEST ENV variables. If they are present then they will be used to set the Resource Utilization in FlowRunner object via FlowRunnerProxy.
setResourceUtilization
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Creates the FlowRunner and sets up the flow execution directory with project
 * dependencies. Also wires an optional pipeline flow watcher and applies a
 * (possibly whitelist-gated) override of the job-thread count from flow params.
 *
 * @param flow executable flow object
 * @throws ExecutorManagerException if the flow-parameter thread count cannot be applied
 */
private void createFlowRunner(final ExecutableFlow flow) throws ExecutorManagerException {
  // Prepare the flow with project dependencies.
  this.flowPreparer.setup(flow);
  // Setup flow watcher
  FlowWatcher watcher = null;
  final ExecutionOptions options = flow.getExecutionOptions();
  if (options.getPipelineExecutionId() != null) {
    final int pipelinedExecId = options.getPipelineExecutionId();
    watcher = new RemoteFlowWatcher(pipelinedExecId, this.executorLoader);
  }
  int numJobThreads = this.numJobThreadPerFlow;
  if (options.getFlowParameters().containsKey(FLOW_NUM_JOB_THREADS)) {
    try {
      if (!ProjectWhitelist.isXmlFileLoaded()) {
        ProjectWhitelist.load(azkabanProps);
      }
      final int numJobs = Integer.valueOf(options.getFlowParameters().get(
          FLOW_NUM_JOB_THREADS));
      logger.info("Num of job threads read from flow parameter is " + numJobs);
      // A thread count above the default is only honored for whitelisted projects.
      if (numJobs > 0 && (numJobs <= numJobThreads || ProjectWhitelist
          .isProjectWhitelisted(flow.getProjectId(), WhitelistType.NumJobPerFlow))) {
        numJobThreads = numJobs;
      }
    } catch (final Exception e) {
      throw new ExecutorManagerException(
          "Failed to set the number of job threads "
              + options.getFlowParameters().get(FLOW_NUM_JOB_THREADS)
              + " for flow " + flow.getExecutionId(), e);
    }
  }
  // TODO : figure out the metrics
  // Create the FlowRunner
  final MetricsManager metricsManager = new MetricsManager(new MetricRegistry());
  final CommonMetrics commonMetrics = new CommonMetrics(metricsManager);
  final ExecMetrics execMetrics = new ExecMetrics(metricsManager);
  this.flowRunner = new FlowRunner(flow, this.executorLoader, this.executionLogsLoader,
      this.projectLoader, this.jobTypeManager, this.azkabanProps, this.eventReporter,
      this.alerterHolder, commonMetrics, execMetrics);
  this.flowRunner.setFlowWatcher(watcher)
      .setJobLogSettings(this.jobLogChunkSize, this.jobLogNumFiles)
      .setValidateProxyUser(this.validateProxyUser)
      .setNumJobThreads(numJobThreads)
      .addListener(this);
}
Create Flow Runner and setup the flow execution directory with project dependencies. @param flow Executable flow object. @return FlowRunner object. @throws ExecutorManagerException
createFlowRunner
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Sets up an in-memory keystore to be reused by all job executions in the
 * flow, then deletes the on-disk cert file since the keystore is cached.
 *
 * @throws ExecutorManagerException if the keystore (or per-user keystore map)
 *     cannot be prefetched
 */
private void setupKeyStore() throws ExecutorManagerException {
  // Fetch keyStore props and use it to get the KeyStore, put it in JobTypeManager
  final Props commonPluginLoadProps = this.jobTypeManager.getCommonPluginLoadProps();
  if (commonPluginLoadProps != null) {
    // Load HadoopSecurityManager reflectively from the configured class name.
    HadoopSecurityManager hadoopSecurityManager = null;
    try {
      final String hadoopSecurityClassName =
          commonPluginLoadProps.getString(HadoopJobUtils.HADOOP_SECURITY_MANAGER_CLASS_PARAM);
      final Class<?> hadoopSecurityManagerClass =
          HadoopProxy.class.getClassLoader().loadClass(hadoopSecurityClassName);
      logger.info("Loading hadoop security manager " + hadoopSecurityManagerClass.getName());
      hadoopSecurityManager = (HadoopSecurityManager)
          Utils.callConstructor(hadoopSecurityManagerClass, commonPluginLoadProps);
    } catch (final Exception e) {
      logger.error("Could not instantiate Hadoop Security Manager ", e);
      throw new RuntimeException("Failed to get hadoop security manager!" + e.getCause(), e);
    }
    // POLP mode prefetches one keystore per proxy user; otherwise one shared keystore.
    if (commonPluginLoadProps.getBoolean("use.polp.keystores", false)) {
      final Map<String, KeyStore> keyStoreMap =
          hadoopSecurityManager.getKeyStoreMap(commonPluginLoadProps);
      if (keyStoreMap == null) {
        logger.error("Failed to Prefetch KeyStore Map of Proxy Users");
        throw new ExecutorManagerException("Failed to Prefetch KeyStore Map of Proxy Users");
      }
    } else {
      final KeyStore keyStore = hadoopSecurityManager.getKeyStore(commonPluginLoadProps);
      if (keyStore == null) {
        logger.error("Failed to Prefetch KeyStore");
        throw new ExecutorManagerException("Failed to Prefetch KeyStore");
      }
    }
    logger.info("In-memory Keystore is setup, delete the cert file");
    // Delete the cert file from disk as the KeyStore is already cached above.
    final Path certFilePath = Paths.get(this.azkabanProps.get(
        Constants.ConfigurationKeys.CSR_KEYSTORE_LOCATION));
    deleteSymlinkedFile(certFilePath);
  }
}
Setup in-memory keystore to be reused for all the job executions in the flow. @throws ExecutorManagerException
setupKeyStore
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Reads up to {@code length} bytes of the accumulated flow log starting at the
 * given byte offset.
 *
 * @param execId execution id (used for log context only)
 * @param startByte offset into the log file
 * @param length maximum number of bytes to return
 * @return the requested slice of the flow log
 * @throws ExecutorManagerException if the flow has not started, the execution
 *     directory or log file is missing, or the read fails
 */
public LogData readFlowLogs(final int execId, final int startByte, final int length)
    throws ExecutorManagerException {
  logger.info("readFlowLogs called");
  // Guard: the runner is only created once the flow launches.
  if (this.flowRunner == null) {
    logger.warn(String.format(
        "Attempt to read flow logs before flow execId: %d got a chance to start", execId));
    throw new ExecutorManagerException("The flow has not launched yet!");
  }
  final File executionDir = this.flowRunner.getExecutionDir();
  if (executionDir == null || !executionDir.exists()) {
    logger.warn(String.format(
        "Error reading file. Execution directory does not exist for flow execId: %d", execId));
    throw new ExecutorManagerException("Error reading file. Execution directory does not exist");
  }
  // exists() does not throw IOException, so this check can live outside the try.
  if (!this.logFile.exists()) {
    logger.warn(String.format("Flow log file does not exist for flow execId: %d", execId));
    throw new ExecutorManagerException("Flow log file does not exist.");
  }
  try {
    return FileIOUtils.readUtf8File(this.logFile, startByte, length);
  } catch (final IOException e) {
    logger.error(String.format(
        "IOException while trying to read flow log file for flow execId: %d", execId));
    throw new ExecutorManagerException(e);
  }
}
Return accumulated flow logs with the specified length from the flow container starting from the given byte offset. @param execId @param startByte @param length @return @throws ExecutorManagerException
readFlowLogs
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Returns accumulated logs for a specific job attempt, starting at the provided byte offset.
 *
 * @param execId execution id; used only in log messages here
 * @param jobId id of the job whose log is read
 * @param attempt attempt number of the job
 * @param startByte byte offset to start reading from
 * @param length maximum number of bytes to read
 * @return the requested chunk of the job log
 * @throws ExecutorManagerException if the flow has not started, the execution directory or
 *     job log file does not exist, or the file cannot be read
 */
public LogData readJobLogs(final int execId, final String jobId, final int attempt,
    final int startByte, final int length) throws ExecutorManagerException {
  logger.info("readJobLogs called");
  if (this.flowRunner == null) {
    logger.warn(String.format("Attempt to read job logs before flow got a chance to start. "
        + "Flow execId: %d, jobId: %s", execId, jobId));
    throw new ExecutorManagerException("The flow has not launched yet!");
  }
  final File dir = this.flowRunner.getExecutionDir();
  if (dir == null || !dir.exists()) {
    logger.warn(String.format("Error reading jobLogs. Execution dir does not exist. execId: %d, jobId: %s", execId, jobId));
    throw new ExecutorManagerException(
        "Error reading file. Execution directory does not exist.");
  }
  try {
    final File logFile = this.flowRunner.getJobLogFile(jobId, attempt);
    if (logFile != null && logFile.exists()) {
      return FileIOUtils.readUtf8File(logFile, startByte, length);
    } else {
      logger.warn(String.format("Job log file does not exist. Flow execId: %d, jobId: %s", execId, jobId));
      throw new ExecutorManagerException("Job log file does not exist.");
    }
  } catch (final IOException e) {
    logger.error(String.format("IOException while trying to read Job logs. execId: %d, jobId: %s", execId, jobId));
    throw new ExecutorManagerException(e);
  }
}
Return accumulated job logs for a specific job starting with the provided byte offset. @param execId @param jobId @param attempt @param startByte @param length @return @throws ExecutorManagerException
readJobLogs
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
private void logVersionSet(final ExecutableFlow flow) { final VersionSet versionSet = flow.getVersionSet(); if (versionSet == null) { // Should not happen. logger.error("VersionSet is not set for the flow"); } else { logger.info("VersionSet: " + ServerUtils.getVersionSetJsonString(versionSet)); } }
Log the versionSet for this flow execution @param flow Executable flow.
logVersionSet
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Logs a notice when this flow execution pod is autoscaled by VPA.
 *
 * @param flow the executable flow to check
 */
private void logVPAEnabled(final ExecutableFlow flow) {
  // Nothing to log unless the flow is VPA-enabled.
  if (!flow.isVPAEnabled()) {
    return;
  }
  logger.info(String.format("This flow execution pod %s is autoscaled by Azkaban. If this "
      + "execution ends with Out-Of-Memory Killed, please reach out to Azkaban team for "
      + "help.", flow.getExecutionId()));
}
Log if this flow execution pod is autoscaled by VPA @param flow Executable flow.
logVPAEnabled
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Deletes the given path and, when it is a symlink, first recursively deletes the chain of
 * link targets (targets are removed before the links pointing at them).
 *
 * @param symlinkedFilePath path to the file; may be a symlink or a regular file
 * @throws ExecutorManagerException if a link target cannot be resolved or a deletion fails
 *     (note: a dangling symlink target would make the delete below throw)
 */
public static void deleteSymlinkedFile(final Path symlinkedFilePath)
    throws ExecutorManagerException {
  if (Files.isSymbolicLink(symlinkedFilePath)) {
    Path filePath = null;
    try {
      filePath = Files.readSymbolicLink(symlinkedFilePath);
    } catch (final IOException e) {
      logger.error(String.format("Error reading symlink %s", symlinkedFilePath), e);
      throw new ExecutorManagerException(e);
    }
    // Recurse into the link target first; the target itself may be another symlink.
    deleteSymlinkedFile(filePath);
  }
  // Delete the file, it could be a symlink
  try {
    Files.delete(symlinkedFilePath);
  } catch (final IOException e) {
    logger.error(String.format("Error deleting : %s", symlinkedFilePath), e);
    throw new ExecutorManagerException(e);
  }
}
Deletes all the symlinks and targeted files recursively. @param symlinkedFilePath Path to file, could be a symlink @throws ExecutorManagerException
deleteSymlinkedFile
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Shuts down the container: waits for the flow execution future to finish, drains the
 * executor service (up to SHUTDOWN_TIMEOUT_IN_SECONDS), stops the Jetty server, and finally
 * exits the JVM.
 */
@VisibleForTesting
void shutdown() {
  logger.info("Shutting down the container");
  if (this.flowRunner != null) {
    // Busy-wait on the flow future; submitFlowRunner is a blocking call, so this loop
    // should normally not spin long.
    while (!this.flowFuture.isDone()) {
      // This should not happen immediately as submitFlowRunner is a blocking call.
      try {
        Thread.sleep(100);
      } catch (final InterruptedException e) {
        logger.error(String.format("The sleep while waiting for execution : %d to finish was "
            + "interrupted", this.execId));
      }
    }
  } else {
    logger.warn("Flowrunner is null, the flow execution never started!");
  }
  boolean result = false;
  try {
    this.executorService.shutdown();
    // Wait upto 10 seconds for clean shutdown, otherwise, System.exit() will wipe out
    // everything
    logger.info("Awaiting Shutdown of executing flow");
    result = this.executorService.awaitTermination(
        SHUTDOWN_TIMEOUT_IN_SECONDS, TimeUnit.SECONDS);
  } catch (final InterruptedException e) {
    logger.error(e.getMessage());
  }
  if (!result) {
    logger.warn("ExecutorService did not shut down cleanly yet. Ignoring it.");
  }
  try {
    this.jettyServer.stop();
    this.jettyServer.destroy();
  } catch (final Exception e) {
    // Eat up the exception
    logger.error("Error shutting down JettyServer while winding down the FlowContainer", e);
  }
  logger.info("Sayonara!");
  closeLogger();
  System.exit(0);
}
Shutdown the Container. This shuts down the ExecutorService which runs the flow execution as well as JettyServer.
shutdown
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/container/FlowContainer.java
Apache-2.0
/**
 * Updates the final dag status once every node is done.
 *
 * <p>If any node has not reached a terminal state yet, this method returns without changing
 * anything. The dag is considered failed if at least one node has FAILURE status.
 */
void updateDagStatus() {
  // A dag may have nodes that are disabled. It's safer to scan all the nodes.
  // Assume the overhead is minimal. If it is not the case, we can optimize later.
  boolean failed = false;
  for (final Node node : this.nodes) {
    final Status nodeStatus = node.getStatus();
    if (!nodeStatus.isTerminal()) {
      return;
    }
    if (nodeStatus == Status.FAILURE) {
      failed = true;
    }
  }

  // Update the dag status only after all nodes have reached terminal states.
  updateDagStatusInternal(failed);
}
Update the final dag status when all nodes are done. <p>If any node has not reached its terminal state, this method will simply return.
updateDagStatus
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/Dag.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/Dag.java
Apache-2.0
/**
 * Creates a new node and registers it with this builder.
 *
 * @param name unique name of the node within the dag
 * @param nodeProcessor node processor associated with this node
 * @return the newly created node
 * @throws DagException if the name already exists in the dag or the dag is already built
 */
public Node createNode(final String name, final NodeProcessor nodeProcessor) {
  checkIsBuilt();
  if (this.nameToNodeMap.containsKey(name)) {
    throw new DagException(String.format("Node names in %s need to be unique. The name "
        + "(%s) already exists.", this, name));
  }

  final Node newNode = new Node(name, nodeProcessor, this.dag);
  this.nameToNodeMap.put(name, newNode);
  return newNode;
}
Creates a new node and adds it to the DagBuilder. @param name name of the node @param nodeProcessor node processor associated with this node @return a new node @throws DagException if the name is not unique in the DAG.
createNode
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
Apache-2.0
/**
 * Throws an exception if {@code build()} has already been called on this builder.
 *
 * @throws DagException if the dag is already built
 */
private void checkIsBuilt() {
  if (!this.isBuilt) {
    return;
  }
  throw new DagException(
      String.format("The DAG (%s) is built already. Can't create new nodes.", this));
}
Throws an exception if the {@link DagBuilder#build()} method has been called.
checkIsBuilt
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
Apache-2.0
/**
 * Adds a parent node to a child node. Both names must already be registered with this
 * builder via {@code createNode}.
 *
 * @param childNodeName name of the child node
 * @param parentNodeName name of the parent node
 * @throws DagException if either node name is unknown or the dag is already built
 */
public void addParentNode(final String childNodeName, final String parentNodeName) {
  checkIsBuilt();
  final Node child = findExistingNode(childNodeName, "child");
  final Node parent = findExistingNode(parentNodeName, "parent");
  child.addParent(parent);
}

/**
 * Looks up a registered node by name, failing with a descriptive error otherwise.
 *
 * @param nodeName name of the node to look up
 * @param role "child" or "parent"; used only in the error message
 * @return the registered node
 * @throws DagException if no node with that name was created
 */
private Node findExistingNode(final String nodeName, final String role) {
  final Node node = this.nameToNodeMap.get(nodeName);
  if (node == null) {
    throw new DagException(
        String.format("Unknown %s node (%s). Did you create the node?", role, nodeName));
  }
  return node;
}
Add a parent node to a child node. All the names should have been registered with this builder with the {@link DagBuilder#createNode(String, NodeProcessor)} call. @param childNodeName name of the child node @param parentNodeName name of the parent node
addParentNode
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
Apache-2.0
/**
 * Builds the dag after verifying there are no circular dependencies.
 *
 * <p>Once this method is called, subsequent attempts to modify the dag through this builder
 * will throw; the returned Dag reflects the builder's state at this point.</p>
 *
 * @return the built Dag
 * @throws DagException if the dag was already built or contains a circular dependency
 */
public Dag build() {
  checkIsBuilt();
  checkCircularDependencies();
  this.isBuilt = true;
  return this.dag;
}
Builds the dag. <p>Once this method is called, subsequent calls via NodeBuilder to modify the nodes's relationships in the dag will have no effect on the returned Dag object. </p> @return the Dag reflecting the current state of the DagBuilder
build
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
Apache-2.0
/**
 * Checks if the builder contains nodes that form a circular dependency ring, using a
 * depth-first traversal over each node's parents (see the topological-sorting DFS scheme).
 *
 * @throws DagException if a circular dependency is detected; the message includes a sample
 *     of the nodes on the cycle
 */
private void checkCircularDependencies() {
  // Local class capturing the DFS bookkeeping sets for the traversal.
  class CircularDependencyChecker {

    // The nodes that need to be visited
    private final Set<Node> toVisit = new HashSet<>(DagBuilder.this.nameToNodeMap.values());

    // The nodes that have finished traversing all their parent nodes
    private final Set<Node> finished = new HashSet<>();

    // The nodes that are waiting for their parent nodes to finish visit.
    private final Set<Node> ongoing = new HashSet<>();

    // One sample of nodes that form a circular dependency
    private final List<Node> sampleCircularNodes = new ArrayList<>();

    /**
     * Checks if the builder contains nodes that form a circular dependency ring.
     *
     * @throws DagException if true
     */
    private void check() {
      while (!this.toVisit.isEmpty()) {
        final Node node = removeOneNodeFromToVisitSet();
        if (checkNode(node)) {
          final String msg = String.format("Circular dependency detected. Sample: %s",
              this.sampleCircularNodes);
          throw new DagException(msg);
        }
      }
    }

    /**
     * Removes one node from the toVisit set and returns that node.
     *
     * @return a node
     */
    private Node removeOneNodeFromToVisitSet() {
      final Iterator<Node> iterator = this.toVisit.iterator();
      final Node node = iterator.next();
      iterator.remove();
      return node;
    }

    /**
     * Checks if the node is part of a group of nodes that form a circular dependency ring.
     *
     * <p>If true, the node will be added to the sampleCircularNodes list</p>
     *
     * @param node node to check
     * @return true if it is
     */
    private boolean checkNode(final Node node) {
      if (this.finished.contains(node)) {
        return false;
      }
      if (this.ongoing.contains(node)) {
        // Reached a node that is still on the current DFS path: a cycle exists.
        this.sampleCircularNodes.add(node);
        return true;
      }
      this.toVisit.remove(node);
      this.ongoing.add(node);
      for (final Node parent : node.getParents()) {
        if (checkNode(parent)) {
          this.sampleCircularNodes.add(node);
          return true;
        }
      }
      this.ongoing.remove(node);
      this.finished.add(node);
      return false;
    }
  }

  final CircularDependencyChecker checker = new CircularDependencyChecker();
  checker.check();
}
Checks if the builder contains nodes that form a circular dependency ring. <p>The depth first algorithm is described in this article <a href="https://en.wikipedia.org/wiki/Topological_sorting">https://en.wikipedia.org/wiki/Topological_sorting</a> </p> @throws DagException if true
checkCircularDependencies
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
Apache-2.0
/**
 * Checks if the builder contains nodes that form a circular dependency ring, visiting each
 * pending node until the toVisit set is drained.
 *
 * @throws DagException if a cycle is found; the message includes sampleCircularNodes
 */
private void check() {
  while (!this.toVisit.isEmpty()) {
    final Node node = removeOneNodeFromToVisitSet();
    if (checkNode(node)) {
      final String msg = String.format("Circular dependency detected. Sample: %s",
          this.sampleCircularNodes);
      throw new DagException(msg);
    }
  }
}
Checks if the builder contains nodes that form a circular dependency ring. @throws DagException if true
check
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
Apache-2.0
/**
 * Removes an arbitrary node from the toVisit set and returns it.
 *
 * @return the removed node; assumes the set is non-empty (iterator.next() would throw
 *     otherwise)
 */
private Node removeOneNodeFromToVisitSet() {
  final Iterator<Node> iterator = this.toVisit.iterator();
  final Node node = iterator.next();
  iterator.remove();
  return node;
}
Removes one node from the toVisit set and returns that node. @return a node
removeOneNodeFromToVisitSet
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/DagBuilder.java
Apache-2.0
/**
 * @return the dag this object holds a reference to (the {@code dag} field)
 */
Dag getDag() {
  return this.dag;
}
Node in a DAG: Directed acyclic graph.
getDag
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/Node.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/Node.java
Apache-2.0
/**
 * Checks if the node is ready to run: its own status is READY and every parent has
 * effectively succeeded.
 *
 * @return true if the node is ready to run
 */
private boolean isReady() {
  if (this.status != Status.READY) {
    // e.g. if the node is disabled, it is not ready to run.
    return false;
  }
  for (final Node parent : this.parents) {
    if (!parent.status.isSuccessEffectively()) {
      return false;
    }
  }
  return true;
}
Checks if the node is ready to run. @return true if the node is ready to run
isReady
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/Node.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/Node.java
Apache-2.0
/**
 * Transitions the node to the success state, triggers children that are now runnable, and
 * asks the dag to re-evaluate its overall status.
 */
void markSuccess() {
  // It's possible that the dag is killed before this method is called.
  assertRunningOrKilling();
  changeStatus(Status.SUCCESS);
  for (final Node child : this.children) {
    child.runIfAllowed();
  }
  this.dag.updateDagStatus();
}
Transitions the node to the success state.
markSuccess
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/Node.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/Node.java
Apache-2.0
/**
 * Starts the node (transitions it to RUNNING) when all its dependencies are met; otherwise
 * does nothing.
 */
void runIfAllowed() {
  if (!isReady()) {
    return;
  }
  changeStatus(Status.RUNNING);
}
Checks if all the dependencies are met and run if they are.
runIfAllowed
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/Node.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/Node.java
Apache-2.0
/**
 * Kills a node as part of a dag-wide kill.
 *
 * <p>A node is not designed to be killed individually; this expects {@code Dag#kill()} to
 * kill all nodes, so no kill signal is propagated to children here. READY/BLOCKED nodes
 * become CANCELED, RUNNING nodes become KILLING, terminal nodes are left untouched.
 */
void kill() {
  assert (this.dag.getStatus() == Status.KILLING);
  if (this.status == Status.READY || this.status == Status.BLOCKED) {
    // If the node is disabled, keep the status as disabled.
    changeStatus(Status.CANCELED);
  } else if (this.status == Status.RUNNING) {
    changeStatus(Status.KILLING);
  }
  // If the node has finished, leave the status intact.
}
Kills a node. <p>A node is not designed to be killed individually. This method expects {@link Dag#kill()} method to kill all nodes. Thus this method itself doesn't need to propagate the kill signal to the node's children nodes.
kill
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/dag/Node.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/dag/Node.java
Apache-2.0
/**
 * Downloads the given JAR dependencies from storage into the project folder, logging the
 * elapsed time, and rethrows any transfer failure.
 *
 * @param proj project metadata (used for logging and as the project name for the transfer)
 * @param execId execution id (logging only)
 * @param folder root of the unzipped project where dependency files are placed
 * @param dependencies the set of dependencies to download
 * @throws DependencyTransferException if one or more dependencies fail to download
 */
private void downloadAllDependencies(final ProjectDirectoryMetadata proj, final int execId,
    final File folder, final Set<Dependency> dependencies) {
  // Download all of the dependencies from storage
  LOGGER.info("Downloading {} JAR dependencies... Project: {}, ExecId: {}",
      dependencies.size(), proj, execId);
  final Set<DependencyFile> depFiles = dependencies
      .stream()
      .map(d -> getDependencyFile(folder, d))
      .collect(Collectors.toSet());
  try {
    final long start = System.currentTimeMillis();
    this.dependencyTransferManager.downloadAllDependencies(depFiles, proj.getProjectName());
    LOGGER.info("Downloading {} JAR dependencies for project {} when preparing "
            + "execution [execid {}] completed in {} second(s)", dependencies.size(), proj,
        execId, (System.currentTimeMillis() - start) / 1000);
  } catch (final DependencyTransferException e) {
    LOGGER.error("Unable to download one or more dependencies when preparing execId {} for "
        + "project {}.", execId, proj);
    throw e;
  }
}
Download necessary JAR dependencies from storage @param proj project to download @param execId execution id number @param folder root of unzipped project @param dependencies the set of dependencies to download
downloadAllDependencies
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/AbstractFlowPreparer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/AbstractFlowPreparer.java
Apache-2.0
/**
 * Returns the currently executing executor server instance.
 *
 * @return the static app reference; presumably null until the server is launched — TODO
 *     confirm where {@code app} is assigned
 */
public static AzkabanExecutorServer getApp() {
  return app;
}
Returns the currently executing executor server, if one exists.
getApp
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
Apache-2.0
/**
 * Configures metric reporting per azkaban.properties: when "executor.metric.reports" is
 * true, registers an in-memory emitter and the failed-flow/failed-job/running-job/
 * running-flow/queued-flow metrics, each with its own (or the default) reporting interval.
 *
 * @throws MetricException declared for metric registration failures
 */
private void configureMetricReports() throws MetricException {
  final Props props = getAzkabanProps();
  if (props != null && props.getBoolean("executor.metric.reports", false)) {
    logger.info("Starting to configure Metric Reports");
    final MetricReportManager metricManager = MetricReportManager.getInstance();
    final IMetricEmitter metricEmitter = new InMemoryMetricEmitter(props);
    metricManager.addMetricEmitter(metricEmitter);

    logger.info("Adding number of failed flow metric");
    metricManager.addMetric(new NumFailedFlowMetric(metricManager, props
        .getInt(METRIC_INTERVAL + NumFailedFlowMetric.NUM_FAILED_FLOW_METRIC_NAME,
            props.getInt(METRIC_INTERVAL + "default"))));

    logger.info("Adding number of failed jobs metric");
    metricManager.addMetric(new NumFailedJobMetric(metricManager, props
        .getInt(METRIC_INTERVAL + NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME,
            props.getInt(METRIC_INTERVAL + "default"))));

    logger.info("Adding number of running Jobs metric");
    metricManager.addMetric(new NumRunningJobMetric(metricManager, props
        .getInt(METRIC_INTERVAL + NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME,
            props.getInt(METRIC_INTERVAL + "default"))));

    logger.info("Adding number of running flows metric");
    metricManager.addMetric(new NumRunningFlowMetric(this.runnerManager, metricManager,
        props.getInt(METRIC_INTERVAL + NumRunningFlowMetric.NUM_RUNNING_FLOW_METRIC_NAME,
            props.getInt(METRIC_INTERVAL + "default"))));

    logger.info("Adding number of queued flows metric");
    metricManager.addMetric(new NumQueuedFlowMetric(this.runnerManager, metricManager,
        props.getInt(METRIC_INTERVAL + NumQueuedFlowMetric.NUM_QUEUED_FLOW_METRIC_NAME,
            props.getInt(METRIC_INTERVAL + "default"))));

    logger.info("Completed configuring Metric Reports");
  }
}
Configure Metric Reporting as per azkaban.properties settings
configureMetricReports
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
Apache-2.0
/**
 * Returns the port the executor server is listening on.
 *
 * @return the local port of the server's first connector
 * @throws IllegalStateException (via checkState) if the server has no connectors
 */
public int getPort() {
  final Connector[] connectors = this.server.getConnectors();
  checkState(connectors.length >= 1, "Server must have at least 1 connector");

  // The first connector is created upon initializing the server. That's the one that has the port.
  return connectors[0].getLocalPort();
}
Get the current server port @return the port at which the executor server is running
getPort
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
Apache-2.0
/**
 * Initiates a safe shutdown of the server: spawns a "shutdown" thread that sleeps briefly
 * (to let in-flight API calls complete) before performing the real shutdown, then returns
 * immediately.
 */
public void shutdown() {
  logger.warn("Shutting down AzkabanExecutorServer...");
  new Thread(() -> {
    // Hack: Sleep for a little time to allow API calls to complete
    sleep(Duration.ofSeconds(2));
    shutdownInternal();
  }, "shutdown").start();
}
Shutdown the server. - performs a safe shutdown. Waits for completion of current tasks - spawns a shutdown thread and returns immediately.
shutdown
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
Apache-2.0
/**
 * (internal API) Performs the blocking shutdown: stops the ramp and flow runner managers,
 * then sleeps for an hour so the web server's execution-updater thread can finalize before
 * the JVM exits. Must be run in a separate thread.
 */
private void shutdownInternal() {
  getFlowRampManager().shutdown();
  getFlowRunnerManager().shutdown();
  // Sleep for an hour to wait for web server updater thread
  // {@link azkaban.executor.RunningExecutionsUpdaterThread#updateExecutions} to finalize updating
  sleep(Duration.ofHours(1));
  // trigger shutdown hook
  System.exit(0);
}
(internal API) Note: This should be run in a separate thread. <p> Shutdown the server. (blocking call) - waits for jobs to finish - doesn't accept any new jobs
shutdownInternal
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/AzkabanExecutorServer.java
Apache-2.0
/**
 * Starts a timing sample on the flow-setup timer.
 *
 * @return the {@link Timer.Context} for the flow-setup timer; the caller must close/stop it
 */
public Timer.Context getFlowSetupTimerContext() {
  return this.flowSetupTimer.time();
}
@return the {@link Timer.Context} for the timer.
getFlowSetupTimerContext
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ExecMetrics.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ExecMetrics.java
Apache-2.0
/**
 * Starts a timing sample on the flow-startup-delay timer.
 *
 * @return the {@link Timer.Context} for the flow-startup-delay timer; the caller must
 *     close/stop it
 */
public Timer.Context getFlowStartupDelayTimerContext() {
  return this.flowStartupDelayTimer.time();
}
@return the {@link Timer.Context} for the flow-startup-delay timer.
getFlowStartupDelayTimerContext
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ExecMetrics.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ExecMetrics.java
Apache-2.0
/**
 * Touches the target file by setting its last-modified time to now; a failure (e.g. the
 * file does not exist) is logged at WARN and swallowed.
 *
 * @param path path to the target file
 */
@VisibleForTesting
void updateLastModifiedTime(final Path path) {
  final FileTime now = FileTime.fromMillis(System.currentTimeMillis());
  try {
    Files.setLastModifiedTime(path, now);
  } catch (final IOException ex) {
    LOGGER.warn("Error when updating last modified time for {}", path, ex);
  }
}
Update last modified time of the file if it exists. @param path path to the target file
updateLastModifiedTime
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowPreparer.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowPreparer.java
Apache-2.0
/**
 * Checks whether the ramp feature is currently active, i.e. the feature flag is on, the
 * executable ramp map has been loaded, and at least one ramp is activated.
 *
 * @return true when some system configuration is ramping
 */
private boolean isRampFeatureActivated() {
  // Must use && here: the original `isRampFeatureEnabled || executableRampMap != null`
  // dereferenced executableRampMap even when it was null (NPE if the feature is enabled
  // before the map is loaded).
  return isRampFeatureEnabled
      && executableRampMap != null
      && !executableRampMap.getActivatedAll().isEmpty();
}
Check if the system is activating the ramp feature, aka some system configuration is ramping.
isRampFeatureActivated
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
public void shutdown() { if (isRampPollingServiceEnabled) { LOGGER.warn("Shutting down FlowRampManager..."); pollingService.shutdown(); // Persistent cached data into DB saveSettings(); LOGGER.warn("Shutdown FlowRampManager complete."); } }
This shuts down the flow ramp. The call is blocking and awaits execution of all jobs.
shutdown
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Loads all ramp settings from the DB (ramps, items, dependencies, and flow/job level
 * exceptional items) and records the synchronization timestamp.
 */
@VisibleForTesting
synchronized void loadSettings() {
  loadExecutableRamps();
  loadExecutableRampItems();
  loadExecutableRampDependencies();
  loadExecutableRampExceptionalFlowItems();
  loadExecutableRampExceptionalJobItems();
  latestDataBaseSynchronizationTimeStamp = System.currentTimeMillis();
  LOGGER.info(String.format("Ramp Settings had been successfully loaded at [%d].",
      latestDataBaseSynchronizationTimeStamp));
}
Load all ramp Settings from DB
loadSettings
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Loads all active ramps from the DB (keyed by rampId), refreshing the cached map when it
 * already exists. A fetch failure is logged (with its cause) and swallowed so the existing
 * cache remains usable.
 */
@VisibleForTesting
synchronized void loadExecutableRamps() {
  try {
    if (executableRampMap == null) {
      executableRampMap = executorLoader.fetchExecutableRampMap();
    } else {
      executableRampMap.refresh(executorLoader.fetchExecutableRampMap());
    }
  } catch (ExecutorManagerException e) {
    // Pass the exception to the logger so the failure cause and stack trace are preserved.
    LOGGER.error("Load all active Executable Ramp failure", e);
  }
}
Load All active ramps, Key = rampId
loadExecutableRamps
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Loads all ramp items (keyed by rampId) from the DB, refreshing the cached map when it
 * already exists. A fetch failure is logged (with its cause) and swallowed.
 */
@VisibleForTesting
synchronized void loadExecutableRampItems() {
  try {
    if (executableRampItemsMap == null) {
      executableRampItemsMap = executorLoader.fetchExecutableRampItemsMap();
    } else {
      executableRampItemsMap.refresh(executorLoader.fetchExecutableRampItemsMap());
    }
  } catch (ExecutorManagerException e) {
    // Pass the exception to the logger so the failure cause and stack trace are preserved.
    LOGGER.error("Load Executable Ramp Items failure", e);
  }
}
Load All dependency properties into the executableRampProperties Map, Key = rampId,
loadExecutableRampItems
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Loads the default dependency values for ramps from the DB; these defaults apply when a
 * dependency has no explicit ramp setting. A fetch failure is logged (with its cause) and
 * swallowed.
 */
@VisibleForTesting
synchronized void loadExecutableRampDependencies() {
  try {
    if (executableRampDependencyMap == null) {
      executableRampDependencyMap = executorLoader.fetchExecutableRampDependencyMap();
    } else {
      executableRampDependencyMap.refresh(executorLoader.fetchExecutableRampDependencyMap());
    }
  } catch (ExecutorManagerException e) {
    // Pass the exception to the logger so the failure cause and stack trace are preserved.
    LOGGER.error("Load Executable Ramp Dependencies failure", e);
  }
}
Load All Default dependency values for ramp When the dependency does not have ramp setting, the default Value will be applied.
loadExecutableRampDependencies
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Loads all ramp exceptional items at the flow level from the DB, refreshing the cached
 * map when it already exists. A fetch failure is logged (with its cause) and swallowed.
 */
@VisibleForTesting
synchronized void loadExecutableRampExceptionalFlowItems() {
  try {
    if (executableRampExceptionalFlowItemsMap == null) {
      executableRampExceptionalFlowItemsMap =
          executorLoader.fetchExecutableRampExceptionalFlowItemsMap();
    } else {
      executableRampExceptionalFlowItemsMap
          .refresh(executorLoader.fetchExecutableRampExceptionalFlowItemsMap());
    }
  } catch (ExecutorManagerException e) {
    // Pass the exception to the logger so the failure cause and stack trace are preserved.
    LOGGER.error("Load Executable Ramp Exceptional Items on Flow Level Failure", e);
  }
}
Load All Ramp Exceptional Items on Flow Level
loadExecutableRampExceptionalFlowItems
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Loads all ramp exceptional items at the job level from the DB, refreshing the cached map
 * when it already exists. A fetch failure is logged (with its cause) and swallowed.
 */
@VisibleForTesting
synchronized void loadExecutableRampExceptionalJobItems() {
  try {
    if (executableRampExceptionalJobItemsMap == null) {
      executableRampExceptionalJobItemsMap =
          executorLoader.fetchExecutableRampExceptionalJobItemsMap();
    } else {
      executableRampExceptionalJobItemsMap
          .refresh(executorLoader.fetchExecutableRampExceptionalJobItemsMap());
    }
  } catch (ExecutorManagerException e) {
    // Pass the exception to the logger so the failure cause and stack trace are preserved.
    LOGGER.error("Load Executable Ramp Exceptional Items on Job Level Failure", e);
  }
}
Load All Ramp Exceptional Items on Job Level
loadExecutableRampExceptionalJobItems
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Persists one ramp's exceptional flow items into the DB; a persistence failure is logged
 * (with its cause) and swallowed.
 *
 * @param entry map entry of ramp id to its exceptional items
 */
@VisibleForTesting
synchronized void updateExecutedRampFlows(Map.Entry<String, ExecutableRampExceptionalItems> entry) {
  try {
    // Save all Identified workflow into the DB
    executorLoader.updateExecutedRampFlows(entry.getKey(), entry.getValue());
  } catch (ExecutorManagerException e) {
    LOGGER.error("Fail to append ramp items into DB.", e);
  }
}
Save All Ramp Exceptional Items on Flow Level
updateExecutedRampFlows
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRampManager.java
Apache-2.0
/**
 * Detects whether the node's parent flow is the root flow (an {@code ExecutableFlow}) as
 * opposed to a nested flow.
 *
 * @param node node whose parent flow is inspected
 * @return true if the parent flow is an ExecutableFlow
 */
private boolean parentIsOnTheSameLevel(final ExecutableNode node) {
  // isInstance is the reflective equivalent of the instanceof operator (false for null).
  return ExecutableFlow.class.isInstance(node.getParentFlow());
}
Detects if the "parent" is actually the root job node.
parentIsOnTheSameLevel
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
Apache-2.0
/**
 * Records the effective user for a job, keyed by job id. "noop" jobs go into the ignored
 * map; all other job types go into the regular effective-user map. Blank jobId/effectiveUser
 * or an absent jobType is logged as an error and ignored.
 *
 * @param jobId id of the job; must be non-blank
 * @param effectiveUser effective user to record; must be non-blank
 * @param jobType job type; must be present ("noop" is routed to the ignored map)
 */
public void setEffectiveUser(final String jobId, final String effectiveUser,
    final Optional<String> jobType) {
  if (StringUtils.isBlank(jobId)) {
    logger.error("Job effective user can't be set as jobId string is blank.");
    return;
  }
  if (StringUtils.isBlank(effectiveUser)) {
    logger.error("Job effective user can't be set as effectiveUser string is blank.");
    return;
  }
  final String previousVal;
  if (!jobType.isPresent()) {
    logger.error("Job effective user can't be set as jobType is absent.");
    return;
  }
  // Currently noop is the only jobtype for ignoredJobEffectiveUsers, but in future there can
  // be more.
  if (jobType.get().equals("noop")) {
    previousVal = FlowRunner.this.ignoredJobEffectiveUsers.put(jobId, effectiveUser);
  } else {
    previousVal = FlowRunner.this.jobEffectiveUsers.put(jobId, effectiveUser);
  }
  if (null != previousVal) {
    logger.info(
        String.format("Updated effectiveUser map for id: %s, prevVal: %s, newVal: %s",
            jobId, previousVal, effectiveUser));
  } else {
    logger.info(String
        .format("Updated effectiveUser map for id: %s, val: %s", jobId, effectiveUser));
  }
}
@param jobId @param effectiveUser @param jobType
setEffectiveUser
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
Apache-2.0
/**
 * Records the measured CPU utilization for this flow run.
 *
 * @param cpuUtilized CPU utilization in cpu units (fractional values allowed); may be null,
 *     in which case the stored Optional is empty
 */
public void setCpuUtilization(final Double cpuUtilized) {
  FlowRunner.this.cpuUtilized = Optional.ofNullable(cpuUtilized);
}
@param cpuUtilized measured in cpu Units. One cpu is equivalent to 1 vCPU/Core for cloud providers and 1 hyperthread on bare-metal Intel processors. Fractional values are allowed.
setCpuUtilization
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
Apache-2.0
@VisibleForTesting static void propagateMetadataFromProps(final Map<String, String> metaData, final Props inputProps, final String nodeType, final String nodeName, final Logger logger) { if (null == metaData || null == inputProps || null == logger || Strings.isNullOrEmpty(nodeType) || Strings.isNullOrEmpty(nodeName)) { throw new IllegalArgumentException("Input params should not be null or empty."); } // Backward compatibility: Unless user specifies, this will be absent from flows and jobs // .. if so, do a no-op like before if (!inputProps.containsKey(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE)) { return; } final String propsToPropagate = inputProps .getString(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE); if (Strings.isNullOrEmpty(propsToPropagate)) { // Nothing to propagate logger.info( String.format("No properties to propagate to metadata for %s: %s", nodeType, nodeName)); return; } else { logger.info(String .format("Propagating: %s to metadata for %s: %s", propsToPropagate, nodeType, nodeName)); } final List<String> propsToPropagateList = SPLIT_ON_COMMA.splitToList(propsToPropagate); for (final String propKey : propsToPropagateList) { if (!inputProps.containsKey(propKey)) { logger.warn(String.format("%s does not contains: %s property; " + "skipping propagation to metadata", nodeName, propKey)); continue; } metaData.put(propKey, inputProps.getString(propKey)); } }
Propagate properties (specified in {@code AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE}) to metadata for event reporting. @param metaData Metadata map to update with properties. @param inputProps Input properties for flow or job. @param nodeType Flow or job. @param nodeName Flow or job name. @param logger Logger from invoking class for log sanity.
propagateMetadataFromProps
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunner.java
Apache-2.0
/**
 * Changes the polling interval to the given value and keeps the configured props in sync.
 *
 * @param pollingIntervalMillis the new polling interval in milliseconds
 * @return true if the polling service restarted successfully with the new interval
 */
public boolean changePollingInterval(final long pollingIntervalMillis) {
  // Capture the previous value (for the log line) before attempting the restart.
  final long oldVal = this.azkabanProps.getLong(ConfigurationKeys.AZKABAN_POLLING_INTERVAL_MS,
      Constants.DEFAULT_AZKABAN_POLLING_INTERVAL_MS);
  if (!this.pollingService.restart(pollingIntervalMillis)) {
    return false;
  }
  // Only overwrite the props entry when the key was explicitly configured.
  if (this.azkabanProps.containsKey(ConfigurationKeys.AZKABAN_POLLING_INTERVAL_MS)) {
    this.azkabanProps.put(ConfigurationKeys.AZKABAN_POLLING_INTERVAL_MS, pollingIntervalMillis);
  }
  LOGGER.info(String.format("Changed polling interval from %d to %d milliseconds", oldVal,
      pollingIntervalMillis));
  return true;
}
Change the polling interval to the newly specified value and also update the value that's specified in the props @param pollingIntervalMillis The new polling interval. @return true if the Polling interval has changed successfully
changePollingInterval
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunnerManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunnerManager.java
Apache-2.0
/**
 * Adds the startup-dependency storage root path to the given props, but only when the
 * current storage instance reports a non-null dependency root path.
 *
 * @param props props to add the startup dependency path to
 */
private void addStartupDependencyPathToProps(final Props props) {
  if (this.storage.getDependencyRootPath() != null) {
    props.put(ThinArchiveUtils.DEPENDENCY_STORAGE_ROOT_PATH_PROP,
        this.storage.getDependencyRootPath());
  }
}
Add the startup dependency path to props if the current storage instance returns a non-null dependencyRootPath. @param props Props to add the startup dependency path to.
addStartupDependencyPathToProps
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunnerManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunnerManager.java
Apache-2.0
/**
 * Shuts down the FlowRunnerManager. The call blocks until all executing flows finish.
 */
public void shutdown() {
  LOGGER.warn("Shutting down FlowRunnerManager...");
  if (isPollDispatchMethodEnabled()) {
    this.pollingService.shutdown();
  }
  this.executorService.shutdown();
  boolean result = false;
  // Poll every minute until the executor service terminates; an interrupt is logged and the
  // wait is retried rather than aborting the shutdown.
  while (!result) {
    LOGGER.info("Awaiting Shutdown. # of executing flows: " + getNumRunningFlows());
    try {
      result = this.executorService.awaitTermination(1, TimeUnit.MINUTES);
    } catch (final InterruptedException e) {
      LOGGER.error(e.getMessage());
    }
  }
  this.flowPreparer.shutdown();
  LOGGER.warn("Shutdown FlowRunnerManager complete.");
}
This shuts down the flow runner manager. The call is blocking and awaits execution of all jobs.
shutdown
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunnerManager.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/FlowRunnerManager.java
Apache-2.0
/**
 * Handles non-ready and special statuses (already-finished, DISABLED, or killed runner).
 *
 * <p>When such a status is seen, start/end time are both set to "now" and the JOB_STARTED /
 * JOB_FINISHED events are fired back-to-back, so listeners still observe a full lifecycle.
 *
 * @return true if a quick-finish was performed (caller should skip normal execution)
 */
private boolean handleNonReadyStatus() {
  synchronized (this.syncObject) {
    Status nodeStatus = this.node.getStatus();
    boolean quickFinish = false;
    final long time = System.currentTimeMillis();
    if (Status.isStatusFinished(nodeStatus)) {
      // Already finished — nothing to run, but still emit the event pair below.
      quickFinish = true;
    } else if (nodeStatus == Status.DISABLED) {
      changeStatus(Status.SKIPPED, time);
      quickFinish = true;
    } else if (this.isKilled()) {
      changeStatus(Status.KILLED, time);
      quickFinish = true;
    }
    if (quickFinish) {
      this.node.setStartTime(time);
      fireEvent(Event.create(this, EventType.JOB_STARTED, new EventData(node)));
      this.node.setEndTime(time);
      fireEvent(Event.create(this, EventType.JOB_FINISHED, new EventData(node)));
      return true;
    }
    return false;
  }
}
Used to handle non-ready and special statuses (i.e. KILLED). Returns true if they handled anything.
handleNonReadyStatus
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
Apache-2.0
/**
 * Validates that the user-specified working directory resolves inside this job's
 * working directory (canonical-path containment check).
 *
 * @return true when the requested directory is within the job working dir; false when it
 *     escapes it or canonicalization fails with an IOException
 */
private boolean IsSpecifiedWorkingDirectoryValid() {
  final File requestedDir = new File(this.props.get(AbstractProcessJob.WORKING_DIR));
  try {
    // Canonical paths resolve symlinks and ".." segments before the prefix comparison.
    return requestedDir.getCanonicalPath().startsWith(this.workingDir.getCanonicalPath());
  } catch (final IOException e) {
    this.logger.error("Failed to validate user's " + AbstractProcessJob.WORKING_DIR
        + " property.", e);
    return false;
  }
}
Validates execution directory specified by user.
IsSpecifiedWorkingDirectoryValid
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
Apache-2.0
/**
 * Prepends flow id, execution id, and job id as JVM system properties so a running Java
 * process can be mapped back to its flow/execution/job, then appends any previously
 * configured JVM params and extra Java options from plugin properties.
 */
private void insertJVMAargs() {
  final String flowName = this.node.getParentFlow().getFlowId();
  final String jobId = this.node.getId();
  String jobJVMArgs =
      String.format(
          "'-Dazkaban.flowid=%s' '-Dazkaban.execid=%s' '-Dazkaban.jobid=%s'",
          flowName, this.executionId, jobId);
  // Preserve any JVM params the job already configured by appending them after ours.
  final String previousJVMArgs = this.props.get(JavaProcessJob.JVM_PARAMS);
  jobJVMArgs += (previousJVMArgs == null) ? "" : " " + previousJVMArgs;
  // Add useful Java options for java jobs which are provided through properties
  final String javaOpts = insertJavaOptions();
  jobJVMArgs += (javaOpts == null) ? "" : " " + javaOpts;
  this.logger.info("job JVM args: " + jobJVMArgs);
  this.props.put(JavaProcessJob.JVM_PARAMS, jobJVMArgs);
}
Add useful JVM arguments so it is easier to map a running Java process to a flow, execution id and job
insertJVMAargs
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
Apache-2.0
/**
 * Reads the extra Java options configured for java jobs from the common plugin load props.
 *
 * @return the configured options string, or null when no common plugin props exist or the
 *     option is unset
 */
private String insertJavaOptions() {
  if (this.jobtypeManager.getCommonPluginLoadProps() == null) {
    return null;
  }
  final String javaOpts = this.jobtypeManager.getCommonPluginLoadProps()
      .getString(Constants.AZ_JOBS_JAVA_OPTS, null);
  logger.info("JAVA OPTS appended to each command : " + javaOpts);
  return javaOpts;
}
Add useful Java options for java jobs provided through properties.
insertJavaOptions
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/JobRunner.java
Apache-2.0
/**
 * Returns OS metadata for a project-cache directory. The directory size (du-equivalent) is
 * computed only the first time a project is seen; lastAccessTime is refreshed every call
 * from the size-marker file's modification time.
 *
 * @param project path for the project cache dir; the filename encodes "projectId.version"
 * @return metadata for the given path, possibly null/stale when an error occurred (logged)
 */
private ProjectDirectoryMetadata fetchProjectMetadata(final Path project) {
  ProjectDirectoryMetadata projectDirectoryMetadata = this.cachedProjects.get(project);
  try {
    if (projectDirectoryMetadata == null) {
      // Filename is "<projectId>.<versionNum>[...]"; parse both parts.
      final String fileName = project.getFileName().toString();
      final int projectId = Integer.parseInt(fileName.split("\\.")[0]);
      final int versionNum = Integer.parseInt(fileName.split("\\.")[1]);
      projectDirectoryMetadata =
          new ProjectDirectoryMetadata(projectId, versionNum, project.toFile());
      /*
       * Calculate used-space (Equivalent of du command) only if the metadata for
       * this project was never fetched before. This optimization is important as
       * recursive space calculation is a very expensive operation.
       */
      projectDirectoryMetadata.setDirSizeInByte(
          AbstractFlowPreparer.calculateDirSizeAndSave(projectDirectoryMetadata.getInstalledDir()));
    }
    // Refresh last-access from the size-marker file regardless of cache hit/miss.
    projectDirectoryMetadata.setLastAccessTime(
        Files.getLastModifiedTime(Paths.get(projectDirectoryMetadata.getInstalledDir().toString(),
            AbstractFlowPreparer.PROJECT_DIR_SIZE_FILE_NAME)));
  } catch (final Exception e) {
    log.warn("Error while loading project dir metadata for project {}",
        project.getFileName(), e);
  }
  return projectDirectoryMetadata;
}
Get metadata from the OS for the underlying path, lastAccessTime is fetched from the OS regardless of whether the given project already exists, but the space calculation for a project directory is only performed one-time. @param project path for the project cache. Project filepath encodes projectID & version within the filename @return OS Metadata for the given path
fetchProjectMetadata
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
Apache-2.0
/**
 * Deletes least-recently-used project dirs when there is no room for a new project.
 *
 * <p>Capacity = current cache size + usable partition space. If the projected size
 * (current + new project) exceeds the high watermark, non-blocking LRU eviction starts;
 * if it also exceeds the throttle watermark, this call blocks until eviction completes.
 *
 * @param newProjectSizeInBytes space in bytes the new project will add to the cache
 */
public void deleteProjectDirsIfNecessary(final long newProjectSizeInBytes) {
  final long cachePartitionSize = this.projectCacheDir.getTotalSpace();
  final long availablePartitionSize = this.projectCacheDir.getUsableSpace();
  final long start = System.currentTimeMillis();
  loadAllProjects();
  log.info("Loading {} project dirs metadata completed in {} msecs", cachedProjects.size(),
      System.currentTimeMillis() - start);
  final long currentCacheSize = getProjectDirsTotalSizeInBytes();
  // Dynamic capacity: what the cache occupies now plus what the partition can still hold.
  final long projectCacheDirCapacity = currentCacheSize + availablePartitionSize;
  boolean throttleAfterDeletion = false;
  final long highWatermark = (long) (projectCacheDirCapacity * this.percentageOfDisk);
  final long throttleWatermark = (long) (projectCacheDirCapacity * this.throttlePercentage);
  long projectedCacheSize = currentCacheSize + newProjectSizeInBytes;
  log.info("Partition = {} MB, Total Capacity = {} MB, Cache Size = {} MB, Projected Size = {} MB",
      bytesToMB(cachePartitionSize), bytesToMB(projectCacheDirCapacity),
      bytesToMB(currentCacheSize), bytesToMB(projectedCacheSize));
  log.info("High Watermark = {} MB, Throttle Watermark = {} MB", bytesToMB(highWatermark),
      bytesToMB(throttleWatermark));
  // Decide throttling before kicking off eviction so the flag reflects the pre-eviction state.
  if (projectedCacheSize >= throttleWatermark) {
    throttleAfterDeletion = true;
  }
  if (projectedCacheSize >= highWatermark) {
    log.info("Projected cache size exceeds High Watermark. LRU Eviction will kick in");
    deleteLeastRecentlyUsedProjects(projectedCacheSize - highWatermark);
  }
  if (throttleAfterDeletion) {
    /*
     * Block till already submitted cleanup is done.
     */
    log.info("Throttle Watermark was hit. Blocking till LRU eviction is complete.");
    finishPendingCleanup();
  }
}
Deleting least recently accessed project dirs when there's no room to accommodate new project. The logic: 1. Calculates the total dynamic size available for the project cache. This = (Usable space left in the disk partition + Space currently occupied by the project cache). 2. Calculates high water mark & throttle water marks based on the above number. 3. If the occupied bytes > high water mark, lazy (Non-blocking) LRU eviction kicks in 4. If the occupied bytes > throttle water mark, the method will block until LRU eviction is complete. In each case, LRU eviction attempts to keep the occupied space below high water mark. @param newProjectSizeInBytes space in bytes the new project will add to the existing cache
deleteProjectDirsIfNecessary
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
Apache-2.0
/**
 * Reports the cleaner's current state.
 *
 * @return {@code STATE_AVAILABLE} when no project dirs are queued for deletion,
 *     otherwise {@code STATE_CLEANING}
 */
public String queryState() {
  return projectsUnderDeletion.isEmpty() ? STATE_AVAILABLE : STATE_CLEANING;
}
@return Return the current state of the cleaner service
queryState
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
Apache-2.0
/**
 * Shuts down the cache deletion service gracefully (waiting up to one day) so in-flight
 * deletions finish and no half-deleted cache directories are left behind.
 */
public void shutdown() {
  try {
    new ExecutorServiceUtils().gracefulShutdown(deletionService, Duration.ofDays(1));
  } catch (final InterruptedException e) {
    // Best-effort shutdown: log and return rather than propagate the interrupt.
    log.warn("Error when deleting files", e);
  }
}
Makes sure the Cache deletion process cleanly terminates so the possibility of unclean cache directories is eliminated.
shutdown
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ProjectCacheCleaner.java
Apache-2.0
/**
 * Handles all requests to the statistics servlet. Serves a cached snapshot unless the
 * cache has expired or the caller passed the no-cache parameter; always writes the
 * (possibly refreshed) stats as JSON to the response.
 *
 * {@inheritDoc}
 */
@Override
protected void doPost(final HttpServletRequest req, final HttpServletResponse resp)
    throws ServletException, IOException {
  final boolean noCache = null != req && Boolean.valueOf(req.getParameter(noCacheParamName));
  if (noCache || System.currentTimeMillis() - lastRefreshedTime > cacheTimeInMilliseconds) {
    this.populateStatistics(noCache);
  }
  JSONUtils.toJSON(cachedstats, resp.getOutputStream(), true);
}
Handle all requests to Statistics Servlet {@inheritDoc}
doPost
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
Apache-2.0
/**
 * Refreshes the cached statistics snapshot by querying the data providers.
 * Synchronized with a double-check of the expiry so concurrent callers do not
 * repeat the (relatively expensive) collection.
 *
 * @param noCache when true, force a refresh regardless of cache age
 */
protected synchronized void populateStatistics(final boolean noCache) {
  //check again before starting the work.
  if (noCache || System.currentTimeMillis() - lastRefreshedTime > cacheTimeInMilliseconds) {
    final ExecutorInfo stats = new ExecutorInfo();
    fillRemainingMemoryPercent(stats);
    fillRemainingFlowCapacityAndLastDispatchedTime(stats);
    fillCpuUsage(stats);
    cachedstats = stats;
    lastRefreshedTime = System.currentTimeMillis();
  }
}
call the data providers to fill the returning data container for statistics data. This function refreshes the static cached copy of data in case if necessary.
populateStatistics
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
Apache-2.0
/**
 * Fills the remaining flow capacity, number of assigned flows, and last dispatched time
 * into the given stats container. Logs an error and leaves the fields untouched when the
 * executor server singleton has not been initialized yet.
 *
 * @param stats result container; only the capacity/assigned/last-dispatched fields are set
 */
protected void fillRemainingFlowCapacityAndLastDispatchedTime(final ExecutorInfo stats) {
  final AzkabanExecutorServer server = AzkabanExecutorServer.getApp();
  if (server != null) {
    // Use the already-null-checked reference instead of calling getApp() a second time,
    // which the null check above does not cover.
    final FlowRunnerManager runnerMgr = server.getFlowRunnerManager();
    final int assignedFlows = runnerMgr.getNumRunningFlows() + runnerMgr.getNumQueuedFlows();
    stats.setRemainingFlowCapacity(runnerMgr.getMaxNumRunningFlows() - assignedFlows);
    stats.setNumberOfAssignedFlows(assignedFlows);
    stats.setLastDispatchedTime(runnerMgr.getLastFlowSubmittedTime());
  } else {
    logger.error("failed to get data for remaining flow capacity or LastDispatchedTime"
        + " as the AzkabanExecutorServer has yet been initialized.");
  }
}
Fill the result set with the remaining flow capacity. @param stats reference to the result container which contains all the results; this specific method will only work on the property "remainingFlowCapacity".
fillRemainingFlowCapacityAndLastDispatchedTime
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
Apache-2.0
/**
 * Fills system CPU load into the stats container by reading /proc/loadavg (1-minute
 * moving average) through a bash subprocess. On parse failure the usage defaults to 0.0;
 * on missing binaries or subprocess failure the field is left unset and an error logged.
 *
 * @param stats result container; only the cpuUsage property is set
 */
protected void fillCpuUsage(final ExecutorInfo stats) {
  if (exists_Bash && exists_Cat && exists_LoadAvg) {
    try {
      final ArrayList<String> output = Utils
          .runProcess("/bin/bash", "-c", "/bin/cat /proc/loadavg");
      // process the output from bash call.
      if (output.size() > 0) {
        // loadavg's first whitespace-separated token is the 1-minute average.
        final String[] splitedresult = output.get(0).split("\\s+");
        double cpuUsage = 0.0;
        try {
          cpuUsage = Double.parseDouble(splitedresult[0]);
        } catch (final NumberFormatException e) {
          logger.error("yielding 0.0 for CPU usage as output is invalid -" + output.get(0));
        }
        logger.info("System load : " + cpuUsage);
        stats.setCpuUpsage(cpuUsage);
      }
    } catch (final Exception ex) {
      logger.error("failed fetch system load info "
          + "as exception is captured when fetching result from bash call. Ex -" + ex
          .getMessage());
    }
  } else {
    logger.error(
        "failed fetch system load info, one or more files from the following list are missing - "
            + "'/bin/bash'," + "'/bin/cat'," + "'/proc/loadavg'");
  }
}
<pre> fill the result set with the CPU usage . Note : As the 'Top' bash call doesn't yield accurate result for the system load, the implementation has been changed to load from the "proc/loadavg" which keeps the moving average of the system load, we are pulling the average for the recent 1 min. </pre> @param stats reference to the result container which contains all the results, this specific method will only work on the property "cpuUsage".
fillCpuUsage
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/ServerStatisticsServlet.java
Apache-2.0
/**
 * Handles all requests to the stats servlet by dispatching on the "action" parameter to
 * the matching handler; unknown actions produce an error entry. The accumulated result
 * map is always written back as JSON.
 *
 * {@inheritDoc}
 */
@Override
protected void doPost(final HttpServletRequest req, final HttpServletResponse resp)
    throws ServletException, IOException {
  final Map<String, Object> ret = new HashMap<>();
  if (hasParam(req, ACTION_PARAM)) {
    final String action = getParam(req, ACTION_PARAM);
    if (action.equals(STATS_SET_REPORTINGINTERVAL)) {
      handleChangeMetricInterval(req, ret);
    } else if (action.equals(STATS_SET_CLEANINGINTERVAL)) {
      handleChangeCleaningInterval(req, ret);
    } else if (action.equals(STATS_SET_MAXREPORTERPOINTS)) {
      handleChangeEmitterPoints(req, ret);
    } else if (action.equals(STATS_GET_ALLMETRICSNAME)) {
      handleGetAllMMetricsName(req, ret);
    } else if (action.equals(STATS_GET_METRICHISTORY)) {
      handleGetMetricHistory(req, ret);
    } else if (action.equals(STATS_SET_ENABLEMETRICS)) {
      handleChangeManagerStatusRequest(req, ret, true);
    } else if (action.equals(STATS_SET_DISABLEMETRICS)) {
      handleChangeManagerStatusRequest(req, ret, false);
    } else {
      ret.put(RESPONSE_ERROR, "Invalid action");
    }
  }
  JSONUtils.toJSON(ret, resp.getOutputStream(), true);
}
Handle all requests to Stats Servlet {@inheritDoc}
doPost
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/StatsServlet.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/StatsServlet.java
Apache-2.0
/**
 * Updates the number of display snapshots the in-memory metric emitter keeps for
 * /stats graphs. Writes success or an error message into the result map.
 *
 * @param req request carrying the new instance count parameter
 * @param ret result map populated with status or error
 */
private void handleChangeEmitterPoints(final HttpServletRequest req,
    final Map<String, Object> ret) {
  try {
    final long numInstance = getLongParam(req, STATS_MAP_EMITTERNUMINSTANCES);
    if (MetricReportManager.isAvailable()) {
      final MetricReportManager metricManager = MetricReportManager.getInstance();
      final InMemoryMetricEmitter memoryEmitter = extractInMemoryMetricEmitter(metricManager);
      memoryEmitter.setReportingInstances(numInstance);
      ret.put(STATUS_PARAM, RESPONSE_SUCCESS);
    } else {
      ret.put(RESPONSE_ERROR, "MetricManager is not available");
    }
  } catch (final Exception e) {
    logger.error(e);
    ret.put(RESPONSE_ERROR, e.getMessage());
  }
}
Update number of display snapshots for /stats graphs
handleChangeEmitterPoints
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/StatsServlet.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/StatsServlet.java
Apache-2.0
/**
 * Puts the names of all metrics tracked by the metric manager into the result map under
 * "data"; writes an error entry when the manager is unavailable or nothing is tracked.
 *
 * @param req incoming request (unused beyond dispatch)
 * @param ret result map populated with the metric names or an error
 */
private void handleGetAllMMetricsName(final HttpServletRequest req,
    final Map<String, Object> ret) {
  if (!MetricReportManager.isAvailable()) {
    ret.put(RESPONSE_ERROR, "MetricReportManager is not available");
    return;
  }
  final List<IMetric<?>> tracked = MetricReportManager.getInstance().getAllMetrics();
  if (tracked.isEmpty()) {
    ret.put(RESPONSE_ERROR, "No Metric being tracked");
    return;
  }
  final List<String> names = new LinkedList<>();
  for (final IMetric<?> metric : tracked) {
    names.add(metric.getName());
  }
  ret.put("data", names);
}
Get all the metrics tracked by metric manager
handleGetAllMMetricsName
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/StatsServlet.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/StatsServlet.java
Apache-2.0
/**
 * Evaluates the trigger condition and performs all configured actions when it is met.
 * Expired triggers are skipped; a failing action is logged and does not stop the rest.
 */
@Override
public void run() {
  logger.info("Running trigger for " + this);
  if (isTriggerExpired()) {
    logger.info(this + " expired");
    return;
  }
  logger.info("Check if trigger condition met for " + this);
  final boolean isTriggerConditionMet = this.triggerCondition.isMet();
  logger.info("Trigger condition for execid = " + this.execId + " met? = "
      + isTriggerConditionMet);
  if (isTriggerConditionMet) {
    logger.info("Condition " + this.triggerCondition.getExpression() + " met");
    for (final TriggerAction action : this.actions) {
      try {
        // Kill actions get an extra log line so execution kills are easy to trace.
        if (action instanceof KillExecutionAction) {
          logger.info("Killing execution " + this.execId);
        }
        action.doAction();
      } catch (final Exception e) {
        logger.error("Failed to do action " + action.getDescription()
            + " for execution " + this.execId, e);
      }
    }
  }
}
Perform the action if trigger condition is met
run
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/Trigger.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/Trigger.java
Apache-2.0
/**
 * Project cache hit ratio over the sliding window of recent cache accesses.
 *
 * <p>Hits are recorded in a reservoir; the ratio is (sum of recorded hit values) /
 * (number of samples in the snapshot). Sampling by access count (rather than by time)
 * keeps the ratio deterministic even when executions are sparse.
 */
@Override
public synchronized Ratio getRatio() {
  final long hitCount = Arrays.stream(this.hits.getSnapshot().getValues()).sum();
  return Ratio.of(hitCount, this.hits.getSnapshot().size());
}
Project cache hit ratio of last 100 cache accesses. <p>The advantage of sampling last 100 caches accesses over time-based sampling like last hour's cache accesses is the former is more deterministic. Suppose there's only few execution in last hour, then hit ratio might not be truly informative, which doesn't necessarily reflect performance of the cache.</p>
getRatio
java
azkaban/azkaban
azkaban-exec-server/src/main/java/azkaban/execapp/metric/ProjectCacheHitRatio.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/main/java/azkaban/execapp/metric/ProjectCacheHitRatio.java
Apache-2.0
/**
 * Creates a plain-HTTP (SSL disabled) jetty server and verifies it is reachable on the
 * configured port via an HTTP POST to a simple test servlet.
 */
@Test
public void testSslDisabledJettyServer() throws Exception {
  ExecJettyServerModule execJettyServerModule = new ExecJettyServerModule();
  Props props = new Props();
  props.put("jetty.use.ssl", "false");
  props.put("executor.port", JETTY_PORT);
  Server jettyServer = execJettyServerModule.createJettyServer(props);
  // A non-SSL server must expose exactly one plain SocketConnector on the given port.
  Assert.assertEquals(1, jettyServer.getConnectors().length);
  Assert.assertTrue(jettyServer.getConnectors()[0] instanceof SocketConnector);
  SocketConnector socketConnector = (SocketConnector) jettyServer.getConnectors()[0];
  Assert.assertEquals(JETTY_PORT, socketConnector.getPort());
  final Context root = new Context(jettyServer, "/", Context.SESSIONS);
  root.addServlet(new ServletHolder(new SimpleServlet()), "/simple");
  jettyServer.start();
  final ExecutorApiClient tlsDisabledClient = new ExecutorApiClient(new Props());
  final String postResponse = tlsDisabledClient
      .doPost(new URI(SimpleServlet.TLS_DISABLED_URI), DispatchMethod.CONTAINERIZED,
          Optional.empty(), null);
  Assert.assertEquals(SimpleServlet.POST_RESPONSE_STRING, postResponse);
  jettyServer.stop();
}
This test tries to create the jetty-server and verify that it is reachable on the attached port via Http.
testSslDisabledJettyServer
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/common/ExecJettyServerModuleTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/common/ExecJettyServerModuleTest.java
Apache-2.0
/**
 * Creates an SSL-enabled jetty server (keystore/truststore configured) and verifies it is
 * reachable on the configured TLS port via an HTTPS POST from a TLS-enabled client.
 */
@Test
public void testSslEnabledJettyServer() throws Exception {
  ExecJettyServerModule execJettyServerModule = new ExecJettyServerModule();
  Props props = new Props();
  props.put("jetty.use.ssl", "true");
  props.put("executor.ssl.port", JETTY_TLS_PORT);
  props.put("jetty.keystore", KEYSTORE_PATH);
  props.put("jetty.password", DEFAULT_PASSWORD);
  props.put("jetty.keypassword", DEFAULT_PASSWORD);
  props.put("jetty.truststore", TRUSTSTORE_PATH);
  props.put("jetty.trustpassword", DEFAULT_PASSWORD);
  Server jettyServer = execJettyServerModule.createJettyServer(props);
  // An SSL server must expose exactly one SslSocketConnector on the TLS port.
  Assert.assertEquals(1, jettyServer.getConnectors().length);
  Assert.assertTrue(jettyServer.getConnectors()[0] instanceof SslSocketConnector);
  SslSocketConnector sslSocketConnector = (SslSocketConnector) jettyServer.getConnectors()[0];
  Assert.assertEquals(JETTY_TLS_PORT, sslSocketConnector.getPort());
  final Context root = new Context(jettyServer, "/", Context.SESSIONS);
  root.addServlet(new ServletHolder(new SimpleServlet()), "/simple");
  jettyServer.start();
  // Client trusts the server cert via the same truststore.
  Props clientProps = new Props();
  clientProps.put(EXECUTOR_CLIENT_TLS_ENABLED, "true");
  clientProps
      .put(EXECUTOR_CLIENT_TRUSTSTORE_PATH, TRUSTSTORE_PATH);
  clientProps.put(EXECUTOR_CLIENT_TRUSTSTORE_PASSWORD, "changeit");
  final ExecutorApiClient tlsEnabledClient = new ExecutorApiClient(clientProps);
  final String postResponse = tlsEnabledClient
      .doPost(new URI(SimpleServlet.TLS_ENABLED_URI), DispatchMethod.CONTAINERIZED,
          Optional.empty(), null);
  Assert.assertEquals(SimpleServlet.POST_RESPONSE_STRING, postResponse);
  jettyServer.stop();
}
This test tries to create the jetty-server and verify that it is reachable on the attached port via Https.
testSslEnabledJettyServer
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/common/ExecJettyServerModuleTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/common/ExecJettyServerModuleTest.java
Apache-2.0
/** Runs a DAG with a single node "a" that succeeds; flow goes RUNNING -> SUCCESS. */
@Test
public void oneNodeSuccess() throws Exception {
  createNodeInTestDag("a");
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.SUCCESS);
  addToExpectedSequence("fa", Status.SUCCESS);
  buildDagRunAndVerify();
}
Tests a DAG with one node which will run successfully.
oneNodeSuccess
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
/** Runs a two-node chain a -> b where both succeed; b starts only after a finishes. */
@Test
public void twoNodesSuccess() throws Exception {
  createNodeInTestDag("a");
  createNodeInTestDag("b");
  this.dagBuilder.addParentNode("b", "a");
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.SUCCESS);
  addToExpectedSequence("b", Status.RUNNING);
  addToExpectedSequence("b", Status.SUCCESS);
  addToExpectedSequence("fa", Status.SUCCESS);
  buildDagRunAndVerify();
}
Tests a DAG with two nodes which will run successfully. a | b
twoNodesSuccess
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
/** Runs a fan-out DAG (a parent of b and c) where all nodes succeed. */
@Test
public void threeNodesSuccess() throws Exception {
  createNodeInTestDag("a");
  createNodeInTestDag("b");
  createNodeInTestDag("c");
  this.dagBuilder.addParentNode("b", "a");
  this.dagBuilder.addParentNode("c", "a");
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.SUCCESS);
  addToExpectedSequence("b", Status.RUNNING);
  addToExpectedSequence("c", Status.RUNNING);
  addToExpectedSequence("b", Status.SUCCESS);
  addToExpectedSequence("c", Status.SUCCESS);
  addToExpectedSequence("fa", Status.SUCCESS);
  buildDagRunAndVerify();
}
Tests a DAG with three nodes which will run successfully. <pre> a / \ b c </pre>
threeNodesSuccess
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
/** Runs a single-node DAG whose node fails; the flow ends in FAILURE. */
@Test
public void oneNodeFailure() throws Exception {
  createNodeInTestDag("a");
  this.nodesToFail.add("a");
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.FAILURE);
  addToExpectedSequence("fa", Status.FAILURE);
  buildDagRunAndVerify();
}
Tests a DAG with one node which will fail.
oneNodeFailure
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
/** Fails parent a in an a -> b chain; child b is expected to be CANCELED. */
@Test
public void twoNodesFailFirst() throws Exception {
  createNodeInTestDag("a");
  createNodeInTestDag("b");
  this.dagBuilder.addParentNode("b", "a");
  this.nodesToFail.add("a");
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.FAILURE);
  addToExpectedSequence("b", Status.CANCELED);
  addToExpectedSequence("fa", Status.FAILURE);
  buildDagRunAndVerify();
}
Tests a DAG with two nodes, fails the first one. Expects the child node to be marked canceled. a (fail) | b
twoNodesFailFirst
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
/** Fails node b in a fan-out DAG; sibling c still finishes, flow ends FAILURE. */
@Test
public void threeNodesFailSecond() throws Exception {
  createNodeInTestDag("a");
  createNodeInTestDag("b");
  createNodeInTestDag("c");
  this.dagBuilder.addParentNode("b", "a");
  this.dagBuilder.addParentNode("c", "a");
  this.nodesToFail.add("b");
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.SUCCESS);
  addToExpectedSequence("b", Status.RUNNING);
  addToExpectedSequence("c", Status.RUNNING);
  addToExpectedSequence("b", Status.FAILURE);
  addToExpectedSequence("c", Status.SUCCESS);
  addToExpectedSequence("fa", Status.FAILURE);
  buildDagRunAndVerify();
}
Tests a DAG with three nodes with one failure. Expects the sibling nodes to finish. <pre> a / \ b (fail) c </pre>
threeNodesFailSecond
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
@Test
public void simple_sub_dag_success_case() throws Exception {
  // Build the inner DAG "fb" containing nodes "a" and "b".
  final TestSubDagProcessor subDagProcessor =
      new TestSubDagProcessor(this.dagService, this.statusChangeRecorder);
  final DagBuilder innerBuilder = new DagBuilder("fb", subDagProcessor);
  innerBuilder.createNode("a", this.nodeProcessor);
  innerBuilder.createNode("b", this.nodeProcessor);
  final Dag innerDag = innerBuilder.build();

  // Wrap the inner DAG in a node ("sfb") of the outer test DAG, with "c" downstream.
  final TestSubDagNodeProcessor subDagNodeProcessor =
      new TestSubDagNodeProcessor(
          this.dagService, this.statusChangeRecorder, innerDag, subDagProcessor);
  final String subDagNodeName = "sfb";
  this.dagBuilder.createNode(subDagNodeName, subDagNodeProcessor);
  createNodeInTestDag("c");
  this.dagBuilder.addParentNode("c", subDagNodeName);
  final Dag outerDag = this.dagBuilder.build();

  // The sub-DAG must run to completion before "c" starts; everything succeeds.
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence(subDagNodeName, Status.RUNNING);
  addToExpectedSequence("fb", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("b", Status.RUNNING);
  addToExpectedSequence("a", Status.SUCCESS);
  addToExpectedSequence("b", Status.SUCCESS);
  addToExpectedSequence("fb", Status.SUCCESS);
  addToExpectedSequence(subDagNodeName, Status.SUCCESS);
  addToExpectedSequence("c", Status.RUNNING);
  addToExpectedSequence("c", Status.SUCCESS);
  addToExpectedSequence("fa", Status.SUCCESS);

  runAndVerify(outerDag);
}
Tests a DAG with one subDag, all successful. <pre> sfb | c subDag: fb a b </pre>
simple_sub_dag_success_case
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
/**
 * Creates a node with the given name in the test DAG builder, wired to the
 * shared test node processor.
 *
 * @param name the name of the node to create
 */
private void createNodeInTestDag(final String name) { this.dagBuilder.createNode(name, this.nodeProcessor); }
Creates a node and adds it to the test dag.
createNodeInTestDag
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
Apache-2.0
@Test
public void waiting_nodes_are_canceled_when_killed() {
  // "parent" is already running; "child" is still waiting on it.
  final Node parent = createAndAddNode("a");
  parent.setStatus(Status.RUNNING);
  final Node child = createAndAddNode("b");
  child.addParent(parent);
  this.testFlow.setStatus(Status.RUNNING);

  this.testFlow.kill();

  // The running node transitions to KILLING; the waiting child is canceled outright.
  assertThat(parent.getStatus()).isEqualTo(Status.KILLING);
  assertThat(child.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(this.testFlow.getStatus()).isEqualTo(Status.KILLING);
}
Tests ready nodes are canceled when the dag is killed.
waiting_nodes_are_canceled_when_killed
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
Apache-2.0
/**
 * Tests that all waiting nodes are canceled when the DAG is killed.
 *
 * <pre>
 *     a (running)
 *    / \
 *   b   c
 *        \
 *         d
 * </pre>
 */
@Test
public void multiple_waiting_nodes_are_canceled_when_killed() {
  final Node aNode = createAndAddNode("a");
  aNode.setStatus(Status.RUNNING);
  final Node bNode = createAndAddNode("b");
  bNode.addParent(aNode);
  final Node cNode = createAndAddNode("c");
  cNode.addParent(aNode);
  final Node dNode = createAndAddNode("d");
  dNode.addParent(cNode);
  this.testFlow.setStatus(Status.RUNNING);
  this.testFlow.kill();
  assertThat(aNode.getStatus()).isEqualTo(Status.KILLING);
  assertThat(bNode.getStatus()).isEqualTo(Status.CANCELED);
  // Bug fix: the original asserted dNode twice and never verified cNode.
  assertThat(cNode.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(dNode.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(this.testFlow.getStatus()).isEqualTo(Status.KILLING);
}
Tests multiple ready nodes are canceled when the dag is killed. <pre> a (running) / \ b c \ d </pre>
multiple_waiting_nodes_are_canceled_when_killed
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
Apache-2.0
@Test
public void multiple_waiting_children_are_canceled_when_parent_failed() {
  // Linear chain a -> b -> c, with "a" currently running.
  final Node head = createAndAddNode("a");
  head.setStatus(Status.RUNNING);
  final Node middle = createAndAddNode("b");
  middle.addParent(head);
  final Node tail = createAndAddNode("c");
  tail.addParent(middle);
  this.testFlow.setStatus(Status.RUNNING);

  head.markFailed();

  // The failure must cascade: both downstream nodes end up canceled.
  assertThat(middle.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(tail.getStatus()).isEqualTo(Status.CANCELED);
}
Tests multiple ready nodes are canceled when the parent node failed. <pre> a (running) | b | c </pre>
multiple_waiting_children_are_canceled_when_parent_failed
java
azkaban/azkaban
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
Apache-2.0