code
stringlengths 23
201k
| docstring
stringlengths 17
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
72
| path
stringlengths 11
317
| url
stringlengths 57
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
@Test
public void twoNodesSuccess() throws Exception {
  // Dag shape: a -> b (b waits for a).
  for (final String name : new String[]{"a", "b"}) {
    createNodeInTestDag(name);
  }
  this.dagBuilder.addParentNode("b", "a");

  // The dag "fa" starts first; the nodes then run and succeed one after
  // another in topological order before the dag itself reports success.
  addToExpectedSequence("fa", Status.RUNNING);
  for (final String name : new String[]{"a", "b"}) {
    addToExpectedSequence(name, Status.RUNNING);
    addToExpectedSequence(name, Status.SUCCESS);
  }
  addToExpectedSequence("fa", Status.SUCCESS);

  buildDagRunAndVerify();
}
|
Tests a DAG with two nodes which will run successfully.
<pre>
a
|
b
</pre>
|
twoNodesSuccess
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
Apache-2.0
|
@Test
public void threeNodesSuccess() throws Exception {
  // Dag shape:
  //     a
  //    / \
  //   b   c
  for (final String name : new String[]{"a", "b", "c"}) {
    createNodeInTestDag(name);
  }
  for (final String child : new String[]{"b", "c"}) {
    this.dagBuilder.addParentNode(child, "a");
  }

  // b and c both start once a succeeds, so their RUNNING transitions are
  // interleaved before their SUCCESS transitions; the dag succeeds last.
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.SUCCESS);
  addToExpectedSequence("b", Status.RUNNING);
  addToExpectedSequence("c", Status.RUNNING);
  addToExpectedSequence("b", Status.SUCCESS);
  addToExpectedSequence("c", Status.SUCCESS);
  addToExpectedSequence("fa", Status.SUCCESS);

  buildDagRunAndVerify();
}
|
Tests a DAG with three nodes which will run successfully.
<pre>
a
/ \
b c
</pre>
|
threeNodesSuccess
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
Apache-2.0
|
@Test
public void oneNodeFailure() throws Exception {
  // A single failing node fails the whole dag.
  final String nodeName = "a";
  createNodeInTestDag(nodeName);
  this.nodesToFail.add(nodeName);

  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence(nodeName, Status.RUNNING);
  addToExpectedSequence(nodeName, Status.FAILURE);
  addToExpectedSequence("fa", Status.FAILURE);

  buildDagRunAndVerify();
}
|
Tests a DAG with one node which will fail.
|
oneNodeFailure
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
Apache-2.0
|
@Test
public void twoNodesFailFirst() throws Exception {
  // Dag shape: a -> b; a is configured to fail.
  final String parent = "a";
  final String child = "b";
  createNodeInTestDag(parent);
  createNodeInTestDag(child);
  this.dagBuilder.addParentNode(child, parent);
  this.nodesToFail.add(parent);

  // The child never runs: it is canceled when its parent fails, and the
  // dag as a whole ends in FAILURE.
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence(parent, Status.RUNNING);
  addToExpectedSequence(parent, Status.FAILURE);
  addToExpectedSequence(child, Status.CANCELED);
  addToExpectedSequence("fa", Status.FAILURE);

  buildDagRunAndVerify();
}
|
Tests a DAG with two nodes, fails the first one.
Expects the child node to be marked canceled.
<pre>
a (fail)
|
b
</pre>
|
twoNodesFailFirst
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
Apache-2.0
|
@Test
public void threeNodesFailSecond() throws Exception {
  // Dag shape:
  //     a
  //    / \
  //   b   c      (b is configured to fail)
  for (final String name : new String[]{"a", "b", "c"}) {
    createNodeInTestDag(name);
  }
  for (final String child : new String[]{"b", "c"}) {
    this.dagBuilder.addParentNode(child, "a");
  }
  this.nodesToFail.add("b");

  // The sibling c still runs to completion even though b fails; the dag as
  // a whole ends in FAILURE.
  addToExpectedSequence("fa", Status.RUNNING);
  addToExpectedSequence("a", Status.RUNNING);
  addToExpectedSequence("a", Status.SUCCESS);
  addToExpectedSequence("b", Status.RUNNING);
  addToExpectedSequence("c", Status.RUNNING);
  addToExpectedSequence("b", Status.FAILURE);
  addToExpectedSequence("c", Status.SUCCESS);
  addToExpectedSequence("fa", Status.FAILURE);

  buildDagRunAndVerify();
}
|
Tests a DAG with three nodes with one failure.
Expects the sibling nodes to finish.
<pre>
a
/ \
b (fail) c
</pre>
|
threeNodesFailSecond
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
Apache-2.0
|
@Test
public void simple_sub_dag_success_case() throws Exception {
// Build the sub dag "fb" containing two independent nodes a and b.
final TestSubDagProcessor testSubDagProcessor = new TestSubDagProcessor
(this.dagService, this.statusChangeRecorder);
final DagBuilder subDagBuilder = new DagBuilder("fb", testSubDagProcessor);
subDagBuilder.createNode("a", this.nodeProcessor);
subDagBuilder.createNode("b", this.nodeProcessor);
final Dag bDag = subDagBuilder.build();
// The node processor that bridges the parent dag and the sub dag: starting
// the node starts the sub dag, and the sub dag's result is reported back.
final TestSubDagNodeProcessor testSubDagNodeProcessor = new TestSubDagNodeProcessor
(this.dagService, this.statusChangeRecorder, bDag, testSubDagProcessor);
// "sfb" is the node in the parent dag that represents the sub dag; "c"
// depends on it and therefore runs only after the whole sub dag succeeds.
final String SUB_DAG_NAME = "sfb";
this.dagBuilder.createNode(SUB_DAG_NAME, testSubDagNodeProcessor);
createNodeInTestDag("c");
this.dagBuilder.addParentNode("c", SUB_DAG_NAME);
final Dag dag = this.dagBuilder.build();
// Expected order: parent dag starts, the sub-dag node starts, the sub dag
// runs a and b to completion, then c runs, then the parent dag succeeds.
addToExpectedSequence("fa", Status.RUNNING);
addToExpectedSequence(SUB_DAG_NAME, Status.RUNNING);
addToExpectedSequence("fb", Status.RUNNING);
addToExpectedSequence("a", Status.RUNNING);
addToExpectedSequence("b", Status.RUNNING);
addToExpectedSequence("a", Status.SUCCESS);
addToExpectedSequence("b", Status.SUCCESS);
addToExpectedSequence("fb", Status.SUCCESS);
addToExpectedSequence(SUB_DAG_NAME, Status.SUCCESS);
addToExpectedSequence("c", Status.RUNNING);
addToExpectedSequence("c", Status.SUCCESS);
addToExpectedSequence("fa", Status.SUCCESS);
runAndVerify(dag);
}
|
Tests a DAG with one subDag, all successful.
<pre>
sfb
|
c
subDag: fb
a b
</pre>
|
simple_sub_dag_success_case
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
Apache-2.0
|
/**
 * Creates a node with the shared test node processor and adds it to the dag
 * under construction.
 *
 * @param name the node name
 */
private void createNodeInTestDag(final String name) {
this.dagBuilder.createNode(name, this.nodeProcessor);
}
|
Creates a node and add to the test dag.
|
createNodeInTestDag
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagServiceTest.java
|
Apache-2.0
|
@Test
public void dag_finish_with_only_disabled_nodes() {
  // given: a dag whose only node is disabled
  final Node node = createAndAddNode("a");
  node.setStatus(Status.DISABLED);

  // when
  this.testFlow.start();

  // then: the node stays disabled and the dag finishes successfully
  assertThat(node.getStatus()).isEqualTo(Status.DISABLED);
  assertThat(this.testFlow.getStatus()).isEqualTo(Status.SUCCESS);
}
|
Tests the dag state (including its nodes' states) transitions.
Focuses on how the dag state changes in response to one external request.
|
dag_finish_with_only_disabled_nodes
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void running_nodes_can_be_killed() {
  // given: a running dag with one running node
  final Node node = createAndAddNode("a");
  node.setStatus(Status.RUNNING);
  this.testFlow.setStatus(Status.RUNNING);

  // when
  this.testFlow.kill();

  // then: both the node and the dag transition to KILLING
  assertThat(node.getStatus()).isEqualTo(Status.KILLING);
  assertThat(this.testFlow.getStatus()).isEqualTo(Status.KILLING);
}
|
Tests the dag state (including its nodes' states) transitions.
Focuses on how the dag state changes in response to one external request.
|
running_nodes_can_be_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void kill_node_in_terminal_state_should_have_no_effect() {
  // Killing a dag that has already reached a terminal state must be a
  // no-op, regardless of which terminal state it is in.
  for (final Status terminalState : Status.TERMINAL_STATES) {
    kill_dag_in_this_state_should_have_no_effect(terminalState);
  }
}
|
Tests the dag state (including its nodes' states) transitions.
Focuses on how the dag state changes in response to one external request.
|
kill_node_in_terminal_state_should_have_no_effect
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void kill_node_in_killing_state_should_have_no_effect() {
// Killing a dag that is already being killed must not change anything.
kill_dag_in_this_state_should_have_no_effect(Status.KILLING);
}
|
Tests the dag state (including its nodes' states) transitions.
Focuses on how the dag state changes in response to one external request.
|
kill_node_in_killing_state_should_have_no_effect
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
/**
 * Asserts that killing the dag while it is in the given state neither changes
 * the dag's status nor notifies the dag processor of any status change.
 *
 * @param status the dag status to start from
 */
private void kill_dag_in_this_state_should_have_no_effect(final Status status) {
// given
this.testFlow.setStatus(status);
// when
this.testFlow.kill();
// then: status unchanged and no callback fired
assertThat(this.testFlow.getStatus()).isEqualTo(status);
verify(this.mockDagProcessor, never()).changeStatus(any(), any());
}
|
Tests the dag state (including its nodes' states) transitions.
Focuses on how the dag state changes in response to one external request.
|
kill_dag_in_this_state_should_have_no_effect
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void waiting_nodes_are_canceled_when_killed() {
  // given: the parent is running and the child still waits on it
  final Node parent = createAndAddNode("a");
  parent.setStatus(Status.RUNNING);
  final Node child = createAndAddNode("b");
  child.addParent(parent);
  this.testFlow.setStatus(Status.RUNNING);

  // when
  this.testFlow.kill();

  // then: the running parent is being killed, the waiting child is canceled
  assertThat(parent.getStatus()).isEqualTo(Status.KILLING);
  assertThat(child.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(this.testFlow.getStatus()).isEqualTo(Status.KILLING);
}
|
Tests ready nodes are canceled when the dag is killed.
|
waiting_nodes_are_canceled_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void multiple_waiting_nodes_are_canceled_when_killed() {
  // Dag shape:
  //   a (running)
  //   / \
  //  b   c
  //       \
  //        d
  final Node aNode = createAndAddNode("a");
  aNode.setStatus(Status.RUNNING);
  final Node bNode = createAndAddNode("b");
  bNode.addParent(aNode);
  final Node cNode = createAndAddNode("c");
  cNode.addParent(aNode);
  final Node dNode = createAndAddNode("d");
  dNode.addParent(cNode);
  this.testFlow.setStatus(Status.RUNNING);

  this.testFlow.kill();

  // The running node transitions to KILLING; every waiting descendant is
  // canceled. Fix: the original asserted dNode's status twice and never
  // checked cNode at all.
  assertThat(aNode.getStatus()).isEqualTo(Status.KILLING);
  assertThat(bNode.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(cNode.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(dNode.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(this.testFlow.getStatus()).isEqualTo(Status.KILLING);
}
|
Tests multiple ready nodes are canceled when the dag is killed.
<pre>
a (running)
/ \
b c
\
d
</pre>
|
multiple_waiting_nodes_are_canceled_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void multiple_waiting_children_are_canceled_when_parent_failed() {
  // given: a running chain a -> b -> c
  final Node nodeA = createAndAddNode("a");
  nodeA.setStatus(Status.RUNNING);
  final Node nodeB = createAndAddNode("b");
  nodeB.addParent(nodeA);
  final Node nodeC = createAndAddNode("c");
  nodeC.addParent(nodeB);
  this.testFlow.setStatus(Status.RUNNING);

  // when: the root of the chain fails
  nodeA.markFailed();

  // then: every waiting descendant is canceled
  assertThat(nodeB.getStatus()).isEqualTo(Status.CANCELED);
  assertThat(nodeC.getStatus()).isEqualTo(Status.CANCELED);
}
|
Tests multiple ready nodes are canceled when the parent node failed.
<pre>
a (running)
|
b
|
c
</pre>
|
multiple_waiting_children_are_canceled_when_parent_failed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void blocked_nodes_are_canceled_when_killed() {
  // given: a running parent with a child explicitly in the BLOCKED state
  final Node parent = createAndAddNode("a");
  parent.setStatus(Status.RUNNING);
  final Node blockedChild = createAndAddNode("b");
  blockedChild.addParent(parent);
  blockedChild.setStatus(Status.BLOCKED);
  this.testFlow.setStatus(Status.RUNNING);

  // when
  this.testFlow.kill();

  // then: the blocked child is canceled along with the kill of the parent
  assertThat(parent.getStatus()).isEqualTo(Status.KILLING);
  assertThat(blockedChild.getStatus()).isEqualTo(Status.CANCELED);
}
|
Tests blocked nodes are canceled when the dag is killed.
|
blocked_nodes_are_canceled_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void success_node_state_remain_the_same_when_killed() {
  // given: a succeeded parent and a running child
  final Node succeededNode = createAndAddNode("a");
  succeededNode.setStatus(Status.SUCCESS);
  final Node runningNode = createAndAddNode("b");
  runningNode.setStatus(Status.RUNNING);
  runningNode.addParent(succeededNode);

  // when
  this.testFlow.kill();

  // then: only the running node is affected by the kill
  assertThat(succeededNode.getStatus()).isEqualTo(Status.SUCCESS);
  assertThat(runningNode.getStatus()).isEqualTo(Status.KILLING);
}
|
Tests success nodes' states remain the same when the dag is killed.
<pre>
a (success)
/
b (running)
</pre>
|
success_node_state_remain_the_same_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
@Test
public void failed_node_state_remain_the_same_when_killed() {
  // given: one running node and one already-failed node (no dependency)
  final Node runningNode = createAndAddNode("a");
  runningNode.setStatus(Status.RUNNING);
  final Node failedNode = createAndAddNode("b");
  failedNode.setStatus(Status.FAILURE);

  // when
  this.testFlow.kill();

  // then: the failed node keeps its FAILURE state; only the running node is killed
  assertThat(runningNode.getStatus()).isEqualTo(Status.KILLING);
  assertThat(failedNode.getStatus()).isEqualTo(Status.FAILURE);
}
|
Tests failed nodes' states remain the same when the dag is killed.
This can happen when running jobs are allowed to finish when a node fails.
<pre>
a (running) b (failure)
</pre>
|
failed_node_state_remain_the_same_when_killed
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
/**
 * Creates a node with a do-nothing (mocked) processor and registers it with
 * the test dag.
 *
 * @param name the node name
 * @return the newly created node
 */
private Node createAndAddNode(final String name) {
  final Node newNode = TestUtil.createNodeWithNullProcessor(name, this.testFlow);
  this.testFlow.addNode(newNode);
  return newNode;
}
|
Creates a node and add to the test dag.
@param name node name
@return Node object
|
createAndAddNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/DagTest.java
|
Apache-2.0
|
/**
 * Appends the node's (name, status) pair to the recorded status-change
 * sequence.
 *
 * @param node the node whose current status is recorded
 */
void recordNode(final Node node) {
this.sequence.add(new Pair<>(node.getName(), node.getStatus()));
}
|
Records the sequence of nodes and dag status change.
|
recordNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/StatusChangeRecorder.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/StatusChangeRecorder.java
|
Apache-2.0
|
/**
 * Appends the dag's (name, status) pair to the recorded status-change
 * sequence.
 *
 * @param dag the dag whose current status is recorded
 */
void recordDag(final Dag dag) {
this.sequence.add(new Pair<>(dag.getName(), dag.getStatus()));
}
|
Records the sequence of nodes and dag status change.
|
recordDag
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/StatusChangeRecorder.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/StatusChangeRecorder.java
|
Apache-2.0
|
/**
 * Asserts that the given map has exactly one entry per {@link Status} value,
 * i.e. that no status has been forgotten.
 *
 * @param map a map keyed by status with an associated boolean value
 */
private void assertMapSizeMatchEnumSize(
    final Map<Status, Boolean> map) {
  assertThat(Status.values().length).isEqualTo(map.size());
}
|
Asserts the given map contains the same number of entries as the number of values of the {@link
Status} has.
@param map a map that contains status and its associated boolean value
|
assertMapSizeMatchEnumSize
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/StatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/StatusTest.java
|
Apache-2.0
|
/**
 * Records the node's status change and simulates a long-running job: a
 * RUNNING node is never marked finished (only the latch is released), and a
 * KILLING node is reported to the dag service as killed.
 *
 * @param node the node whose status changed
 * @param status the node's new status
 */
@Override
public void changeStatus(final Node node, final Status status) {
  System.out.println(node);
  this.statusChangeRecorder.recordNode(node);
  // Consistency fix: switch on the reported status parameter like the other
  // test processors do (the original switched on node.getStatus(), which is
  // presumably the same value at this point — confirm against DagService's
  // call sites).
  switch (status) {
    case RUNNING:
      // Don't mark the job finished. Simulate a long running job.
      this.nodeRunningLatch.countDown();
      break;
    case KILLING:
      this.dagService.markNodeKilled(node);
      break;
    default:
      break;
  }
}
|
A node processor that tests killing a node.
@param nodeRunningLatch signal that the node has started running
|
changeStatus
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestKillNodeProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestKillNodeProcessor.java
|
Apache-2.0
|
/**
 * Propagates a status change of the sub-dag node in the parent dag to the
 * sub dag itself: starting the node starts the sub dag, killing the node
 * kills the sub dag.
 *
 * @param node the sub-dag node in the parent dag
 * @param status the node's new status
 */
@Override
public void changeStatus(final Node node, final Status status) {
  System.out.println(node);
  this.statusChangeRecorder.recordNode(node);
  if (status == Status.RUNNING) {
    // Remember which node in the parent dag represents this sub dag so the
    // sub-dag processor can report status changes back to the parent dag.
    this.testSubDagProcessor.setNode(node);
    this.dagService.startDag(this.dag);
  } else if (status == Status.KILLING) {
    this.dagService.killDag(this.dag);
  }
}
|
Triggers the sub DAG state change when the sub DAG node in the parent DAG's status changes.
@param node the node to change
@param status the new status
|
changeStatus
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagNodeProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagNodeProcessor.java
|
Apache-2.0
|
/**
 * Mirrors the sub dag's terminal status onto its representative node in the
 * parent dag: sub-dag SUCCESS marks the node successful, sub-dag FAILURE
 * marks the node failed.
 *
 * @param dag the sub dag whose status changed
 * @param status the sub dag's new status
 */
@Override
public void changeStatus(final Dag dag, final Status status) {
  System.out.println(dag);
  this.statusChangeRecorder.recordDag(dag);
  requireNonNull(this.node, "Node for the subDag in the parent DAG can't be null.");
  if (status == Status.SUCCESS) {
    this.dagService.markNodeSuccess(this.node);
  } else if (status == Status.FAILURE) {
    this.dagService.markNodeFailed(this.node);
  }
}
|
Transfers the node state in the parent DAG when the sub DAG status changes.
@param dag the dag to change
@param status the new status
|
changeStatus
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
Apache-2.0
|
/**
 * Sets the node in the parent dag that represents this sub dag.
 *
 * <p>This can't be passed in the constructor since it would cause a circular
 * dependency problem.
 *
 * @param node the node as part of the parent flow
 */
public void setNode(final Node node) {
this.node = node;
}
|
Sets the node to which this subflow belongs.
<p>
Can't pass this information in the constructor since it will cause a circular dependency
problem.
@param node the node as part of the parent flow
|
setNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestSubDagProcessor.java
|
Apache-2.0
|
/**
 * Creates a node whose processor is a Mockito mock that does nothing.
 *
 * @param name node name
 * @param dag the dag the node belongs to
 * @return the newly created node
 */
static Node createNodeWithNullProcessor(final String name, final Dag dag) {
return new Node(name, mock(NodeProcessor.class), dag);
}
|
Creates a node with a processor that does nothing.
@param name node name
|
createNodeWithNullProcessor
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/dag/TestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/dag/TestUtil.java
|
Apache-2.0
|
@Ignore
@Test
public void runFlowOnArbitraryCondition() throws Exception {
  // Run the conditional flow with no extra flow properties.
  setUp(CONDITIONAL_FLOW_7, new HashMap<>());
  final ExecutableFlow executableFlow = this.runner.getExecutableFlow();

  // jobB's condition contains arbitrary code, so jobB must be cancelled and
  // the flow killed.
  assertStatus(executableFlow, "jobA", Status.SUCCEEDED);
  assertStatus(executableFlow, "jobB", Status.CANCELLED);
  assertFlowStatus(executableFlow, Status.KILLED);

  // The arbitrary code should be restricted from creating a new file.
  Assert.assertFalse(new File("new.txt").exists());
}
|
JobB has defined "condition: var fImport = new JavaImporter(java.io.File); with(fImport) { var
f = new File('new'); f.createNewFile(); }"
Null ProtectionDomain will restrict this arbitrary code from creating a new file.
However it will not kick in when the change for condition whitelisting is implemented.
As a result, this test case will be ignored.
@throws Exception the exception
|
runFlowOnArbitraryCondition
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalFlowTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalFlowTest.java
|
Apache-2.0
|
/**
 * Loads the given flow's YAML definition through mocked project-loader calls
 * and starts a runner thread for it.
 *
 * @param flowName name of the flow (its YAML file is {@code flowName + ".flow"})
 * @param flowProps extra flow properties to run with
 */
private void setUp(final String flowName, final HashMap<String, String> flowProps)
    throws Exception {
  final String yamlFileName = flowName + ".flow";
  // The latest stored version of the flow file is stubbed to 1.
  when(this.testUtil.getProjectLoader()
      .getLatestFlowVersion(this.project.getId(), this.project.getVersion(), yamlFileName))
      .thenReturn(1);
  // Serve the flow file from the test resources directory.
  when(this.testUtil.getProjectLoader()
      .getUploadedFlowFile(eq(this.project.getId()), eq(this.project.getVersion()),
          eq(yamlFileName), eq(1), any(File.class)))
      .thenReturn(ExecutionsTestUtil.getFlowFile(FLOW_YAML_DIR, yamlFileName));
  this.runner = this.testUtil.createFromFlowMap(flowName, flowProps);
  FlowRunnerTestUtil.startThread(this.runner);
}
|
JobB has defined "condition: var fImport = new JavaImporter(java.io.File); with(fImport) { var
f = new File('new'); f.createNewFile(); }"
Null ProtectionDomain will restrict this arbitrary code from creating a new file.
However it will not kick in when the change for condition whitelisting is implemented.
As a result, this test case will be ignored.
@throws Exception the exception
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalFlowTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalFlowTest.java
|
Apache-2.0
|
@Ignore
@Test
public void runFlowOnArbitraryCondition() throws Exception {
  // Run the conditional flow rooted at jobD with no extra flow properties.
  setUp(CONDITIONAL_FLOW_7, new HashMap<>(), "jobD");
  final ExecutableFlow executableFlow = this.runner.getExecutableFlow();

  // jobB's condition contains arbitrary code, so jobB must be cancelled and
  // the flow killed.
  assertStatus(executableFlow, "jobA", Status.SUCCEEDED);
  assertStatus(executableFlow, "jobB", Status.CANCELLED);
  assertFlowStatus(executableFlow, Status.KILLED);

  // The arbitrary code should be restricted from creating a new file.
  Assert.assertFalse(new File("new.txt").exists());
}
|
JobB has defined "condition: var fImport = new JavaImporter(java.io.File); with(fImport) { var
f = new File('new'); f.createNewFile(); }"
Null ProtectionDomain will restrict this arbitrary code from creating a new file.
However it will not kick in when the change for condition allow-listing is implemented.
As a result, this test case will be ignored.
@throws Exception the exception
|
runFlowOnArbitraryCondition
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalJobsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalJobsTest.java
|
Apache-2.0
|
/**
 * Prepares a flow runner for the given conditional flow: installs an
 * allow-all security policy (so the test's SecurityManager does not block
 * the test itself), stubs the project-loader calls for every job file, and
 * starts the runner thread.
 *
 * @param flowName the flow directory/name under FLOW_DIR
 * @param flowProps extra flow properties (currently unused here)
 * @param rootJob the job from which the flow map is created
 */
private void setUp(final String flowName, final HashMap<String, String> flowProps, String rootJob)
throws Exception {
String dir = FLOW_DIR + "/" + flowName;
this.testUtil = new FlowRunnerTestUtil(dir, this.temporaryFolder);
// Install a permissive policy plus a SecurityManager once per JVM.
// NOTE(review): SecurityManager/Policy are deprecated for removal in recent
// JDKs — confirm the target Java version before relying on this.
if (System.getSecurityManager() == null) {
Policy.setPolicy(new Policy() {
@Override
public boolean implies(final ProtectionDomain domain, final Permission permission) {
return true; // allow all
}
});
System.setSecurityManager(new SecurityManager());
}
this.project = this.testUtil.getProject();
// Stub version lookup and file retrieval for every job file in the flow.
for (String job : Arrays.asList("jobA", "jobB", "jobC", "jobD")) {
final String jobFile = job + ".job";
when(this.testUtil.getProjectLoader()
.getLatestFlowVersion(
this.project.getId(),
this.project.getVersion(),
jobFile
))
.thenReturn(1);
when(this.testUtil.getProjectLoader()
.getUploadedFlowFile(
eq(this.project.getId()),
eq(this.project.getVersion()),
eq(jobFile),
eq(1),
any(File.class)
))
.thenReturn(ExecutionsTestUtil.getFlowFile(dir, jobFile));
}
this.runner = this.testUtil.createFromFlowMap(rootJob, flowName);
FlowRunnerTestUtil.startThread(this.runner);
}
|
JobB has defined "condition: var fImport = new JavaImporter(java.io.File); with(fImport) { var
f = new File('new'); f.createNewFile(); }"
Null ProtectionDomain will restrict this arbitrary code from creating a new file.
However it will not kick in when the change for condition allow-listing is implemented.
As a result, this test case will be ignored.
@throws Exception the exception
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalJobsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalJobsTest.java
|
Apache-2.0
|
/**
 * Allow-all security policy: grants every permission to every protection
 * domain so the installed SecurityManager never blocks the test's own code.
 */
@Override
public boolean implies(final ProtectionDomain domain, final Permission permission) {
return true; // allow all
}
|
JobB has defined "condition: var fImport = new JavaImporter(java.io.File); with(fImport) { var
f = new File('new'); f.createNewFile(); }"
Null ProtectionDomain will restrict this arbitrary code from creating a new file.
However it will not kick in when the change for condition allow-listing is implemented.
As a result, this test case will be ignored.
@throws Exception the exception
|
implies
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalJobsTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerConditionalJobsTest.java
|
Apache-2.0
|
/**
 * Creates the flow-runner test utility backed by the "embedded2" flow
 * directory before each test.
 */
@Before
public void setUp() throws Exception {
this.testUtil = new FlowRunnerTestUtil("embedded2", this.temporaryFolder);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
/**
 * Runs two executions of the same flow pipelined at level 1, with the "jobb"
 * sub-flow disabled in the previous execution, and walks both executions
 * step by step: each InteractiveTestJob is succeeded manually and the
 * resulting statuses of both flows are asserted after every step. The
 * disabled sub-flow must end SKIPPED in the previous run while the pipelined
 * run still executes it normally.
 */
@Test
public void testBasicPipelineLevel1RunDisabledJobs() throws Exception {
// Set up a "prev" execution and a "pipe" execution that pipelines on it.
final FlowRunner previousRunner = this.testUtil.createFromFlowMap("jobf", "prev");
final ExecutionOptions options = new ExecutionOptions();
options.setPipelineExecutionId(previousRunner.getExecutableFlow()
.getExecutionId());
options.setPipelineLevel(1);
final FlowWatcher watcher = new LocalFlowWatcher(previousRunner);
final FlowRunner pipelineRunner = this.testUtil.createFromFlowMap("jobf", "pipe", options);
pipelineRunner.setFlowWatcher(watcher);
// 1. START FLOW
final ExecutableFlow pipelineFlow = pipelineRunner.getExecutableFlow();
final ExecutableFlow previousFlow = previousRunner.getExecutableFlow();
// disable the innerFlow (entire sub-flow)
previousFlow.getExecutableNodePath("jobb").setStatus(Status.DISABLED);
FlowRunnerTestUtil.startThread(previousRunner);
// NOTE(review): the next assertion is duplicated — presumably a harmless
// copy/paste; confirm before removing.
assertStatus(previousFlow, "joba", Status.RUNNING);
assertStatus(previousFlow, "joba", Status.RUNNING);
assertStatus(previousFlow, "joba1", Status.RUNNING);
FlowRunnerTestUtil.startThread(pipelineRunner);
// The pipelined run queues behind the previous run.
assertStatus(pipelineFlow, "joba", Status.QUEUED);
assertStatus(pipelineFlow, "joba1", Status.QUEUED);
// Succeed prev:joba — the disabled jobb is SKIPPED, its inner jobs stay READY.
InteractiveTestJob.getTestJob("prev:joba").succeedJob();
assertStatus(previousFlow, "joba", Status.SUCCEEDED);
assertStatus(previousFlow, "jobb", Status.SKIPPED);
assertStatus(previousFlow, "jobb:innerJobA", Status.READY);
assertStatus(previousFlow, "jobd", Status.RUNNING);
assertStatus(previousFlow, "jobc", Status.RUNNING);
assertStatus(previousFlow, "jobd:innerJobA", Status.RUNNING);
assertStatus(pipelineFlow, "joba", Status.RUNNING);
assertStatus(previousFlow, "jobb:innerJobA", Status.READY);
assertStatus(previousFlow, "jobb:innerJobB", Status.READY);
assertStatus(previousFlow, "jobb:innerJobC", Status.READY);
// Succeed pipe:joba — the pipelined run starts jobb normally (not skipped).
InteractiveTestJob.getTestJob("pipe:joba").succeedJob();
assertStatus(pipelineFlow, "joba", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobb", Status.RUNNING);
assertStatus(pipelineFlow, "jobd", Status.RUNNING);
assertStatus(pipelineFlow, "jobc", Status.QUEUED);
assertStatus(pipelineFlow, "jobd:innerJobA", Status.QUEUED);
assertStatus(pipelineFlow, "jobb:innerJobA", Status.RUNNING);
InteractiveTestJob.getTestJob("prev:jobd:innerJobA").succeedJob();
assertStatus(previousFlow, "jobd:innerJobA", Status.SUCCEEDED);
assertStatus(previousFlow, "jobd:innerFlow2", Status.RUNNING);
assertStatus(pipelineFlow, "jobd:innerJobA", Status.RUNNING);
// Finish the previous d side
InteractiveTestJob.getTestJob("prev:jobd:innerFlow2").succeedJob();
assertStatus(previousFlow, "jobd:innerFlow2", Status.SUCCEEDED);
assertStatus(previousFlow, "jobd", Status.SUCCEEDED);
InteractiveTestJob.getTestJob("pipe:jobb:innerJobA").succeedJob();
InteractiveTestJob.getTestJob("prev:jobc").succeedJob();
// The previous run's skipped sub-flow stays READY throughout.
assertStatus(previousFlow, "jobb:innerJobB", Status.READY);
assertStatus(previousFlow, "jobb:innerJobC", Status.READY);
assertStatus(previousFlow, "jobb:innerFlow", Status.READY);
assertStatus(previousFlow, "jobc", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobb:innerJobA", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobc", Status.RUNNING);
assertStatus(pipelineFlow, "jobb:innerJobB", Status.RUNNING);
assertStatus(pipelineFlow, "jobb:innerJobC", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:jobc").succeedJob();
assertStatus(previousFlow, "jobb:innerFlow", Status.READY);
assertStatus(previousFlow, "jobb", Status.SKIPPED);
assertStatus(previousFlow, "jobe", Status.RUNNING);
assertStatus(pipelineFlow, "jobc", Status.SUCCEEDED);
InteractiveTestJob.getTestJob("pipe:jobb:innerJobB").succeedJob();
InteractiveTestJob.getTestJob("pipe:jobb:innerJobC").succeedJob();
InteractiveTestJob.getTestJob("prev:jobe").succeedJob();
assertStatus(previousFlow, "jobe", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobb:innerJobB", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobb:innerJobC", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobb:innerFlow", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:jobd:innerJobA").succeedJob();
InteractiveTestJob.getTestJob("pipe:jobb:innerFlow").succeedJob();
assertStatus(pipelineFlow, "jobb", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobd:innerJobA", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobb:innerFlow", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobd:innerFlow2", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:jobd:innerFlow2").succeedJob();
InteractiveTestJob.getTestJob("prev:joba1").succeedJob();
assertStatus(pipelineFlow, "jobd:innerFlow2", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobd", Status.SUCCEEDED);
assertStatus(previousFlow, "jobf", Status.RUNNING);
assertStatus(previousFlow, "joba1", Status.SUCCEEDED);
assertStatus(pipelineFlow, "joba1", Status.RUNNING);
assertStatus(pipelineFlow, "jobe", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:jobe").succeedJob();
InteractiveTestJob.getTestJob("prev:jobf").succeedJob();
assertStatus(pipelineFlow, "jobe", Status.SUCCEEDED);
assertStatus(previousFlow, "jobf", Status.SUCCEEDED);
assertFlowStatus(previousFlow, Status.SUCCEEDED);
InteractiveTestJob.getTestJob("pipe:joba1").succeedJob();
assertStatus(pipelineFlow, "joba1", Status.SUCCEEDED);
assertStatus(pipelineFlow, "jobf", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:jobf").succeedJob();
// Both runner threads must shut down and both flows succeed.
assertThreadShutDown(previousRunner);
assertThreadShutDown(pipelineRunner);
assertFlowStatus(pipelineFlow, Status.SUCCEEDED);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
testBasicPipelineLevel1RunDisabledJobs
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
@Test
public void testBasicPipelineLevel1Run() throws Exception {
  // Two executions of the "jobf" flow: "prev" runs freely while "pipe" is
  // pipelined at level 1 behind it, i.e. each job in "pipe" stays QUEUED
  // until the same job has SUCCEEDED in "prev".
  // (Fix: a duplicated assertStatus(previousFlow, "joba", RUNNING) line was removed.)
  final FlowRunner previousRunner = this.testUtil.createFromFlowMap("jobf", "prev");
  final ExecutionOptions options = new ExecutionOptions();
  options.setPipelineExecutionId(previousRunner.getExecutableFlow()
      .getExecutionId());
  options.setPipelineLevel(1);
  final FlowWatcher watcher = new LocalFlowWatcher(previousRunner);
  final FlowRunner pipelineRunner = this.testUtil.createFromFlowMap("jobf", "pipe", options);
  pipelineRunner.setFlowWatcher(watcher);

  // 1. START FLOW
  final ExecutableFlow pipelineFlow = pipelineRunner.getExecutableFlow();
  final ExecutableFlow previousFlow = previousRunner.getExecutableFlow();
  FlowRunnerTestUtil.startThread(previousRunner);
  assertStatus(previousFlow, "joba", Status.RUNNING);
  assertStatus(previousFlow, "joba1", Status.RUNNING);

  // The pipelined run starts with its root jobs blocked behind "prev".
  FlowRunnerTestUtil.startThread(pipelineRunner);
  assertStatus(pipelineFlow, "joba", Status.QUEUED);
  assertStatus(pipelineFlow, "joba1", Status.QUEUED);

  // Finishing prev:joba unblocks pipe:joba and starts joba's children in prev.
  InteractiveTestJob.getTestJob("prev:joba").succeedJob();
  assertStatus(previousFlow, "joba", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobb", Status.RUNNING);
  assertStatus(previousFlow, "jobb:innerJobA", Status.RUNNING);
  assertStatus(previousFlow, "jobd", Status.RUNNING);
  assertStatus(previousFlow, "jobc", Status.RUNNING);
  assertStatus(previousFlow, "jobd:innerJobA", Status.RUNNING);
  assertStatus(pipelineFlow, "joba", Status.RUNNING);

  InteractiveTestJob.getTestJob("prev:jobb:innerJobA").succeedJob();
  assertStatus(previousFlow, "jobb:innerJobA", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobb:innerJobB", Status.RUNNING);
  assertStatus(previousFlow, "jobb:innerJobC", Status.RUNNING);

  InteractiveTestJob.getTestJob("pipe:joba").succeedJob();
  assertStatus(pipelineFlow, "joba", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobb", Status.RUNNING);
  assertStatus(pipelineFlow, "jobd", Status.RUNNING);
  // jobc and jobd:innerJobA are still waiting for their "prev" counterparts.
  assertStatus(pipelineFlow, "jobc", Status.QUEUED);
  assertStatus(pipelineFlow, "jobd:innerJobA", Status.QUEUED);
  assertStatus(pipelineFlow, "jobb:innerJobA", Status.RUNNING);

  InteractiveTestJob.getTestJob("prev:jobd:innerJobA").succeedJob();
  assertStatus(previousFlow, "jobd:innerJobA", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobd:innerFlow2", Status.RUNNING);
  assertStatus(pipelineFlow, "jobd:innerJobA", Status.RUNNING);

  // Finish the previous d side
  InteractiveTestJob.getTestJob("prev:jobd:innerFlow2").succeedJob();
  assertStatus(previousFlow, "jobd:innerFlow2", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobd", Status.SUCCEEDED);

  InteractiveTestJob.getTestJob("prev:jobb:innerJobB").succeedJob();
  InteractiveTestJob.getTestJob("prev:jobb:innerJobC").succeedJob();
  InteractiveTestJob.getTestJob("prev:jobc").succeedJob();
  InteractiveTestJob.getTestJob("pipe:jobb:innerJobA").succeedJob();
  assertStatus(previousFlow, "jobb:innerJobB", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobb:innerJobC", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobb:innerFlow", Status.RUNNING);
  assertStatus(previousFlow, "jobc", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobb:innerJobA", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobc", Status.RUNNING);
  assertStatus(pipelineFlow, "jobb:innerJobB", Status.RUNNING);
  assertStatus(pipelineFlow, "jobb:innerJobC", Status.RUNNING);

  InteractiveTestJob.getTestJob("prev:jobb:innerFlow").succeedJob();
  InteractiveTestJob.getTestJob("pipe:jobc").succeedJob();
  assertStatus(previousFlow, "jobb:innerFlow", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobb", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobe", Status.RUNNING);
  assertStatus(pipelineFlow, "jobc", Status.SUCCEEDED);

  InteractiveTestJob.getTestJob("pipe:jobb:innerJobB").succeedJob();
  InteractiveTestJob.getTestJob("pipe:jobb:innerJobC").succeedJob();
  InteractiveTestJob.getTestJob("prev:jobe").succeedJob();
  assertStatus(previousFlow, "jobe", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobb:innerJobB", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobb:innerJobC", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobb:innerFlow", Status.RUNNING);

  InteractiveTestJob.getTestJob("pipe:jobd:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("pipe:jobb:innerFlow").succeedJob();
  assertStatus(pipelineFlow, "jobb", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobd:innerJobA", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobb:innerFlow", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobd:innerFlow2", Status.RUNNING);

  InteractiveTestJob.getTestJob("pipe:jobd:innerFlow2").succeedJob();
  InteractiveTestJob.getTestJob("prev:joba1").succeedJob();
  assertStatus(pipelineFlow, "jobd:innerFlow2", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobd", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobf", Status.RUNNING);
  assertStatus(previousFlow, "joba1", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "joba1", Status.RUNNING);
  assertStatus(pipelineFlow, "jobe", Status.RUNNING);

  InteractiveTestJob.getTestJob("pipe:jobe").succeedJob();
  InteractiveTestJob.getTestJob("prev:jobf").succeedJob();
  assertStatus(pipelineFlow, "jobe", Status.SUCCEEDED);
  assertStatus(previousFlow, "jobf", Status.SUCCEEDED);
  assertFlowStatus(previousFlow, Status.SUCCEEDED);

  // Finish the pipelined run and verify both runs completed successfully.
  InteractiveTestJob.getTestJob("pipe:joba1").succeedJob();
  assertStatus(pipelineFlow, "joba1", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "jobf", Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:jobf").succeedJob();
  assertThreadShutDown(previousRunner);
  assertThreadShutDown(pipelineRunner);
  assertFlowStatus(pipelineFlow, Status.SUCCEEDED);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
testBasicPipelineLevel1Run
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
@Test
public void testBasicPipelineLevel2Run() throws Exception {
// Two executions of "pipelineFlow": "prev" runs freely while "pipe" is
// pipelined at level 2 behind it. As this sequence demonstrates, at level 2
// a job in "pipe" stays QUEUED until the matching job's child job(s) have
// finished in "prev" (e.g. pipe:pipeline1 only runs after prev:pipeline2).
final FlowRunner previousRunner = this.testUtil.createFromFlowMap("pipelineFlow", "prev");
final ExecutionOptions options = new ExecutionOptions();
options.setPipelineExecutionId(previousRunner.getExecutableFlow()
.getExecutionId());
options.setPipelineLevel(2);
final FlowWatcher watcher = new LocalFlowWatcher(previousRunner);
final FlowRunner pipelineRunner = this.testUtil
.createFromFlowMap("pipelineFlow", "pipe", options);
pipelineRunner.setFlowWatcher(watcher);
// 1. START FLOW
final ExecutableFlow pipelineFlow = pipelineRunner.getExecutableFlow();
final ExecutableFlow previousFlow = previousRunner.getExecutableFlow();
FlowRunnerTestUtil.startThread(previousRunner);
assertStatus(previousFlow, "pipeline1", Status.RUNNING);
// The pipelined run starts blocked behind the previous execution.
FlowRunnerTestUtil.startThread(pipelineRunner);
assertStatus(pipelineFlow, "pipeline1", Status.QUEUED);
InteractiveTestJob.getTestJob("prev:pipeline1").succeedJob();
assertStatus(previousFlow, "pipeline1", Status.SUCCEEDED);
assertStatus(previousFlow, "pipeline2", Status.RUNNING);
// Only once pipeline1's child (pipeline2) succeeds in prev does pipe:pipeline1 run.
InteractiveTestJob.getTestJob("prev:pipeline2").succeedJob();
assertStatus(previousFlow, "pipeline2", Status.SUCCEEDED);
assertStatus(previousFlow, "pipelineEmbeddedFlow3", Status.RUNNING);
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobA",
Status.RUNNING);
assertStatus(pipelineFlow, "pipeline1", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipeline1").succeedJob();
assertStatus(pipelineFlow, "pipeline1", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline2", Status.QUEUED);
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobA")
.succeedJob();
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow4:innerJobA").succeedJob();
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobA",
Status.SUCCEEDED);
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobB",
Status.RUNNING);
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobC",
Status.RUNNING);
assertStatus(previousFlow, "pipelineEmbeddedFlow4:innerJobA", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline2", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipeline2").succeedJob();
assertStatus(pipelineFlow, "pipeline2", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3", Status.RUNNING);
// Pipelining applies inside embedded flows too: the inner job is QUEUED
// until its counterpart's children finish in prev.
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobA",
Status.QUEUED);
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobB")
.succeedJob();
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobB",
Status.SUCCEEDED);
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobC")
.succeedJob();
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerFlow",
Status.RUNNING);
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobC",
Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobA",
Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerJobA")
.succeedJob();
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobA",
Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobC",
Status.QUEUED);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobB",
Status.QUEUED);
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerFlow",
Status.RUNNING);
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerFlow")
.succeedJob();
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerFlow",
Status.SUCCEEDED);
assertStatus(previousFlow, "pipelineEmbeddedFlow3", Status.SUCCEEDED);
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow4:innerFlow2").succeedJob();
assertStatus(previousFlow, "pipeline4", Status.RUNNING);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobC",
Status.RUNNING);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobB",
Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerJobB")
.succeedJob();
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerJobC")
.succeedJob();
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobC",
Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobB",
Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerFlow",
Status.QUEUED);
InteractiveTestJob.getTestJob("prev:pipeline4").succeedJob();
assertStatus(previousFlow, "pipeline4", Status.SUCCEEDED);
assertStatus(previousFlow, "pipelineFlow", Status.RUNNING);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerFlow",
Status.RUNNING);
// Finish the previous execution entirely; its runner thread must shut down.
InteractiveTestJob.getTestJob("prev:pipelineFlow").succeedJob();
assertStatus(previousFlow, "pipelineFlow", Status.SUCCEEDED);
assertFlowStatus(previousFlow, Status.SUCCEEDED);
assertThreadShutDown(previousRunner);
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerFlow")
.succeedJob();
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerFlow",
Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipelineEmbeddedFlow3", Status.SUCCEEDED);
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow4:innerJobA").succeedJob();
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow4:innerFlow2").succeedJob();
assertStatus(pipelineFlow, "pipeline4", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipeline4").succeedJob();
assertStatus(pipelineFlow, "pipeline4", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipelineFlow", Status.RUNNING);
// Finish the pipelined execution and verify it also completed.
InteractiveTestJob.getTestJob("pipe:pipelineFlow").succeedJob();
assertStatus(pipelineFlow, "pipelineFlow", Status.SUCCEEDED);
assertFlowStatus(pipelineFlow, Status.SUCCEEDED);
assertThreadShutDown(pipelineRunner);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
testBasicPipelineLevel2Run
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
@Test
public void testBasicPipelineLevel2Run2() throws Exception {
// Level-2 pipelining over the "pipeline1_2" flow, which contains two
// embedded flows (pipeline1_1 -> pipeline1_2). "prev" runs freely; jobs in
// "pipe" are blocked until the corresponding job's children finish in "prev".
final FlowRunner previousRunner = this.testUtil.createFromFlowMap("pipeline1_2", "prev");
final ExecutionOptions options = new ExecutionOptions();
options.setPipelineExecutionId(previousRunner.getExecutableFlow()
.getExecutionId());
options.setPipelineLevel(2);
final FlowWatcher watcher = new LocalFlowWatcher(previousRunner);
final FlowRunner pipelineRunner = this.testUtil
.createFromFlowMap("pipeline1_2", "pipe", options);
pipelineRunner.setFlowWatcher(watcher);
// 1. START FLOW
final ExecutableFlow pipelineFlow = pipelineRunner.getExecutableFlow();
final ExecutableFlow previousFlow = previousRunner.getExecutableFlow();
FlowRunnerTestUtil.startThread(previousRunner);
assertStatus(previousFlow, "pipeline1_1", Status.RUNNING);
assertStatus(previousFlow, "pipeline1_1:innerJobA", Status.RUNNING);
// The embedded-flow wrapper node itself runs, but its first inner job is queued.
FlowRunnerTestUtil.startThread(pipelineRunner);
assertStatus(pipelineFlow, "pipeline1_1", Status.RUNNING)
assertStatus(pipelineFlow, "pipeline1_1:innerJobA", Status.QUEUED);
InteractiveTestJob.getTestJob("prev:pipeline1_1:innerJobA").succeedJob();
assertStatus(previousFlow, "pipeline1_1:innerJobA", Status.SUCCEEDED);
assertStatus(previousFlow, "pipeline1_1:innerFlow2", Status.RUNNING);
InteractiveTestJob.getTestJob("prev:pipeline1_1:innerFlow2").succeedJob();
assertStatus(previousFlow, "pipeline1_1", Status.SUCCEEDED);
assertStatus(previousFlow, "pipeline1_1:innerFlow2", Status.SUCCEEDED);
assertStatus(previousFlow, "pipeline1_2", Status.RUNNING);
assertStatus(previousFlow, "pipeline1_2:innerJobA", Status.RUNNING);
assertStatus(pipelineFlow, "pipeline1_1:innerJobA", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipeline1_1:innerJobA").succeedJob();
assertStatus(pipelineFlow, "pipeline1_1:innerJobA", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline1_1:innerFlow2", Status.QUEUED);
InteractiveTestJob.getTestJob("prev:pipeline1_2:innerJobA").succeedJob();
assertStatus(previousFlow, "pipeline1_2:innerJobA", Status.SUCCEEDED);
assertStatus(previousFlow, "pipeline1_2:innerFlow2", Status.RUNNING);
assertStatus(pipelineFlow, "pipeline1_1:innerFlow2", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipeline1_1:innerFlow2").succeedJob();
assertStatus(pipelineFlow, "pipeline1_1", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline1_1:innerFlow2", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline1_2", Status.RUNNING);
assertStatus(pipelineFlow, "pipeline1_2:innerJobA", Status.QUEUED);
// NOTE(review): the next five lines repeat the preceding succeedJob/assert
// block verbatim — presumably a copy-paste leftover. Succeeding an
// already-succeeded job appears to be tolerated here, but confirm against
// InteractiveTestJob before removing.
InteractiveTestJob.getTestJob("pipe:pipeline1_1:innerFlow2").succeedJob();
assertStatus(pipelineFlow, "pipeline1_1", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline1_1:innerFlow2", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline1_2", Status.RUNNING);
assertStatus(pipelineFlow, "pipeline1_2:innerJobA", Status.QUEUED);
InteractiveTestJob.getTestJob("prev:pipeline1_2:innerFlow2").succeedJob();
assertStatus(previousFlow, "pipeline1_2:innerFlow2", Status.SUCCEEDED);
assertStatus(previousFlow, "pipeline1_2", Status.SUCCEEDED);
assertFlowStatus(previousFlow, Status.SUCCEEDED);
assertThreadShutDown(previousRunner);
// With prev fully finished, the remaining pipe jobs are unblocked.
assertStatus(pipelineFlow, "pipeline1_2:innerJobA", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipeline1_2:innerJobA").succeedJob();
assertStatus(pipelineFlow, "pipeline1_2:innerJobA", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline1_2:innerFlow2", Status.RUNNING);
InteractiveTestJob.getTestJob("pipe:pipeline1_2:innerFlow2").succeedJob();
assertStatus(pipelineFlow, "pipeline1_2", Status.SUCCEEDED);
assertStatus(pipelineFlow, "pipeline1_2:innerFlow2", Status.SUCCEEDED);
assertFlowStatus(pipelineFlow, Status.SUCCEEDED);
assertThreadShutDown(pipelineRunner);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
testBasicPipelineLevel2Run2
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
@Test
public void testBasicPipelineLevel3RunWithDisabledSubFlows() throws Exception {
  // Each execution disables a different embedded sub-flow: "prev" skips
  // pipelineEmbeddedFlow4, the pipelined run skips pipelineEmbeddedFlow3.
  final ExecutionOptions optionsForPrev = new ExecutionOptions();
  optionsForPrev.setDisabledJobs(
      Arrays.asList(new DisabledJob("pipelineEmbeddedFlow4")));
  final FlowRunner prevRunner =
      this.testUtil.createFromFlowMap("pipelineFlow", "prev", optionsForPrev);

  // The second run pipelines at level 3 behind the first one.
  final ExecutionOptions optionsForPipe = new ExecutionOptions();
  optionsForPipe.setPipelineExecutionId(
      prevRunner.getExecutableFlow().getExecutionId());
  optionsForPipe.setPipelineLevel(3);
  optionsForPipe.setDisabledJobs(
      Arrays.asList(new DisabledJob("pipelineEmbeddedFlow3")));
  final FlowRunner pipeRunner =
      this.testUtil.createFromFlowMap("pipelineFlow", "pipe", optionsForPipe);
  pipeRunner.setFlowWatcher(new LocalFlowWatcher(prevRunner));

  // 1. START FLOW
  final ExecutableFlow pipeFlow = pipeRunner.getExecutableFlow();
  final ExecutableFlow prevFlow = prevRunner.getExecutableFlow();
  FlowRunnerTestUtil.startThread(prevRunner);
  FlowRunnerTestUtil.startThread(pipeRunner);
  assertStatus(prevFlow, "pipeline1", Status.RUNNING);
  assertStatus(pipeFlow, "pipeline1", Status.QUEUED);

  InteractiveTestJob.getTestJob("prev:pipeline1").succeedJob();
  InteractiveTestJob.getTestJob("prev:pipeline2").succeedJob();
  assertStatus(prevFlow, "pipelineEmbeddedFlow3", Status.RUNNING);
  assertStatus(prevFlow, "pipelineEmbeddedFlow3:innerJobA",
      Status.RUNNING);
  // this should still be queued because some jobs are still running in previousFlow
  assertStatus(pipeFlow, "pipeline1", Status.QUEUED);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
testBasicPipelineLevel3RunWithDisabledSubFlows
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
@Test
public void testBasicPipelineLevel3RunWithMoreDisabledJobs() throws Exception {
// disable either sub-flow in each execution: pipelineEmbeddedFlow4 or pipelineEmbeddedFlow3
// also disable pipeline1 & pipeline2 in both flows so that the first job that should be blocked
// is actually inside a sub-flow
final ExecutionOptions prevOptions = new ExecutionOptions();
prevOptions.setDisabledJobs(Arrays.asList(new DisabledJob("pipelineEmbeddedFlow4"),
new DisabledJob("pipeline1"), new DisabledJob("pipeline2"), new DisabledJob("pipelineFlow")
));
final FlowRunner previousRunner = this.testUtil.createFromFlowMap("pipelineFlow", "prev",
prevOptions);
// The second execution pipelines at level 3 behind the first one.
final ExecutionOptions pipeOptions = new ExecutionOptions();
pipeOptions.setPipelineExecutionId(previousRunner.getExecutableFlow()
.getExecutionId());
pipeOptions.setPipelineLevel(3);
pipeOptions.setDisabledJobs(Arrays.asList(new DisabledJob("pipelineEmbeddedFlow3"),
new DisabledJob("pipeline1"), new DisabledJob("pipeline2")));
final FlowWatcher watcher = new LocalFlowWatcher(previousRunner);
final FlowRunner pipelineRunner = this.testUtil
.createFromFlowMap("pipelineFlow", "pipe", pipeOptions);
pipelineRunner.setFlowWatcher(watcher);
// 1. START FLOW
final ExecutableFlow pipelineFlow = pipelineRunner.getExecutableFlow();
final ExecutableFlow previousFlow = previousRunner.getExecutableFlow();
FlowRunnerTestUtil.startThread(previousRunner);
FlowRunnerTestUtil.startThread(pipelineRunner);
assertStatus(previousFlow, "pipelineEmbeddedFlow3", Status.RUNNING);
assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobA",
Status.RUNNING);
// flow nodes that are just wrappers are immediately started even if there would be something
// to block basically
assertStatus(pipelineFlow, "pipelineEmbeddedFlow4", Status.RUNNING);
// this should still be queued because some jobs are still running in previousFlow
assertStatus(pipelineFlow, "pipelineEmbeddedFlow4:innerJobA", Status.QUEUED);
// succeed prev until pipeline4
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobA").succeedJob();
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobB").succeedJob();
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobC").succeedJob();
InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerFlow").succeedJob();
// check that pipeline job is still queued
assertStatus(pipelineFlow, "pipelineEmbeddedFlow4:innerJobA", Status.QUEUED);
// now finish the last pending job
InteractiveTestJob.getTestJob("prev:pipeline4").succeedJob();
// pipelineFlow was disabled, so nothing to do manually
// InteractiveTestJob.getTestJob("prev:pipelineFlow").succeedJob();
assertStatus(previousFlow, "pipelineFlow", Status.SKIPPED);
assertFlowStatus(previousFlow, Status.SUCCEEDED);
assertThreadShutDown(previousRunner);
// now this job should've been unblocked
assertStatus(pipelineFlow, "pipelineEmbeddedFlow4:innerJobA", Status.RUNNING);
// Drive the rest of the pipelined run to completion.
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow4:innerJobA").succeedJob();
InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow4:innerFlow2").succeedJob();
InteractiveTestJob.getTestJob("pipe:pipeline4").succeedJob();
InteractiveTestJob.getTestJob("pipe:pipelineFlow").succeedJob();
assertStatus(pipelineFlow, "pipelineFlow", Status.SUCCEEDED);
assertFlowStatus(pipelineFlow, Status.SUCCEEDED);
assertThreadShutDown(pipelineRunner);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
testBasicPipelineLevel3RunWithMoreDisabledJobs
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
@Test
public void testBasicPipelineLevel3Run() throws Exception {
  // Two executions of "pipelineFlow" with "pipe" pipelined at level 3 behind
  // "prev": as this sequence demonstrates, pipe:pipeline1 stays QUEUED until
  // the entire previous execution (including its last job) has finished.
  // (Fix: a duplicated assertStatus on pipelineEmbeddedFlow3:innerFlow RUNNING
  // was removed.)
  final FlowRunner previousRunner = this.testUtil.createFromFlowMap("pipelineFlow", "prev");
  final ExecutionOptions options = new ExecutionOptions();
  options.setPipelineExecutionId(previousRunner.getExecutableFlow()
      .getExecutionId());
  options.setPipelineLevel(3);
  final FlowWatcher watcher = new LocalFlowWatcher(previousRunner);
  final FlowRunner pipelineRunner = this.testUtil
      .createFromFlowMap("pipelineFlow", "pipe", options);
  pipelineRunner.setFlowWatcher(watcher);

  // 1. START FLOW
  final ExecutableFlow pipelineFlow = pipelineRunner.getExecutableFlow();
  final ExecutableFlow previousFlow = previousRunner.getExecutableFlow();
  FlowRunnerTestUtil.startThread(previousRunner);
  assertStatus(previousFlow, "pipeline1", Status.RUNNING);
  FlowRunnerTestUtil.startThread(pipelineRunner);
  assertStatus(pipelineFlow, "pipeline1", Status.QUEUED);

  // Drive the previous execution to completion job by job.
  InteractiveTestJob.getTestJob("prev:pipeline1").succeedJob();
  assertStatus(previousFlow, "pipeline1", Status.SUCCEEDED);
  assertStatus(previousFlow, "pipeline2", Status.RUNNING);
  InteractiveTestJob.getTestJob("prev:pipeline2").succeedJob();
  assertStatus(previousFlow, "pipeline2", Status.SUCCEEDED);
  assertStatus(previousFlow, "pipelineEmbeddedFlow3", Status.RUNNING);
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobA",
      Status.RUNNING);
  InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobA")
      .succeedJob();
  InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow4:innerJobA").succeedJob();
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobA",
      Status.SUCCEEDED);
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobB",
      Status.RUNNING);
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobC",
      Status.RUNNING);
  assertStatus(previousFlow, "pipelineEmbeddedFlow4:innerJobA", Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobB")
      .succeedJob();
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobB",
      Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerJobC")
      .succeedJob();
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerFlow",
      Status.RUNNING);
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerJobC",
      Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow3:innerFlow")
      .succeedJob();
  assertStatus(previousFlow, "pipelineEmbeddedFlow3:innerFlow",
      Status.SUCCEEDED);
  assertStatus(previousFlow, "pipelineEmbeddedFlow3", Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("prev:pipelineEmbeddedFlow4:innerFlow2").succeedJob();
  assertStatus(previousFlow, "pipeline4", Status.RUNNING);
  InteractiveTestJob.getTestJob("prev:pipeline4").succeedJob();
  assertStatus(previousFlow, "pipeline4", Status.SUCCEEDED);
  assertStatus(previousFlow, "pipelineFlow", Status.RUNNING);
  // Should still be queued until the last job in the previous flow has finished.
  assertStatus(pipelineFlow, "pipeline1", Status.QUEUED);
  InteractiveTestJob.getTestJob("prev:pipelineFlow").succeedJob();
  assertStatus(previousFlow, "pipelineFlow", Status.SUCCEEDED);
  assertFlowStatus(previousFlow, Status.SUCCEEDED);
  assertThreadShutDown(previousRunner);

  // Now the pipelined execution is unblocked; run it to completion.
  assertStatus(pipelineFlow, "pipeline1", Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:pipeline1").succeedJob();
  assertStatus(pipelineFlow, "pipeline1", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "pipeline2", Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:pipeline2").succeedJob();
  assertStatus(pipelineFlow, "pipeline2", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3", Status.RUNNING);
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobA",
      Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerJobA")
      .succeedJob();
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobA",
      Status.SUCCEEDED);
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobC",
      Status.RUNNING);
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobB",
      Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerJobB")
      .succeedJob();
  InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerJobC")
      .succeedJob();
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobC",
      Status.SUCCEEDED);
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerJobB",
      Status.SUCCEEDED);
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerFlow",
      Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow3:innerFlow")
      .succeedJob();
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3:innerFlow",
      Status.SUCCEEDED);
  assertStatus(pipelineFlow, "pipelineEmbeddedFlow3", Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow4:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("pipe:pipelineEmbeddedFlow4:innerFlow2").succeedJob();
  assertStatus(pipelineFlow, "pipeline4", Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:pipeline4").succeedJob();
  assertStatus(pipelineFlow, "pipeline4", Status.SUCCEEDED);
  assertStatus(pipelineFlow, "pipelineFlow", Status.RUNNING);
  InteractiveTestJob.getTestJob("pipe:pipelineFlow").succeedJob();
  assertStatus(pipelineFlow, "pipelineFlow", Status.SUCCEEDED);
  assertFlowStatus(pipelineFlow, Status.SUCCEEDED);
  assertThreadShutDown(pipelineRunner);
}
|
Flows in this test: joba jobb joba1 jobc->joba jobd->joba jobe->jobb,jobc,jobd jobf->jobe,joba1
<p>
jobb = innerFlow innerJobA innerJobB->innerJobA innerJobC->innerJobB
innerFlow->innerJobB,innerJobC
<p>
jobd=innerFlow2 innerFlow2->innerJobA
@author rpark
|
testBasicPipelineLevel3Run
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPipelineTest.java
|
Apache-2.0
|
@Test
public void testFlow1_0RuntimePropertyResolution() throws Exception {
// Flow 1.0 (job-file based) variant: load the flow definitions from
// EXEC_FLOW_DIR and run the shared property-precedence assertions.
this.testUtil = new FlowRunnerTestUtil(FlowRunnerPropertyResolutionTest.EXEC_FLOW_DIR,
this.temporaryFolder);
assertProperties();
}
|
Test the property resolution of jobs in a flow.
<p>
The tests are contained in execpropstest, and should be resolved in the following fashion, where
the later props take precedence over the previous ones.
<p>
1. Global props (set in the FlowRunner) 2. Shared job props (depends on job directory) 3. Flow
Override properties 4. Previous job outputs to the embedded flow (Only if contained in embedded
flow) 5. Embedded flow properties (Only if contained in embedded flow) 6. Previous job outputs
(if exists) 7. Job Props
<p>
The test contains the following structure: job2 -> innerFlow (job1 -> job4 ) -> job3
<p>
job2 and 4 are in nested directories so should have different shared properties than other jobs.
|
testFlow1_0RuntimePropertyResolution
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
Apache-2.0
|
@Test
public void testFlow1_0RuntimePropertyResolutionWithHighestPrecedenceToRuntimePropsEnabled()
throws Exception {
// Same Flow 1.0 setup as testFlow1_0RuntimePropertyResolution, but verifies
// the precedence ordering when runtime properties are given the highest
// precedence (see assertPropertiesWithHighestPrecedenceToRuntimePropsEnabled).
this.testUtil = new FlowRunnerTestUtil(FlowRunnerPropertyResolutionTest.EXEC_FLOW_DIR,
this.temporaryFolder);
assertPropertiesWithHighestPrecedenceToRuntimePropsEnabled();
}
|
Test the property resolution of jobs in a flow.
<p>
The tests are contained in execpropstest, and should be resolved in the following fashion, where
the later props take precedence over the previous ones.
<p>
1. Global props (set in the FlowRunner) 2. Shared job props (depends on job directory) 3. Flow
Override properties 4. Previous job outputs to the embedded flow (Only if contained in embedded
flow) 5. Embedded flow properties (Only if contained in embedded flow) 6. Previous job outputs
(if exists) 7. Job Props
<p>
The test contains the following structure: job2 -> innerFlow (job1 -> job4 ) -> job3
<p>
job2 and 4 are in nested directories so should have different shared properties than other jobs.
|
testFlow1_0RuntimePropertyResolutionWithHighestPrecedenceToRuntimePropsEnabled
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
Apache-2.0
|
@Test
public void testFlow2_0RuntimePropertyResolution() throws Exception {
  // Flow 2.0 (YAML-defined) variant: stub the project loader so the runner
  // resolves the uploaded flow YAML file, then run the shared
  // property-precedence assertions.
  this.testUtil = new FlowRunnerTestUtil(FlowRunnerPropertyResolutionTest.FLOW_YAML_DIR,
      this.temporaryFolder);
  final Project testProject = this.testUtil.getProject();
  final int projectId = testProject.getId();
  final int projectVersion = testProject.getVersion();
  // Pretend the YAML flow file has been uploaded and its latest version is 1.
  when(this.testUtil.getProjectLoader().isFlowFileUploaded(projectId, projectVersion))
      .thenReturn(true);
  when(this.testUtil.getProjectLoader().getLatestFlowVersion(projectId, projectVersion,
      FlowRunnerPropertyResolutionTest.FLOW_YAML_FILE)).thenReturn(1);
  when(this.testUtil.getProjectLoader().getUploadedFlowFile(eq(projectId), eq(projectVersion),
      eq(FlowRunnerPropertyResolutionTest.FLOW_YAML_FILE), eq(1), any(File.class)))
      .thenReturn(ExecutionsTestUtil.getFlowFile(FlowRunnerPropertyResolutionTest.FLOW_YAML_DIR,
          FlowRunnerPropertyResolutionTest.FLOW_YAML_FILE));
  assertProperties();
}
|
Test the property resolution of jobs in a flow.
<p>
The tests are contained in execpropstest, and should be resolved in the following fashion, where
the later props take precedence over the previous ones.
<p>
1. Global props (set in the FlowRunner) 2. Shared job props (depends on job directory) 3. Flow
Override properties 4. Previous job outputs to the embedded flow (Only if contained in embedded
flow) 5. Embedded flow properties (Only if contained in embedded flow) 6. Previous job outputs
(if exists) 7. Job Props
<p>
The test contains the following structure: job2 -> innerFlow (job1 -> job4 ) -> job3
<p>
job2 and 4 are in nested directories so should have different shared properties than other jobs.
|
testFlow2_0RuntimePropertyResolution
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
Apache-2.0
|
/**
 * Flow 2.0 (YAML) variant with the eager runtime-props override behavior: stubs the project
 * loader so the runner can find and fetch the uploaded flow file (version 1), then verifies
 * resolution via {@link #assertPropertiesWithHighestPrecedenceToRuntimePropsEnabled()}.
 */
@Test
public void testFlow2_0RuntimePropertyResolutionWithHighestPrecedenceToRuntimePropsEnabled()
    throws Exception {
  this.testUtil = new FlowRunnerTestUtil(FLOW_YAML_DIR, this.temporaryFolder);
  final Project project = this.testUtil.getProject();
  final int projectId = project.getId();
  final int projectVersion = project.getVersion();
  // Pretend the YAML flow file was uploaded and is retrievable at flow-file version 1.
  when(this.testUtil.getProjectLoader().isFlowFileUploaded(projectId, projectVersion))
      .thenReturn(true);
  when(this.testUtil.getProjectLoader()
      .getLatestFlowVersion(projectId, projectVersion, FLOW_YAML_FILE))
      .thenReturn(1);
  when(this.testUtil.getProjectLoader()
      .getUploadedFlowFile(eq(projectId), eq(projectVersion), eq(FLOW_YAML_FILE), eq(1),
          any(File.class)))
      .thenReturn(ExecutionsTestUtil.getFlowFile(FLOW_YAML_DIR, FLOW_YAML_FILE));
  assertPropertiesWithHighestPrecedenceToRuntimePropsEnabled();
}
|
Test the property resolution of jobs in a flow.
<p>
The tests are contained in execpropstest, and should be resolved in the following fashion, where
the later props take precedence over the previous ones.
<p>
1. Global props (set in the FlowRunner) 2. Shared job props (depends on job directory) 3. Flow
Override properties 4. Previous job outputs to the embedded flow (Only if contained in embedded
flow) 5. Embedded flow properties (Only if contained in embedded flow) 6. Previous job outputs
(if exists) 7. Job Props
<p>
The test contains the following structure: job2 -> innerFlow (job1 -> job4 ) -> job3
<p>
job2 and 4 are in nested directories so should have different shared properties than other jobs.
|
testFlow2_0RuntimePropertyResolutionWithHighestPrecedenceToRuntimePropsEnabled
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
Apache-2.0
|
/**
 * Drives the flow (job2 -> innerflow(job1 -> job4) -> job3) to completion and, at each stage,
 * checks the resolved input properties under the default (lazy) runtime-props precedence.
 * Under this mode a job's own props and upstream job outputs can shadow the root-flow runtime
 * overrides; the exact expected priority chain is spelled out before each assertion group.
 */
private void assertProperties() throws Exception {
  // Runtime properties attached to the root flow node; visible to every job unless shadowed.
  final HashMap<String, String> rootFlowNodeRuntimeProps = new HashMap<>();
  rootFlowNodeRuntimeProps.put("props7", "execflow7");
  rootFlowNodeRuntimeProps.put("props6", "execflow6");
  rootFlowNodeRuntimeProps.put("props5", "execflow5");
  rootFlowNodeRuntimeProps.put("runtime1", "runtime1-ROOT");
  rootFlowNodeRuntimeProps.put("runtime2", "runtime2-ROOT");
  // Set some node (root flow + other DAG nodes) runtime properties.
  final FlowRunner runner = this.testUtil.createFromFlowMap(
      FlowRunnerPropertyResolutionTest.FLOW_NAME, rootFlowNodeRuntimeProps);
  runner.getExecutableFlow().getExecutionOptions().addAllRuntimeProperties(ImmutableMap.of(
      "job2", ImmutableMap.of(
          "job-prop-2", "job2-val-2",
          "props6", "job2-val-6"),
      "innerflow", ImmutableMap.of(
          "props6", "innerflow-val-6",
          "props4", "innerflow-val-4",
          "props10", "innerflow-val-10"),
      // overrides by nested job id (or fully qualified name): this is the most specific, so
      // always wins
      "innerflow:job4", ImmutableMap.of(
          "runtime1", "runtime1-job4",
          "props4", "innerflow-job4-val-4",
          "props5", "innerflow-job4-val-5"),
      // job3 is a job, but it's also the root node of this flow
      "job3", ImmutableMap.of("prop-job3", "should-be-set-only-for-job3")
  ));
  // Index every node (including nested ones) by nested id for direct props inspection.
  final Map<String, ExecutableNode> nodeMap = new HashMap<>();
  createNodeMap(runner.getExecutableFlow(), nodeMap);
  final ExecutableFlow flow = runner.getExecutableFlow();
  // Start flow. Job 2 should start
  FlowRunnerTestUtil.startThread(runner);
  assertStatus(flow, "job2", Status.RUNNING);
  // The priority order should be:
  // job2-overrides -> job2 -> root-flow-node-overrides -> flow-or-shared-props
  final Props job2Props = nodeMap.get("job2").getInputProps();
  Assert.assertEquals("shared1", job2Props.get("props1"));
  Assert.assertEquals("job2", job2Props.get("props2"));
  Assert.assertEquals("moo3", job2Props.get("props3"));
  // Lazy mode: job's own props7 beats the root runtime override "execflow7".
  Assert.assertEquals("job7", job2Props.get("props7"));
  Assert.assertEquals("execflow5", job2Props.get("props5"));
  Assert.assertEquals("job2-val-6", job2Props.get("props6"));
  Assert.assertEquals("shared4", job2Props.get("props4"));
  Assert.assertEquals("shared8", job2Props.get("props8"));
  Assert.assertEquals("job2-val-2", job2Props.get("job-prop-2"));
  Assert.assertEquals("runtime1-ROOT", job2Props.get("runtime1"));
  Assert.assertEquals("runtime2-ROOT", job2Props.get("runtime2"));
  Assert.assertNull(job2Props.get("props10"));
  // The priority order should be:
  // job1-overrides -> job1 -> innerflow-overrides -> innerflow -> job2-output ->
  // root-flow-node-overrides -> flow-or-shared-props
  final Props job2Generated = new Props();
  job2Generated.put("props6", "g2job6");
  job2Generated.put("props4", "g2job4");
  job2Generated.put("props5", "g2job5");
  job2Generated.put("props7", "g2job7");
  job2Generated.put("props10", "g2job10");
  InteractiveTestJob.getTestJob("job2").succeedJob(job2Generated);
  assertStatus(flow, "innerflow:job1", Status.RUNNING);
  final Props job1Props = nodeMap.get("innerflow:job1").getInputProps();
  Assert.assertEquals("job1", job1Props.get("props1"));
  Assert.assertEquals("job2", job1Props.get("props2"));
  Assert.assertEquals("job8", job1Props.get("props8"));
  Assert.assertEquals("innerflow-val-6", job1Props.get("props6"));
  Assert.assertEquals("innerflow5", job1Props.get("props5"));
  // job2's generated output flows into the embedded flow's jobs.
  Assert.assertEquals("g2job7", job1Props.get("props7"));
  Assert.assertEquals("moo3", job1Props.get("props3"));
  Assert.assertEquals("innerflow-val-4", job1Props.get("props4"));
  Assert.assertEquals("innerflow-val-10", job1Props.get("props10"));
  Assert.assertEquals("runtime1-ROOT", job1Props.get("runtime1"));
  Assert.assertEquals("runtime2-ROOT", job1Props.get("runtime2"));
  // The priority order should be:
  // job4-overrides -> job4 -> job1-output -> innerflow-overrides -> innerflow ->
  // job2-output -> root-flow-node-overrides -> flow-or-shared-props
  final Props job1GeneratedProps = new Props();
  job1GeneratedProps.put("props4", "g1job4");
  job1GeneratedProps.put("props10", "g1job10");
  InteractiveTestJob.getTestJob("innerflow:job1").succeedJob(job1GeneratedProps);
  assertStatus(flow, "innerflow:job4", Status.RUNNING);
  final Props job4Props = nodeMap.get("innerflow:job4").getInputProps();
  Assert.assertEquals("job8", job4Props.get("props8"));
  Assert.assertEquals("job9", job4Props.get("props9"));
  // The nested-id ("innerflow:job4") override is the most specific and wins over everything.
  Assert.assertEquals("innerflow-job4-val-4", job4Props.get("props4"));
  Assert.assertEquals("g1job10", job4Props.get("props10"));
  Assert.assertEquals("innerflow-val-6", job4Props.get("props6"));
  Assert.assertEquals("g2job7", job4Props.get("props7"));
  Assert.assertEquals("innerflow-job4-val-5", job4Props.get("props5"));
  Assert.assertEquals("shared1", job4Props.get("props1"));
  Assert.assertEquals("shared2", job4Props.get("props2"));
  Assert.assertEquals("moo3", job4Props.get("props3"));
  Assert.assertEquals("runtime1-job4", job4Props.get("runtime1"));
  Assert.assertEquals("runtime2-ROOT", job4Props.get("runtime2"));
  // The priority order should be:
  // job3-overrides -> job3 -> innerflow-output -> root-flow-node-overrides -> flow-or-shared-props
  final Props job4GeneratedProps = new Props();
  job4GeneratedProps.put("props9", "g4job9");
  job4GeneratedProps.put("props6", "g4job6");
  InteractiveTestJob.getTestJob("innerflow:job4").succeedJob(job4GeneratedProps);
  assertStatus(flow, FlowRunnerPropertyResolutionTest.FLOW_NAME, Status.RUNNING);
  final Props job3Props = nodeMap.get("job3").getInputProps();
  Assert.assertEquals("job3", job3Props.get("props3"));
  Assert.assertEquals("g4job6", job3Props.get("props6"));
  Assert.assertEquals("g4job9", job3Props.get("props9"));
  Assert.assertEquals("execflow7", job3Props.get("props7"));
  Assert.assertEquals("execflow5", job3Props.get("props5"));
  Assert.assertEquals("shared1", job3Props.get("props1"));
  Assert.assertEquals("shared2", job3Props.get("props2"));
  Assert.assertEquals("moo4", job3Props.get("props4"));
  Assert.assertNull(job3Props.get("props10"));
  Assert.assertEquals("runtime1-ROOT", job3Props.get("runtime1"));
  Assert.assertEquals("runtime2-ROOT", job3Props.get("runtime2"));
  // The per-job "job3" override must not leak into other jobs.
  Assert.assertEquals("should-be-set-only-for-job3", job3Props.get("prop-job3"));
  Assert.assertNull(job2Props.get("prop-job3"));
  Assert.assertNull(job4Props.get("prop-job3"));
}
|
Test the property resolution of jobs in a flow.
<p>
The tests are contained in execpropstest, and should be resolved in the following fashion, where
the later props take precedence over the previous ones.
<p>
1. Global props (set in the FlowRunner) 2. Shared job props (depends on job directory) 3. Flow
Override properties 4. Previous job outputs to the embedded flow (Only if contained in embedded
flow) 5. Embedded flow properties (Only if contained in embedded flow) 6. Previous job outputs
(if exists) 7. Job Props
<p>
The test contains the following structure: job2 -> innerFlow (job1 -> job4 ) -> job3
<p>
job2 and 4 are in nested directories so should have different shared properties than other jobs.
|
assertProperties
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
Apache-2.0
|
/**
 * Drives the flow (job2 -> innerflow(job1 -> job4) -> job3) to completion with
 * AZKABAN_EXECUTOR_RUNTIME_PROPS_OVERRIDE_EAGER enabled and checks the resolved input
 * properties at each stage. In this mode runtime overrides take precedence over a job's own
 * props and over upstream job outputs; the exact expected priority chain is spelled out
 * before each assertion group.
 */
private void assertPropertiesWithHighestPrecedenceToRuntimePropsEnabled() throws Exception {
  // Runtime properties attached to the root flow node; visible to every job unless shadowed.
  final HashMap<String, String> rootFlowNodeRuntimeProps = new HashMap<>();
  rootFlowNodeRuntimeProps.put("props7", "execflow7");
  rootFlowNodeRuntimeProps.put("props6", "execflow6");
  rootFlowNodeRuntimeProps.put("props5", "execflow5");
  // Create the runner with the eager-override executor config enabled.
  final FlowRunner runner = this.testUtil.createFromFlowMap(
      FlowRunnerPropertyResolutionTest.FLOW_NAME, null, rootFlowNodeRuntimeProps,
      Props.of(ConfigurationKeys.AZKABAN_EXECUTOR_RUNTIME_PROPS_OVERRIDE_EAGER, "true"));
  // Set some node (root flow + other DAG nodes) runtime properties.
  runner.getExecutableFlow().getExecutionOptions().addAllRuntimeProperties(ImmutableMap.of(
      "job2", ImmutableMap.of(
          "job-prop-2", "job2-val-2",
          "props6", "job2-val-6"),
      "innerflow", ImmutableMap.of(
          "props6", "innerflow-val-6",
          "props4", "innerflow-val-4",
          "props10", "innerflow-val-10"),
      // overrides by nested job id: this is the most specific, so always wins
      "innerflow:job4", ImmutableMap.of(
          "props4", "innerflow-job4-val-4",
          "props5", "innerflow-job4-val-5"),
      // job3 is a job, but it's also the root node of this flow
      "job3", ImmutableMap.of("prop-job3", "should-be-set-only-for-job3")
  ));
  // Index every node (including nested ones) by nested id for direct props inspection.
  final Map<String, ExecutableNode> nodeMap = new HashMap<>();
  createNodeMap(runner.getExecutableFlow(), nodeMap);
  final ExecutableFlow flow = runner.getExecutableFlow();
  // Start flow. Job 2 should start
  FlowRunnerTestUtil.startThread(runner);
  assertStatus(flow, "job2", Status.RUNNING);
  // The priority order should be:
  // job2-overrides -> root-flow-node-overrides -> job2 -> flow-or-shared-props
  final Props job2Props = nodeMap.get("job2").getInputProps();
  Assert.assertEquals("shared1", job2Props.get("props1"));
  Assert.assertEquals("job2", job2Props.get("props2"));
  Assert.assertEquals("moo3", job2Props.get("props3"));
  // Eager mode: root runtime override beats the job's own props7 (contrast with lazy mode).
  Assert.assertEquals("execflow7", job2Props.get("props7"));
  Assert.assertEquals("execflow5", job2Props.get("props5"));
  Assert.assertEquals("job2-val-6", job2Props.get("props6"));
  Assert.assertEquals("shared4", job2Props.get("props4"));
  Assert.assertEquals("shared8", job2Props.get("props8"));
  Assert.assertEquals("job2-val-2", job2Props.get("job-prop-2"));
  Assert.assertNull(job2Props.get("props10"));
  // The priority order should be:
  // job1-overrides -> innerflow-overrides -> root-flow-node-overrides -> job1 -> innerflow ->
  // job2-output -> flow-or-shared-props
  final Props job2Generated = new Props();
  job2Generated.put("props6", "g2job6");
  job2Generated.put("props8", "g2job8");
  job2Generated.put("props10", "g2job10");
  InteractiveTestJob.getTestJob("job2").succeedJob(job2Generated);
  assertStatus(flow, "innerflow:job1", Status.RUNNING);
  final Props job1Props = nodeMap.get("innerflow:job1").getInputProps();
  Assert.assertEquals("job1", job1Props.get("props1"));
  Assert.assertEquals("job2", job1Props.get("props2"));
  Assert.assertEquals("job8", job1Props.get("props8"));
  Assert.assertEquals("innerflow-val-6", job1Props.get("props6"))
;
  Assert.assertEquals("innerflow-val-10", job1Props.get("props10"));
  Assert.assertEquals("innerflow-val-4", job1Props.get("props4"));
  Assert.assertEquals("execflow5", job1Props.get("props5"));
  Assert.assertEquals("execflow7", job1Props.get("props7"));
  Assert.assertEquals("moo3", job1Props.get("props3"));
  // The priority order should be:
  // job4-overrides -> innerflow-overrides -> root-flow-node-overrides -> job4 ->
  // job1-output -> innerflow -> job2-output -> flow-or-shared-props
  final Props job1GeneratedProps = new Props();
  job1GeneratedProps.put("props10", "g1job10");
  job1GeneratedProps.put("props1", "g1job1");
  InteractiveTestJob.getTestJob("innerflow:job1").succeedJob(job1GeneratedProps);
  assertStatus(flow, "innerflow:job4", Status.RUNNING);
  final Props job4Props = nodeMap.get("innerflow:job4").getInputProps();
  Assert.assertEquals("job8", job4Props.get("props8"));
  Assert.assertEquals("job9", job4Props.get("props9"));
  Assert.assertEquals("innerflow-val-6", job4Props.get("props6"));
  // Eager mode: the innerflow override beats job1's generated props10 output.
  Assert.assertEquals("innerflow-val-10", job4Props.get("props10"));
  Assert.assertEquals("execflow7", job4Props.get("props7"));
  Assert.assertEquals("innerflow-job4-val-4", job4Props.get("props4"));
  Assert.assertEquals("innerflow-job4-val-5", job4Props.get("props5"));
  Assert.assertEquals("g1job1", job4Props.get("props1"));
  Assert.assertEquals("shared2", job4Props.get("props2"));
  Assert.assertEquals("moo3", job4Props.get("props3"));
  // The priority order should be:
  // job3-overrides -> root-flow-node-overrides -> job3 -> innerflow-output -> flow-or-shared-props
  final Props job4GeneratedProps = new Props();
  job4GeneratedProps.put("props9", "g4job9");
  job4GeneratedProps.put("props6", "g4job6");
  InteractiveTestJob.getTestJob("innerflow:job4").succeedJob(job4GeneratedProps);
  assertStatus(flow, FlowRunnerPropertyResolutionTest.FLOW_NAME, Status.RUNNING);
  final Props job3Props = nodeMap.get("job3").getInputProps();
  Assert.assertEquals("job3", job3Props.get("props3"));
  // Eager mode: root runtime props6 beats innerflow's generated output "g4job6".
  Assert.assertEquals("execflow6", job3Props.get("props6"));
  Assert.assertEquals("g4job9", job3Props.get("props9"));
  Assert.assertEquals("execflow7", job3Props.get("props7"));
  Assert.assertEquals("execflow5", job3Props.get("props5"));
  Assert.assertEquals("shared1", job3Props.get("props1"));
  Assert.assertEquals("shared2", job3Props.get("props2"));
  Assert.assertEquals("moo4", job3Props.get("props4"));
  Assert.assertNull(job3Props.get("props10"));
  // The per-job "job3" override must not leak into other jobs.
  Assert.assertEquals("should-be-set-only-for-job3", job3Props.get("prop-job3"));
  Assert.assertNull(job2Props.get("prop-job3"));
  Assert.assertNull(job4Props.get("prop-job3"));
}
|
Test the property resolution of jobs in a flow.
<p>
The tests are contained in execpropstest, and should be resolved in the following fashion, where
the later props take precedence over the previous ones.
<p>
1. Global props (set in the FlowRunner) 2. Shared job props (depends on job directory) 3. Flow
Override properties 4. Previous job outputs to the embedded flow (Only if contained in embedded
flow) 5. Embedded flow properties (Only if contained in embedded flow) 6. Previous job outputs
(if exists) 7. Job Props
<p>
The test contains the following structure: job2 -> innerFlow (job1 -> job4 ) -> job3
<p>
job2 and 4 are in nested directories so should have different shared properties than other jobs.
|
assertPropertiesWithHighestPrecedenceToRuntimePropsEnabled
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
Apache-2.0
|
/**
 * Recursively indexes every node of {@code flow} into {@code nodeMap}, keyed by its nested id
 * (e.g. "innerflow:job4"), descending into embedded flows so nested jobs are indexed too.
 */
private void createNodeMap(final ExecutableFlowBase flow,
    final Map<String, ExecutableNode> nodeMap) {
  for (final ExecutableNode child : flow.getExecutableNodes()) {
    nodeMap.put(child.getNestedId(), child);
    if (child instanceof ExecutableFlowBase) {
      // Recurse into the embedded flow's own nodes.
      createNodeMap((ExecutableFlowBase) child, nodeMap);
    }
  }
}
|
Test the property resolution of jobs in a flow.
<p>
The tests are contained in execpropstest, and should be resolved in the following fashion, where
the later props take precedence over the previous ones.
<p>
1. Global props (set in the FlowRunner) 2. Shared job props (depends on job directory) 3. Flow
Override properties 4. Previous job outputs to the embedded flow (Only if contained in embedded
flow) 5. Embedded flow properties (Only if contained in embedded flow) 6. Previous job outputs
(if exists) 7. Job Props
<p>
The test contains the following structure: job2 -> innerFlow (job1 -> job4 ) -> job3
<p>
job2 and 4 are in nested directories so should have different shared properties than other jobs.
|
createNodeMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerPropertyResolutionTest.java
|
Apache-2.0
|
/**
 * Loads the "embedded2" execution directory into a fresh test harness before each test.
 */
@Before
public void setUp() throws Exception {
  this.testUtil = new FlowRunnerTestUtil("embedded2", this.temporaryFolder);
}
|
Test the flow run, especially with embedded flows.
This test uses executions/embedded2. It also mainly uses the flow named jobf. The test is
designed to control success/failures explicitly so we don't have to time the flow exactly.
Flow jobf looks like the following:
<pre>
joba joba1
/ | \ |
/ | \ |
jobb jobd jobc |
\ | / /
\ | / /
jobe /
| /
| /
jobf
</pre>
The job 'jobb' is an embedded flow:
jobb:innerFlow
<pre>
innerJobA
/ \
innerJobB innerJobC
\ /
innerFlow
</pre>
The job 'jobd' is a simple embedded flow:
jobd:innerFlow2
<pre>
innerJobA
|
innerFlow2
</pre>
The following tests checks each stage of the flow run by forcing jobs to succeed or fail.
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the basic successful run of flow "jobf", driving every job to success by hand and
 * checking at each stage: resolved input properties, Azkaban-injected context properties
 * (job id, attempt, web links), and propagation of job output properties downstream.
 */
@Test
public void testBasicRun() throws Exception {
  // Flow-level parameter overrides supplied at execution time.
  final Map<String, String> flowParams = new HashMap<>();
  flowParams.put("param4", "override.4");
  flowParams.put("param10", "override.10");
  flowParams.put("param11", "override.11");
  final ExecutionOptions options = new ExecutionOptions();
  options.setFailureAction(FailureAction.FINISH_CURRENTLY_RUNNING);
  // Webserver URL is needed to build the *_LINK context properties asserted below.
  Props props = new Props();
  props.put(AZKABAN_WEBSERVER_URL, "http://localhost:8443");
  this.runner = this.testUtil.createFromFlowMap("jobf", options, flowParams, props);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After startup only the root jobs joba and joba1 should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  final Props joba = this.runner.getExecutableFlow().getExecutableNodePath("joba")
      .getInputProps();
  assertEquals("joba.1", joba.get("param1"));
  assertEquals("test1.2", joba.get("param2"));
  assertEquals("test1.3", joba.get("param3"));
  // Flow param override wins over the file-defined value.
  assertEquals("override.4", joba.get("param4"));
  assertEquals("test2.5", joba.get("param5"));
  assertEquals("test2.6", joba.get("param6"));
  assertEquals("test2.7", joba.get("param7"));
  assertEquals("test2.8", joba.get("param8"));
  // Azkaban-injected job context and web links.
  assertThat(joba.get(CommonJobProperties.JOB_ID)).isEqualTo("joba");
  assertThat(joba.get(CommonJobProperties.JOB_ATTEMPT)).isEqualTo("0");
  assertThat(joba.get(CommonJobProperties.EXECUTION_LINK))
      .matches("http://localhost:8443/executor\\?execid=\\d\\d\\d");
  assertThat(joba.get(CommonJobProperties.WORKFLOW_LINK))
      .matches("http://localhost:8443/manager\\?project=testProject&flow=jobf");
  assertThat(joba.get(CommonJobProperties.JOBEXEC_LINK))
      .matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=joba");
  assertThat(joba.get(CommonJobProperties.JOB_LINK))
      .matches("http://localhost:8443/manager\\?project=testProject&flow=jobf&job=joba");
  assertThat(joba.get(CommonJobProperties.ATTEMPT_LINK))
      .matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=joba&attempt=0");
  final Props joba1 = this.runner.getExecutableFlow().getExecutableNodePath("joba1")
      .getInputProps();
  assertEquals("test1.1", joba1.get("param1"));
  assertEquals("test1.2", joba1.get("param2"));
  assertEquals("test1.3", joba1.get("param3"));
  assertEquals("override.4", joba1.get("param4"));
  assertEquals("test2.5", joba1.get("param5"));
  assertEquals("test2.6", joba1.get("param6"));
  assertEquals("test2.7", joba1.get("param7"));
  assertEquals("test2.8", joba1.get("param8"));
  // 2. JOB A COMPLETES SUCCESSFULLY; its dependents jobb/jobc/jobd start
  InteractiveTestJob.getTestJob("joba").succeedJob(
      Props.of("output.joba", "joba", "output.override", "joba"));
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  final ExecutableNode node = this.runner.getExecutableFlow().getExecutableNodePath("jobb");
  assertEquals(Status.RUNNING, node.getStatus());
  final Props jobb = node.getInputProps();
  assertEquals("override.4", jobb.get("param4"));
  // Test that jobb's own properties overwrite joba's output properties
  assertEquals("moo", jobb.get("testprops"));
  assertEquals("jobb", jobb.get("output.override"));
  assertEquals("joba", jobb.get("output.joba"));
  final Props jobbInnerJobA = this.runner.getExecutableFlow()
      .getExecutableNodePath("jobb:innerJobA")
      .getInputProps();
  assertEquals("test1.1", jobbInnerJobA.get("param1"));
  assertEquals("test1.2", jobbInnerJobA.get("param2"));
  assertEquals("test1.3", jobbInnerJobA.get("param3"));
  assertEquals("override.4", jobbInnerJobA.get("param4"));
  assertEquals("test2.5", jobbInnerJobA.get("param5"));
  assertEquals("test2.6", jobbInnerJobA.get("param6"));
  assertEquals("test2.7", jobbInnerJobA.get("param7"));
  assertEquals("test2.8", jobbInnerJobA.get("param8"));
  assertEquals("joba", jobbInnerJobA.get("output.joba"));
  // Embedded-flow jobs report their unqualified job id but fully-qualified links.
  assertThat(jobbInnerJobA.get(CommonJobProperties.JOB_ID)).isEqualTo("innerJobA")
;
  assertThat(jobbInnerJobA.get(CommonJobProperties.JOB_ATTEMPT)).isEqualTo("0");
  assertThat(jobbInnerJobA.get(CommonJobProperties.EXECUTION_LINK))
      .matches("http://localhost:8443/executor\\?execid=\\d\\d\\d");
  assertThat(jobbInnerJobA.get(CommonJobProperties.WORKFLOW_LINK))
      .matches("http://localhost:8443/manager\\?project=testProject&flow=innerFlow");
  assertThat(jobbInnerJobA.get(CommonJobProperties.JOBEXEC_LINK))
      .matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=jobb:innerJobA");
  assertThat(jobbInnerJobA.get(CommonJobProperties.JOB_LINK))
      .matches("http://localhost:8443/manager\\?project=testProject&flow=innerFlow&job=innerJobA");
  assertThat(jobbInnerJobA.get(CommonJobProperties.ATTEMPT_LINK))
      .matches("http://localhost:8443/executor\\?execid=\\d\\d\\d&job=jobb:innerJobA&attempt=0");
  // 3. jobb:Inner completes
  /// innerJobA completes
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob(
      Props.of("output.jobb.innerJobA", "jobb.innerJobA"));
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  final Props jobbInnerJobB = this.runner.getExecutableFlow()
      .getExecutableNodePath("jobb:innerJobB")
      .getInputProps();
  assertEquals("test1.1", jobbInnerJobB.get("param1"));
  assertEquals("override.4", jobbInnerJobB.get("param4"));
  assertEquals("jobb.innerJobA",
      jobbInnerJobB.get("output.jobb.innerJobA"));
  assertEquals("moo", jobbInnerJobB.get("testprops"));
  /// innerJobB, C completes
  InteractiveTestJob.getTestJob("jobb:innerJobB").succeedJob(
      Props.of("output.jobb.innerJobB", "jobb.innerJobB"));
  InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob(
      Props.of("output.jobb.innerJobC", "jobb.innerJobC"));
  assertStatus("jobb:innerJobB", Status.SUCCEEDED);
  assertStatus("jobb:innerJobC", Status.SUCCEEDED);
  assertStatus("jobb:innerFlow", Status.RUNNING);
  // innerFlow receives the merged outputs of both of its parents.
  final Props jobbInnerJobD = this.runner.getExecutableFlow()
      .getExecutableNodePath("jobb:innerFlow")
      .getInputProps();
  assertEquals("test1.1", jobbInnerJobD.get("param1"));
  assertEquals("override.4", jobbInnerJobD.get("param4"));
  assertEquals("jobb.innerJobB",
      jobbInnerJobD.get("output.jobb.innerJobB"));
  assertEquals("jobb.innerJobC",
      jobbInnerJobD.get("output.jobb.innerJobC"));
  // 4. Finish up on inner flow for jobb; the embedded flow's output becomes jobb's output
  InteractiveTestJob.getTestJob("jobb:innerFlow").succeedJob(
      Props.of("output1.jobb", "test1", "output2.jobb", "test2"));
  assertStatus("jobb:innerFlow", Status.SUCCEEDED);
  assertStatus("jobb", Status.SUCCEEDED);
  final Props jobbOutput = this.runner.getExecutableFlow().getExecutableNodePath("jobb")
      .getOutputProps();
  assertEquals("test1", jobbOutput.get("output1.jobb"));
  assertEquals("test2", jobbOutput.get("output2.jobb"));
  // 5. Finish jobc, jobd; jobe starts and sees upstream outputs
  InteractiveTestJob.getTestJob("jobc").succeedJob(
      Props.of("output.jobc", "jobc"));
  assertStatus("jobc", Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
  assertStatus("jobd:innerJobA", Status.SUCCEEDED);
  assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
  assertStatus("jobd", Status.SUCCEEDED);
  assertStatus("jobe", Status.RUNNING);
  final Props jobd = this.runner.getExecutableFlow().getExecutableNodePath("jobe")
      .getInputProps();
  assertEquals("test1", jobd.get("output1.jobb"));
  assertEquals("jobc", jobd.get("output.jobc"));
  // 6. Finish off flow
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobe").succeedJob();
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobe", Status.SUCCEEDED);
  assertStatus("jobf", Status.RUNNING);
  InteractiveTestJob.getTestJob("jobf").succeedJob();
  assertStatus("jobf", Status.SUCCEEDED);
  waitForAndAssertFlowStatus(Status.SUCCEEDED);
  assertThreadShutDown();
  // The flow logger's appenders must have been cleaned up on completion.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the basic successful flow run, and also tests all output variables from each job.
|
testBasicRun
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests a flow with disabled jobs and a disabled job inside an embedded flow: disabled nodes
 * must end up SKIPPED (never run), their descendants stay READY, and the rest of the flow
 * proceeds normally to SUCCEEDED.
 */
@Test
public void testDisabledNormal() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  final ExecutableFlow flow = this.runner.getExecutableFlow();
  // Disable jobb (an entire embedded flow) and jobd's inner job before starting.
  flow.getExecutableNode("jobb").setStatus(Status.DISABLED);
  ((ExecutableFlowBase) flow.getExecutableNode("jobd")).getExecutableNode(
      "innerJobA").setStatus(Status.DISABLED);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After startup only the root jobs joba and joba1 should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY; disabled nodes are SKIPPED, their dependents still run
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.SKIPPED);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.SKIPPED);
  assertStatus("jobd:innerFlow2", Status.RUNNING);
  // Children of the skipped embedded flow jobb never leave READY
  assertStatus("jobb:innerJobA", Status.READY);
  assertStatus("jobb:innerJobB", Status.READY);
  assertStatus("jobb:innerJobC", Status.READY);
  assertStatus("jobb:innerFlow", Status.READY);
  // 3. jobc and jobd's inner flow complete; jobe becomes runnable
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
  assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
  assertStatus("jobd", Status.SUCCEEDED);
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("jobe", Status.RUNNING);
  InteractiveTestJob.getTestJob("jobe").succeedJob();
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  assertStatus("jobe", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobf", Status.RUNNING);
  // 4. jobf completes; the whole flow succeeds despite the skipped nodes
  InteractiveTestJob.getTestJob("jobf").succeedJob();
  assertStatus("jobf", Status.SUCCEEDED);
  waitForAndAssertFlowStatus(Status.SUCCEEDED);
  assertThreadShutDown();
  // The flow logger's appenders must have been cleaned up on completion.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests a flow with Disabled jobs and flows. They should properly SKIP executions
|
testDisabledNormal
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests a failure with the default FINISH_CURRENTLY_RUNNING action: after the first failure,
 * jobs already running (joba1) are allowed to finish, every job that had not started is
 * CANCELLED, and the flow finalizes as FAILED.
 */
@Test
public void testNormalFailure1() throws Exception {
  // Test propagation of KILLED status to embedded flows.
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After startup only the root jobs joba and joba1 should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A FAILS: its not-yet-started dependents are cancelled; joba1 keeps running
  InteractiveTestJob.getTestJob("joba").failJob();
  waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
  assertStatus("joba", Status.FAILED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.CANCELLED);
  assertStatus("jobc", Status.CANCELLED);
  assertStatus("jobd", Status.CANCELLED);
  // Children of cancelled embedded flows never started, so they stay READY
  assertStatus("jobd:innerJobA", Status.READY);
  assertStatus("jobd:innerFlow2", Status.READY);
  assertStatus("jobb:innerJobA", Status.READY);
  assertStatus("jobb:innerFlow", Status.READY);
  assertStatus("jobe", Status.CANCELLED);
  // 3. joba1, the last running job, completes; the flow finalizes as FAILED
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // The flow logger's appenders must have been cleaned up on completion.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests a failure with the default FINISH_CURRENTLY_RUNNING. After the first failure, every job
that started should complete, and the rest of the jobs should be skipped.
|
testNormalFailure1
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Test #2 on the default (FINISH_CURRENTLY_RUNNING) failure case: a failure in a sibling job
 * (joba1) lets already-running jobs and embedded-flow jobs finish, then kills their pending
 * successors.
 */
@Test
public void testNormalFailure2() throws Exception {
  // Test propagation of KILLED status to embedded flows different branch
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY; its dependents start running
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  // joba1 fails, putting the flow into FAILED_FINISHING
  InteractiveTestJob.getTestJob("joba1").failJob();
  assertStatus("joba1", Status.FAILED);
  waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
  // 3. Running inner jobs complete; their pending successors are cancelled and
  //    the enclosing embedded flows end up KILLED
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("jobd:innerJobA", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.CANCELLED);
  assertStatus("jobb:innerJobC", Status.CANCELLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobd", Status.KILLED);
  waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
  // jobc finishes last; downstream jobs are cancelled and the flow fails
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Test #2 on the default failure case.
|
testNormalFailure2
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Test #3 on the default (FINISH_CURRENTLY_RUNNING) failure case: a job inside an embedded
 * flow (jobb:innerJobB) fails, the embedded flow finishes its currently-running jobs and ends
 * FAILED, and the failure propagates to the parent flow.
 */
@Test
public void testNormalFailure3() throws Exception {
  // Test propagation of CANCELLED status to embedded flows different branch
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB in subflow FAILS
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  // innerJobB fails -> the embedded flow jobb goes FAILED_FINISHING
  InteractiveTestJob.getTestJob("jobb:innerJobB").failJob();
  assertStatus("jobb", Status.FAILED_FINISHING);
  assertStatus("jobb:innerJobB", Status.FAILED);
  waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
  // Remaining running inner jobs finish; pending successors are cancelled
  InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
  assertStatus("jobd:innerJobA", Status.SUCCEEDED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.KILLED);
  assertStatus("jobb:innerJobC", Status.SUCCEEDED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobb", Status.FAILED);
  // 3. jobc completes; downstream jobs are cancelled and the flow fails
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Test #3 on the default failure case: a failure inside an embedded flow with the default FINISH_CURRENTLY_RUNNING action.
|
testNormalFailure3
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests failures when the fail behaviour is FINISH_ALL_POSSIBLE. In this case, all jobs which
 * have had their prerequisites met can continue to run (e.g. jobd:innerFlow2 still starts after
 * the failure). Finishes when the failure is propagated to the last node of the flow.
 */
@Test
public void testFailedFinishingFailure3() throws Exception {
  // Test propagation of KILLED status to embedded flows different branch
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_ALL_POSSIBLE);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB in subflow FAILS
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  // innerJobB fails -> the embedded flow jobb goes FAILED_FINISHING
  InteractiveTestJob.getTestJob("jobb:innerJobB").failJob();
  assertStatus("jobb", Status.FAILED_FINISHING);
  assertStatus("jobb:innerJobB", Status.FAILED);
  waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
  InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
  assertStatus("jobb", Status.FAILED);
  assertStatus("jobd:innerJobA", Status.SUCCEEDED);
  // With FINISH_ALL_POSSIBLE, jobd:innerFlow2 still starts (prerequisite met).
  assertStatus("jobd:innerFlow2", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.SUCCEEDED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
  assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
  assertStatus("jobd", Status.SUCCEEDED);
  // 3. jobc completes; downstream jobs are cancelled and the flow fails
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests failures when the fail behaviour is FINISH_ALL_POSSIBLE. In this case, all jobs which
have had its pre-requisite met can continue to run. Finishes when the failure is propagated to
the last node of the flow.
|
testFailedFinishingFailure3
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the failure condition when a failure invokes a cancel (CANCEL_ALL) on the flow.
 * Jobs that were running are assigned KILLED, and nodes skipped due to prior errors are
 * assigned CANCELLED.
 */
@Test
public void testCancelOnFailure() throws Exception {
  // Test propagation of KILLED status to embedded flows different branch
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB in subflow FAILS
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  // The failure triggers CANCEL_ALL: running jobs -> KILLED, unstarted jobs -> CANCELLED.
  InteractiveTestJob.getTestJob("jobb:innerJobB").failJob();
  assertStatus("jobb", Status.FAILED);
  assertStatus("jobb:innerJobB", Status.FAILED);
  assertStatus("jobb:innerJobC", Status.KILLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobc", Status.KILLED);
  assertStatus("jobd", Status.KILLED);
  assertStatus("jobd:innerJobA", Status.KILLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the failure condition when a failure invokes a cancel (or killed) on the flow.
Any jobs that are running will be assigned a KILLED state, and any nodes which were skipped due
to prior errors will be given a CANCELLED state.
|
testCancelOnFailure
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the manual killing of a flow. The flow is healthy before {@code kill} is called;
 * running jobs become KILLED and not-yet-started jobs become CANCELLED.
 */
@Test
public void testCancel() throws Exception {
  // Test propagation of KILLED status to embedded flows different branch
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. Let the flow progress normally before killing it
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  // 3. Manual kill: running jobs -> KILLED, unstarted jobs -> CANCELLED.
  this.runner.kill("me");
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobb:innerJobB", Status.KILLED);
  assertStatus("jobb:innerJobC", Status.KILLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobc", Status.KILLED);
  assertStatus("jobd", Status.KILLED);
  assertStatus("jobd:innerJobA", Status.KILLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the manual Killing of a flow. In this case, the flow is just fine before the cancel is
called.
|
testCancel
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the manual invocation of cancel on a flow that is FAILED_FINISHING: the kill ends the
 * finishing phase immediately, killing running jobs and cancelling pending ones.
 */
@Test
public void testManualCancelOnFailure() throws Exception {
  // Test propagation of KILLED status to embedded flows different branch
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB in subflow FAILS
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  // innerJobB fails; flow goes FAILED_FINISHING (default action keeps running jobs going)
  InteractiveTestJob.getTestJob("jobb:innerJobB").failJob();
  assertStatus("jobb:innerJobB", Status.FAILED);
  assertStatus("jobb", Status.FAILED_FINISHING);
  waitForAndAssertFlowStatus(Status.FAILED_FINISHING);
  // 3. Manual kill during FAILED_FINISHING terminates everything immediately.
  this.runner.kill("me");
  assertStatus("jobb", Status.FAILED);
  assertStatus("jobb:innerJobC", Status.KILLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobc", Status.KILLED);
  assertStatus("jobd", Status.KILLED);
  assertStatus("jobd:innerJobA", Status.KILLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the manual invocation of cancel on a flow that is FAILED_FINISHING
|
testManualCancelOnFailure
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests that pause and resume work: while paused, already-running jobs may finish but no new
 * jobs are started; after resume, the flow continues to a SUCCEEDED finish.
 */
@Test
public void testPause() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  this.runner.pause("test");
  InteractiveTestJob.getTestJob("joba").succeedJob();
  // 2.1 JOB A COMPLETES SUCCESSFULLY AFTER PAUSE
  assertStatus("joba", Status.SUCCEEDED);
  waitForAndAssertFlowStatus(Status.PAUSED);
  // 2.2 Flow is unpaused
  this.runner.resume("test");
  waitForAndAssertFlowStatus(Status.RUNNING);
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. jobb:Inner completes
  this.runner.pause("test");
  /// innerJobA completes, but paused
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob(
      Props.of("output.jobb.innerJobA", "jobb.innerJobA"));
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  this.runner.resume("test");
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  /// innerJobB, C completes
  InteractiveTestJob.getTestJob("jobb:innerJobB").succeedJob(
      Props.of("output.jobb.innerJobB", "jobb.innerJobB"));
  InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob(
      Props.of("output.jobb.innerJobC", "jobb.innerJobC"));
  assertStatus("jobb:innerJobB", Status.SUCCEEDED);
  assertStatus("jobb:innerJobC", Status.SUCCEEDED);
  assertStatus("jobb:innerFlow", Status.RUNNING);
  // 4. Finish up on inner flow for jobb
  InteractiveTestJob.getTestJob("jobb:innerFlow").succeedJob(
      Props.of("output1.jobb", "test1", "output2.jobb", "test2"));
  assertStatus("jobb:innerFlow", Status.SUCCEEDED);
  assertStatus("jobb", Status.SUCCEEDED);
  // 5. Finish jobc, jobd
  InteractiveTestJob.getTestJob("jobc").succeedJob(
      Props.of("output.jobc", "jobc"));
  assertStatus("jobc", Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerFlow2").succeedJob();
  assertStatus("jobd:innerJobA", Status.SUCCEEDED);
  assertStatus("jobd:innerFlow2", Status.SUCCEEDED);
  assertStatus("jobd", Status.SUCCEEDED);
  assertStatus("jobe", Status.RUNNING);
  // 6. Finish off flow
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobe").succeedJob();
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobe", Status.SUCCEEDED);
  assertStatus("jobf", Status.RUNNING);
  InteractiveTestJob.getTestJob("jobf").succeedJob();
  assertStatus("jobf", Status.SUCCEEDED);
  waitForAndAssertFlowStatus(Status.SUCCEEDED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests that pause and resume work
|
testPause
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the condition for a manual invocation of a KILL (cancel) on a flow that has been
 * paused. The flow should unpause and be killed immediately.
 */
@Test
public void testPauseKill() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. Pause; running jobs may still finish, but their successors are not started
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").succeedJob();
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  assertStatus("jobd:innerJobA", Status.SUCCEEDED);
  // 4. Kill while paused: the flow is terminated immediately
  this.runner.kill("me");
  assertStatus("joba1", Status.KILLED);
  assertStatus("jobb:innerJobB", Status.CANCELLED);
  assertStatus("jobb:innerJobC", Status.CANCELLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobc", Status.KILLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.KILLED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Test the condition for a manual invocation of a KILL (cancel) on a flow that has been paused.
The flow should unpause and be killed immediately.
|
testPauseKill
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case where a failure occurs on a paused flow. The flow stays paused; only on
 * resume is the failure acted upon (remaining jobs cancelled, flow finishes FAILED).
 */
@Test
public void testPauseFail() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_CURRENTLY_RUNNING);
  final EventCollectorListener eventCollector = new EventCollectorListener();
  this.runner.addListener(eventCollector);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").failJob();
  assertStatus("jobd:innerJobA", Status.FAILED);
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  // When flow is paused, no new jobs are started. So these two jobs that were already running
  // are allowed to finish, but their dependencies aren't started.
  // Now, ensure that jobd:innerJobA has completely finished as failed before resuming.
  // If we would resume before the job failure has been completely processed, FlowRunner would be
  // able to start some new jobs instead of cancelling everything.
  FlowRunnerTestUtil.waitEventFired(eventCollector, "jobd:innerJobA", Status.FAILED);
  waitForAndAssertFlowStatus(Status.PAUSED);
  this.runner.resume("me");
  // After resume, the recorded failure is applied: pending jobs are cancelled.
  assertStatus("jobb:innerJobB", Status.CANCELLED);
  assertStatus("jobb:innerJobC", Status.CANCELLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.FAILED);
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobf", Status.CANCELLED);
  assertStatus("jobe", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case where a failure occurs on a Paused flow. In this case, the flow should stay
paused.
|
testPauseFail
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the condition when FINISH_ALL_POSSIBLE is the failure action and a failure happens
 * during a pause. The failure is not acted upon until the flow is resumed; afterwards all
 * jobs whose prerequisites are met still run to completion.
 */
@Test
public void testPauseFailFinishAll() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_ALL_POSSIBLE);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. Pause, then let one running job succeed and another fail while paused
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobb:innerJobA").succeedJob();
  InteractiveTestJob.getTestJob("jobd:innerJobA").failJob();
  assertStatus("jobd:innerJobA", Status.FAILED);
  assertStatus("jobb:innerJobA", Status.SUCCEEDED);
  // 4. On resume, FINISH_ALL_POSSIBLE lets eligible jobs keep starting
  this.runner.resume("me");
  assertStatus("jobb:innerJobB", Status.RUNNING);
  assertStatus("jobb:innerJobC", Status.RUNNING);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.FAILED);
  InteractiveTestJob.getTestJob("jobc").succeedJob();
  InteractiveTestJob.getTestJob("joba1").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobB").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerJobC").succeedJob();
  InteractiveTestJob.getTestJob("jobb:innerFlow").succeedJob();
  assertStatus("jobc", Status.SUCCEEDED);
  assertStatus("joba1", Status.SUCCEEDED);
  assertStatus("jobb:innerJobB", Status.SUCCEEDED);
  assertStatus("jobb:innerJobC", Status.SUCCEEDED);
  assertStatus("jobb:innerFlow", Status.SUCCEEDED);
  assertStatus("jobb", Status.SUCCEEDED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  waitForAndAssertFlowStatus(Status.FAILED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Test the condition when a Finish all possible is called during a pause. The Failure is not
acted upon until the flow is resumed.
|
testPauseFailFinishAll
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case when a job is killed by SLA causing a flow to fail. The flow should end up
 * in "killed" status.
 */
@Test
public void testFlowKilledByJobLevelSLA() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
  FlowRunnerTestUtil.startThread(this.runner);
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // Find the running job runner for joba and simulate an SLA kill on it.
  for (final JobRunner jobRunner : this.runner.getActiveJobRunners()) {
    if (jobRunner.getJobId().equals("joba")) {
      jobRunner.killBySLA();
      break;
    }
  }
  // With CANCEL_ALL, the SLA kill of a single job takes down the whole flow.
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case when a job is killed by SLA causing a flow to fail. The flow should be in
"killed" status.
|
testFlowKilledByJobLevelSLA
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case when a flow is paused and a failure causes a kill (CANCEL_ALL). The flow
 * should die immediately regardless of the 'paused' status.
 */
@Test
public void testPauseFailKill() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.CANCEL_ALL);
  // 1. START FLOW
  FlowRunnerTestUtil.startThread(this.runner);
  // After it starts up, only joba should be running
  assertStatus("joba", Status.RUNNING);
  assertStatus("joba1", Status.RUNNING);
  // 2. JOB A COMPLETES SUCCESSFULLY
  InteractiveTestJob.getTestJob("joba").succeedJob();
  assertStatus("joba", Status.SUCCEEDED);
  assertStatus("joba1", Status.RUNNING);
  assertStatus("jobb", Status.RUNNING);
  assertStatus("jobc", Status.RUNNING);
  assertStatus("jobd", Status.RUNNING);
  assertStatus("jobd:innerJobA", Status.RUNNING);
  assertStatus("jobb:innerJobA", Status.RUNNING);
  // 3. Pause, then fail a job: CANCEL_ALL overrides the pause and kills everything
  this.runner.pause("me");
  waitForAndAssertFlowStatus(Status.PAUSED);
  InteractiveTestJob.getTestJob("jobd:innerJobA").failJob();
  assertStatus("jobd:innerJobA", Status.FAILED);
  assertStatus("jobd:innerFlow2", Status.CANCELLED);
  assertStatus("jobd", Status.FAILED);
  assertStatus("jobb:innerJobA", Status.KILLED);
  assertStatus("jobb:innerJobB", Status.CANCELLED);
  assertStatus("jobb:innerJobC", Status.CANCELLED);
  assertStatus("jobb:innerFlow", Status.CANCELLED);
  assertStatus("jobb", Status.KILLED);
  assertStatus("jobc", Status.KILLED);
  assertStatus("jobe", Status.CANCELLED);
  assertStatus("jobf", Status.CANCELLED);
  assertStatus("joba1", Status.KILLED);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case when a flow is paused and a failure causes a kill. The flow should die
immediately regardless of the 'paused' status.
|
testPauseFailKill
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Tests the case when an execution is killed before any of its jobs have started. The final
 * execution status should be "KILLED" and no child job should leave READY.
 */
@Test
public void testKillBeforeStart() throws Exception {
  this.runner = this.testUtil.createFromFlowMap("jobf", FailureAction.FINISH_ALL_POSSIBLE);
  this.runner.addListener((event) -> {
    if (event.getType().equals(EventType.FLOW_STARTED)) {
      // kill interrupts the current thread which would cause an exception if called directly,
      // so do it from another thread.
      Thread aThread = new Thread( () -> this.runner.kill());
      aThread.start();
      try {
        // give the thread a chance to kill the execution
        aThread.join();
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
    }
  });
  FlowRunnerTestUtil.startThread(this.runner).join();
  // children jobs shouldn't start
  assertStatus("joba", Status.READY);
  assertStatus("joba1", Status.READY);
  waitForAndAssertFlowStatus(Status.KILLED);
  assertThreadShutDown();
  // Flow logger appenders must have been removed once the flow finished.
  Assert.assertFalse(this.runner.getLogger().getAllAppenders().hasMoreElements());
}
|
Tests the case when an execution is killed before it has started. The final execution
status should "KILLED".
|
testKillBeforeStart
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTest2.java
|
Apache-2.0
|
/**
 * Initialize the project with the flow definitions stored in the given source directory. Also
 * copy the source directory to the working directory.
 *
 * @param project project to initialize
 * @param sourceDir the source dir containing the flow definitions
 * @param workingDir the working dir the source files are copied into
 * @return the flow name to flow map
 * @throws ProjectManagerException the project manager exception
 * @throws IOException the io exception
 */
public static Map<String, Flow> prepareProject(final Project project, final File sourceDir,
    final File workingDir)
    throws ProjectManagerException, IOException {
  final FlowLoaderFactory loaderFactory = new FlowLoaderFactory(new Props(null));
  final FlowLoader loader = loaderFactory.createFlowLoader(sourceDir);
  LOG.info("Loading project flows from " + sourceDir);
  loader.loadProjectFlow(project, sourceDir);
  if (!loader.getErrors().isEmpty()) {
    // Surface loader errors through the logger and the exception message (instead of stdout),
    // so a test failure carries the actual cause.
    for (final String error : loader.getErrors()) {
      LOG.error(error);
    }
    throw new RuntimeException(String.format(
        "Errors found in loading flows into a project ( %s ). From the directory: ( %s ): %s",
        project.getName(), sourceDir, String.join("; ", loader.getErrors())));
  }
  final Map<String, Flow> flowMap = loader.getFlowMap();
  LOG.info("Loaded flows: " + flowMap.keySet());
  project.setFlows(flowMap);
  // Mirror the source into the working dir so the execution can read the flow files.
  FileUtils.copyDirectory(sourceDir, workingDir);
  return flowMap;
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
prepareProject
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Blocks until the event collector has recorded an event for {@code nestedId} carrying the
 * given status, or fails the test after ~10 seconds of polling (1000 polls of up to 10 ms).
 *
 * @param eventCollector the listener accumulating fired events
 * @param nestedId the nested id of the node whose event is awaited
 * @param status the status the awaited event must carry
 * @throws InterruptedException if the waiting thread is interrupted
 */
public static void waitEventFired(final EventCollectorListener eventCollector,
    final String nestedId, final Status status)
    throws InterruptedException {
  for (int attempt = 0; attempt < 1000; attempt++) {
    final boolean fired = eventCollector.getEventList().stream()
        .anyMatch(event -> event.getData().getStatus() == status
            && event.getData().getNestedId().equals(nestedId));
    if (fired) {
      return;
    }
    // Wake early when a new event is handled; otherwise re-poll after at most 10 ms.
    synchronized (EventCollectorListener.handleEvent) {
      EventCollectorListener.handleEvent.wait(10L);
    }
  }
  fail("Event wasn't fired with [" + nestedId + "], " + status);
}
|
Wait until the event collector records an event for the given nested id reaching the given
status, or fail the test after ~10 seconds of polling.
@param eventCollector the listener accumulating fired events
@param nestedId the nested id of the node whose event is awaited
@param status the awaited status
@throws InterruptedException if interrupted while waiting
|
waitEventFired
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Copies the given execution directory into the working directory and builds an
 * {@link ExecutableFlow} for the named flow, parsed from its serialized ".flow" JSON file.
 *
 * @param workingDir directory the execution files are copied into; becomes the execution path
 * @param execDir source directory containing the serialized flow files
 * @param flowName name of the flow; "&lt;flowName&gt;.flow" must exist in the directory
 * @param execId execution id to assign to the executable flow
 * @return the prepared executable flow
 * @throws IOException if copying the directory or reading the flow file fails
 */
public static ExecutableFlow prepareExecDir(final File workingDir, final File execDir,
    final String flowName, final int execId) throws IOException {
  FileUtils.copyDirectory(execDir, workingDir);
  final Object flowObj =
      JSONUtils.parseJSONFromFile(new File(workingDir, flowName + ".flow"));
  final ExecutableFlow executableFlow =
      new ExecutableFlow(new Project(1, "test"), Flow.flowFromObject(flowObj));
  executableFlow.setExecutionId(execId);
  executableFlow.setExecutionPath(workingDir.getPath());
  return executableFlow;
}
|
Copy the execution directory into the working directory and build an ExecutableFlow for the
named flow from its serialized ".flow" JSON file.
@param workingDir the directory the execution files are copied into
@param execDir the source directory containing the serialized flow files
@param flowName the name of the flow to load
@param execId the execution id to assign
@return the prepared executable flow
@throws IOException the io exception
|
prepareExecDir
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Runs the given flow runner on a fresh thread and hands the started thread back so
 * the caller can join on it.
 *
 * @param runner the flow runner to execute
 * @return the already-started thread
 */
public static Thread startThread(final FlowRunner runner) {
  final Thread runnerThread = new Thread(runner);
  runnerThread.start();
  return runnerThread;
}
|
Start the given FlowRunner on a new thread and return the started thread so the caller
can join on it.
@param runner the flow runner to execute
@return the started thread
|
startThread
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} for the given flow file, attaching a fresh
 * {@link EventCollectorListener}.
 *
 * @param flowName name of the flow to load
 * @return the configured flow runner
 */
public FlowRunner createFromFlowFile(final String flowName) throws Exception {
  final EventCollectorListener collector = new EventCollectorListener();
  return createFromFlowFile(collector, flowName);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowFile
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} for the given flow file with default execution options.
 *
 * @param eventCollector listener to attach to the runner
 * @param flowName       name of the flow to load
 * @return the configured flow runner
 */
public FlowRunner createFromFlowFile(final EventCollectorListener eventCollector,
    final String flowName) throws Exception {
  final ExecutionOptions defaultOptions = new ExecutionOptions();
  return createFromFlowFile(flowName, eventCollector, defaultOptions);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowFile
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} for the given flow file with explicit options but no
 * flow watcher and no pipeline level.
 *
 * @param flowName       name of the flow to load
 * @param eventCollector listener to attach to the runner
 * @param options        execution options for the run
 * @return the configured flow runner
 */
public FlowRunner createFromFlowFile(final String flowName,
    final EventCollectorListener eventCollector, final ExecutionOptions options)
    throws Exception {
  final FlowWatcher noWatcher = null;
  final Integer noPipeline = null;
  return createFromFlowFile(flowName, eventCollector, options, noWatcher, noPipeline);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowFile
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} for the given flow file with a flow watcher and a
 * pipeline level, using a fresh collector and default options.
 *
 * @param flowName name of the flow to load
 * @param watcher  watcher tracking the pipelined upstream execution
 * @param pipeline pipeline level to apply
 * @return the configured flow runner
 */
public FlowRunner createFromFlowFile(final String flowName, final FlowWatcher watcher,
    final Integer pipeline) throws Exception {
  final EventCollectorListener collector = new EventCollectorListener();
  final ExecutionOptions defaultOptions = new ExecutionOptions();
  return createFromFlowFile(flowName, collector, defaultOptions, watcher, pipeline);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowFile
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} for the given flow file: prepares the execution
 * directory, marks the flow for POLL dispatch, applies pipeline settings when a
 * watcher is supplied, attaches a version set, and wires the watcher into the runner.
 *
 * @param flowName       name of the flow to load from the project directory
 * @param eventCollector listener to attach to the runner
 * @param options        execution options; mutated with pipeline settings when
 *                       {@code watcher} is non-null
 * @param watcher        optional watcher for pipelined executions (may be null)
 * @param pipeline       pipeline level; only used when {@code watcher} is non-null
 * @return the configured flow runner
 */
public FlowRunner createFromFlowFile(final String flowName,
    final EventCollectorListener eventCollector,
    final ExecutionOptions options, final FlowWatcher watcher, final Integer pipeline)
    throws Exception {
  final ExecutableFlow exFlow = FlowRunnerTestUtil
      .prepareExecDir(this.workingDir, this.projectDir, flowName, 1);
  exFlow.setSubmitUser("submitUser");
  exFlow.setDispatchMethod(DispatchMethod.POLL);
  if (watcher != null) {
    // Pipeline settings only make sense when there is an upstream execution to watch.
    options.setPipelineLevel(pipeline);
    options.setPipelineExecutionId(watcher.getExecId());
  }
  // Add version set to executable flow
  exFlow.setVersionSet(createVersionSet());
  final FlowRunner runner = createFromExecutableFlow(eventCollector, exFlow, options,
      new HashMap<>(), new Props(), mock(AlerterHolder.class));
  runner.setFlowWatcher(watcher);
  return runner;
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowFile
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} from the in-memory flow map with a job id prefix and
 * default execution options.
 *
 * @param flowName    name of the flow in the flow map
 * @param jobIdPrefix prefix passed to {@code InteractiveTestJob}
 * @return the configured flow runner
 */
public FlowRunner createFromFlowMap(final String flowName, final String jobIdPrefix)
    throws Exception {
  final ExecutionOptions defaultOptions = new ExecutionOptions();
  return createFromFlowMap(flowName, jobIdPrefix, defaultOptions);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} from the in-memory flow map with a job id prefix and
 * explicit execution options, using empty Azkaban props.
 *
 * @param flowName    name of the flow in the flow map
 * @param jobIdPrefix prefix passed to {@code InteractiveTestJob}
 * @param options     execution options for the run
 * @return the configured flow runner
 */
public FlowRunner createFromFlowMap(final String flowName, final String jobIdPrefix,
    final ExecutionOptions options)
    throws Exception {
  final Props emptyProps = new Props();
  return createFromFlowMap(flowName, jobIdPrefix, options, emptyProps);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} from the in-memory flow map with explicit flow
 * parameters, no event collector, and a mocked alerter holder.
 *
 * @param flowName   name of the flow in the flow map
 * @param flowParams flow parameters to add to the execution options
 * @return the configured flow runner
 */
public FlowRunner createFromFlowMap(final String flowName,
    final HashMap<String, String> flowParams) throws Exception {
  final AlerterHolder mockedAlerters = mock(AlerterHolder.class);
  return createFromFlowMap(null, flowName, null, flowParams, new Props(), mockedAlerters);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} from the in-memory flow map with explicit options,
 * flow parameters and Azkaban props; no event collector, mocked alerter holder.
 *
 * @param flowName    name of the flow in the flow map
 * @param options     execution options for the run
 * @param flowParams  flow parameters to add to the execution options
 * @param azkabanProps Azkaban server props for the runner
 * @return the configured flow runner
 */
public FlowRunner createFromFlowMap(final String flowName, final ExecutionOptions options,
    final Map<String, String> flowParams, final Props azkabanProps) throws Exception {
  final AlerterHolder mockedAlerters = mock(AlerterHolder.class);
  return createFromFlowMap(null, flowName, options, flowParams, azkabanProps, mockedAlerters);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} by looking the flow up in the in-memory flow map and
 * wrapping it in an executable flow submitted by "submitUser".
 *
 * @param eventCollector listener to attach to the runner (may be null)
 * @param flowName       name of the flow in the flow map
 * @param options        execution options (may be null)
 * @param flowParams     flow parameters to add to the execution options
 * @param azkabanProps   Azkaban server props for the runner
 * @param alerterHolder  alerter holder handed to the runner
 * @return the configured flow runner
 */
public FlowRunner createFromFlowMap(final EventCollectorListener eventCollector,
    final String flowName, final ExecutionOptions options, final Map<String, String> flowParams,
    final Props azkabanProps, final AlerterHolder alerterHolder) throws Exception {
  LOG.info("Creating a FlowRunner for flow '" + flowName + "'");
  final Flow mappedFlow = this.flowMap.get(flowName);
  final ExecutableFlow executableFlow = new ExecutableFlow(this.project, mappedFlow);
  executableFlow.setSubmitUser("submitUser");
  return createFromExecutableFlow(eventCollector, executableFlow, options, flowParams,
      azkabanProps, alerterHolder);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} from the in-memory flow map configured with the given
 * failure action and otherwise default options.
 *
 * @param flowName name of the flow in the flow map
 * @param action   failure action to set on the execution options
 * @return the configured flow runner
 */
public FlowRunner createFromFlowMap(final String flowName, final FailureAction action)
    throws Exception {
  final ExecutionOptions optionsWithAction = new ExecutionOptions();
  optionsWithAction.setFailureAction(action);
  return createFromFlowMap(flowName, optionsWithAction, new HashMap<>(), new Props());
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Creates a {@link FlowRunner} from the in-memory flow map, translating the job id
 * prefix into an {@code InteractiveTestJob} flow parameter.
 *
 * @param flowName     name of the flow in the flow map
 * @param jobIdPrefix  prefix passed to {@code InteractiveTestJob}
 * @param options      execution options for the run
 * @param azkabanProps Azkaban server props for the runner
 * @return the configured flow runner
 */
private FlowRunner createFromFlowMap(final String flowName, final String jobIdPrefix,
    final ExecutionOptions options, final Props azkabanProps) throws Exception {
  final Map<String, String> flowParams = new HashMap<>();
  flowParams.put(InteractiveTestJob.JOB_ID_PREFIX, jobIdPrefix);
  final EventCollectorListener collector = new EventCollectorListener();
  return createFromFlowMap(collector, flowName, options, flowParams,
      azkabanProps, mock(AlerterHolder.class));
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromFlowMap
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Wires an {@link ExecutableFlow} into a runnable {@link FlowRunner}: assigns a fresh
 * execution id and path, applies options and flow parameters, uploads the flow via the
 * executor loader, and constructs the runner with fresh metrics objects.
 *
 * @param eventCollector listener to attach to the runner (skipped when null)
 * @param exFlow         the executable flow to run; mutated with id, path and options
 * @param options        execution options (skipped when null)
 * @param flowParams     flow parameters merged into the execution options
 * @param azkabanProps   Azkaban server props for the runner
 * @param alerterHolder  alerter holder handed to the runner
 * @return the configured flow runner
 */
private FlowRunner createFromExecutableFlow(final EventCollectorListener eventCollector,
    final ExecutableFlow exFlow, final ExecutionOptions options,
    final Map<String, String> flowParams, final Props azkabanProps,
    final AlerterHolder alerterHolder) throws Exception {
  // Monotonically increasing execution id shared across runners built by this utility.
  final int exId = id++;
  exFlow.setExecutionPath(this.workingDir.getPath());
  exFlow.setExecutionId(exId);
  if (options != null) {
    exFlow.setExecutionOptions(options);
    // Disabled jobs must be applied to the flow before it starts running.
    FlowUtils.applyDisabledJobs(options.getDisabledJobs(), exFlow);
  }
  exFlow.getExecutionOptions().addAllFlowParameters(flowParams)
;
  this.executorLoader.uploadExecutableFlow(exFlow);
  final MetricsManager metricsManager = new MetricsManager(new MetricRegistry());
  final CommonMetrics commonMetrics = new CommonMetrics(metricsManager);
  final ExecMetrics execMetrics = new ExecMetrics(metricsManager);
  final FlowRunner runner =
      new FlowRunner(exFlow, this.executorLoader, this.executionLogsLoader, this.projectLoader,
          this.jobtypeManager, azkabanProps, null, alerterHolder, commonMetrics, execMetrics);
  if (eventCollector != null) {
    runner.addListener(eventCollector);
  }
  return runner;
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createFromExecutableFlow
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/** Returns the executor loader used by runners built through this utility. */
public ExecutorLoader getExecutorLoader() {
  return this.executorLoader;
}
|
Returns the executor loader used by runners built through this utility.
@return the executor loader
|
getExecutorLoader
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/** Replaces the executor loader used by runners built through this utility. */
public void setExecutorLoader(final MockExecutorLoader executorLoader) {
  this.executorLoader = executorLoader;
}
|
Replaces the executor loader used by runners built through this utility.
@param executorLoader the mock executor loader to use
|
setExecutorLoader
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/** Returns the project loader used by runners built through this utility. */
public ProjectLoader getProjectLoader() {
  return this.projectLoader;
}
|
Returns the project loader used by runners built through this utility.
@return the project loader
|
getProjectLoader
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/** Returns the project this utility was initialized with. */
public Project getProject() {
  return this.project;
}
|
Returns the project this utility was initialized with.
@return the project
|
getProject
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Builds a canned three-image version set (azkaban-base, azkaban-config, spark) used
 * to decorate executable flows in tests.
 *
 * @return a version set with a fixed JSON payload, md5 hex and id 1
 */
public VersionSet createVersionSet() {
  final String versionSetJson = "{\"azkaban-base\":{\"version\":\"7.0.4\",\"path\":\"path1\","
      + "\"state\":\"ACTIVE\"},\"azkaban-config\":{\"version\":\"9.1.1\",\"path\":\"path2\","
      + "\"state\":\"ACTIVE\"},\"spark\":{\"version\":\"8.0\",\"path\":\"path3\","
      + "\"state\":\"ACTIVE\"}}";
  final String versionSetMd5 = "43966138aebfdc4438520cc5cd2aefa8";
  return new VersionSet(versionSetJson, versionSetMd5, 1);
}
|
Initialize the project with the flow definitions stored in the given source directory. Also
copy the source directory to the working directory.
@param project project to initialize
@param sourceDir the source dir
@param workingDir the working dir
@return the flow name to flow map
@throws ProjectManagerException the project manager exception
@throws IOException the io exception
|
createVersionSet
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerTestUtil.java
|
Apache-2.0
|
/**
 * Kills a running basic flow (no explicit end node) after jobA and jobB succeed, and
 * verifies jobC and the flow itself end up KILLED.
 *
 * <p>Ignored: suspected race between the FlowRunner thread and the killing thread —
 * see issues #1921 and #1311 (per the accompanying notes).</p>
 */
@Ignore
@Test
public void testKillBasicFlowWithoutEndNode() throws Exception {
  setUp(BASIC_FLOW_YAML_DIR, BASIC_FLOW_YAML_FILE);
  final HashMap<String, String> flowProps = new HashMap<>();
  this.runner = this.testUtil.createFromFlowMap(BASIC_FLOW_NAME, flowProps);
  final ExecutableFlow flow = this.runner.getExecutableFlow();
  final Thread thread = FlowRunnerTestUtil.startThread(this.runner);
  assertStatus("jobA", Status.SUCCEEDED);
  assertStatus("jobB", Status.SUCCEEDED);
  this.runner.kill();
  assertStatus("jobC", Status.KILLED);
  assertFlowStatus(flow, Status.KILLED);
  thread.join();
}
|
There seems to be an actual race condition bug in the runtime code. See: issue #1921: Flaky
test FlowRunnerTestYaml & issue #1311: Potential race condition between flowRunner thread and
jetty killing thread. Disable this test until the potential bug is fixed or new DAG engine
code is ready.
|
testKillBasicFlowWithoutEndNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
Apache-2.0
|
/**
 * Fails jobC while jobA and jobB succeed, and verifies the dependent jobD is
 * CANCELLED and the flow finishes FAILED.
 *
 * <p>Ignored: suspected race between the FlowRunner thread and the killing thread —
 * see issues #1921 and #1311 (per the accompanying notes).</p>
 */
@Ignore
@Test
public void testFailBasicFlowWithoutEndNode() throws Exception {
  setUp(FAIL_BASIC_FLOW_YAML_DIR, FAIL_BASIC_FLOW_YAML_FILE);
  final HashMap<String, String> flowProps = new HashMap<>();
  this.runner = this.testUtil.createFromFlowMap(FAIL_BASIC_FLOW_NAME, flowProps);
  final ExecutableFlow flow = this.runner.getExecutableFlow();
  final Thread thread = FlowRunnerTestUtil.startThread(this.runner);
  InteractiveTestJob.getTestJob("jobC").failJob();
  assertStatus("jobC", Status.FAILED);
  InteractiveTestJob.getTestJob("jobB").succeedJob();
  assertStatus("jobB", Status.SUCCEEDED);
  InteractiveTestJob.getTestJob("jobA").succeedJob();
  assertStatus("jobA", Status.SUCCEEDED);
  assertStatus("jobD", Status.CANCELLED);
  assertFlowStatus(flow, Status.FAILED);
  thread.join();
}
|
There seems to be an actual race condition bug in the runtime code. See: issue #1921: Flaky
test FlowRunnerTestYaml & issue #1311: Potential race condition between flowRunner thread and
jetty killing thread. Disable this test until the potential bug is fixed or new DAG engine
code is ready.
|
testFailBasicFlowWithoutEndNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
Apache-2.0
|
/**
 * Runs a flow containing an embedded sub-flow (no explicit end node) and verifies
 * every node — including the embedded flow and its children — succeeds, as does the
 * top-level flow.
 */
@Test
public void testEmbeddedFlowWithoutEndNode() throws Exception {
  setUp(EMBEDDED_FLOW_YAML_DIR, EMBEDDED_FLOW_YAML_FILE);
  final HashMap<String, String> flowProps = new HashMap<>();
  this.runner = this.testUtil.createFromFlowMap(EMBEDDED_FLOW_NAME, flowProps);
  final ExecutableFlow flow = this.runner.getExecutableFlow();
  final Thread thread = FlowRunnerTestUtil.startThread(this.runner);
  assertStatus("jobA", Status.SUCCEEDED);
  assertStatus("embedded_flow1:jobB", Status.SUCCEEDED);
  assertStatus("embedded_flow1:jobC", Status.SUCCEEDED);
  assertStatus("embedded_flow1", Status.SUCCEEDED);
  assertStatus("jobD", Status.SUCCEEDED);
  assertFlowStatus(flow, Status.SUCCEEDED);
  thread.join();
}
|
There seems to be an actual race condition bug in the runtime code. See: issue #1921: Flaky
test FlowRunnerTestYaml & issue #1311: Potential race condition between flowRunner thread and
jetty killing thread. Disable this test until the potential bug is fixed or new DAG engine
code is ready.
|
testEmbeddedFlowWithoutEndNode
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
Apache-2.0
|
/**
 * Configures failure emails, fails jobA and jobB while jobC succeeds, and verifies a
 * single "Flow finished" alert is sent once the flow ends FAILED.
 */
@Test
public void testAlertOnFlowFinished() throws Exception {
  setUp(ALERT_FLOW_YAML_DIR, ALERT_FLOW_YAML_FILE);
  final Emailer emailAlerter = mock(Emailer.class);
  final Props azkabanProps = new Props();
  azkabanProps.put(ConfigurationKeys.AZKABAN_EXECUTION_DISPATCH_METHOD, "POLL");
  final AlerterHolder alerterHolder = new AlerterHolder(azkabanProps, emailAlerter);
  final ExecutionOptions executionOptions = new ExecutionOptions();
  executionOptions.setFailureEmails(Arrays.asList("test@example.com"));
  this.runner = this.testUtil.createFromFlowMap(null, ALERT_FLOW_NAME, executionOptions,
      new HashMap<>(), azkabanProps, alerterHolder);
  final ExecutableFlow flow = this.runner.getExecutableFlow();
  final Thread thread = FlowRunnerTestUtil.startThread(this.runner);
  // Flow stays FAILED_FINISHING while jobs are still running after the first failure.
  InteractiveTestJob.getTestJob("jobA").failJob();
  assertFlowStatus(flow, Status.FAILED_FINISHING);
  InteractiveTestJob.getTestJob("jobB").failJob();
  assertFlowStatus(flow, Status.FAILED_FINISHING);
  InteractiveTestJob.getTestJob("jobC").succeedJob();
  assertFlowStatus(flow, Status.FAILED)
;
  thread.join();
  verify(emailAlerter).alertOnError(flow, "Flow finished");
}
|
There seems to be an actual race condition bug in the runtime code. See: issue #1921: Flaky
test FlowRunnerTestYaml & issue #1311: Potential race condition between flowRunner thread and
jetty killing thread. Disable this test until the potential bug is fixed or new DAG engine
code is ready.
|
testAlertOnFlowFinished
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
Apache-2.0
|
/**
 * Enables notify-on-first-failure, fails jobA and jobB while jobC succeeds, and
 * verifies exactly one first-error alert is sent.
 */
@Test
public void testAlertOnFirstError() throws Exception {
  setUp(ALERT_FLOW_YAML_DIR, ALERT_FLOW_YAML_FILE);
  final Emailer emailAlerter = mock(Emailer.class);
  final Props azkabanProps = new Props();
  azkabanProps.put(ConfigurationKeys.AZKABAN_EXECUTION_DISPATCH_METHOD, "POLL");
  final AlerterHolder alerterHolder = new AlerterHolder(azkabanProps, emailAlerter);
  final ExecutionOptions executionOptions = new ExecutionOptions();
  executionOptions.setNotifyOnFirstFailure(true);
  this.runner = this.testUtil.createFromFlowMap(null, ALERT_FLOW_NAME, executionOptions,
      new HashMap<>(), azkabanProps, alerterHolder);
  final ExecutableFlow flow = this.runner.getExecutableFlow();
  final Thread thread = FlowRunnerTestUtil.startThread(this.runner);
  InteractiveTestJob.getTestJob("jobA").failJob();
  assertFlowStatus(flow, Status.FAILED_FINISHING);
  InteractiveTestJob.getTestJob("jobB").failJob();
  assertFlowStatus(flow, Status.FAILED_FINISHING);
  InteractiveTestJob.getTestJob("jobC").succeedJob();
  assertFlowStatus(flow, Status.FAILED);
  thread.join();
  // Even with two failed jobs, the first-error alert must fire exactly once.
  verify(emailAlerter, times(1)).alertOnFirstError(flow);
}
|
There seems to be an actual race condition bug in the runtime code. See: issue #1921: Flaky
test FlowRunnerTestYaml & issue #1311: Potential race condition between flowRunner thread and
jetty killing thread. Disable this test until the potential bug is fixed or new DAG engine
code is ready.
|
testAlertOnFirstError
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
Apache-2.0
|
/**
 * Builds the test utility for the given project directory and stubs the project
 * loader so that version lookups and flow-file downloads resolve to the local YAML
 * fixture.
 *
 * @param projectDir   fixture directory containing the flow definition
 * @param flowYamlFile name of the YAML flow file inside the fixture directory
 */
private void setUp(final String projectDir, final String flowYamlFile) throws Exception {
  this.testUtil = new FlowRunnerTestUtil(projectDir, this.temporaryFolder);
  final Project project = this.testUtil.getProject();
  when(this.testUtil.getProjectLoader()
      .getLatestFlowVersion(project.getId(), project.getVersion(), flowYamlFile))
      .thenReturn(1);
  when(this.testUtil.getProjectLoader()
      .getUploadedFlowFile(eq(project.getId()), eq(project.getVersion()),
          eq(flowYamlFile),
          eq(1), any(File.class)))
      .thenReturn(
          ExecutionsTestUtil.getFlowFile(projectDir, flowYamlFile));
}
|
There seems to be an actual race condition bug in the runtime code. See: issue #1921: Flaky
test FlowRunnerTestYaml & issue #1311: Potential race condition between flowRunner thread and
jetty killing thread. Disable this test until the potential bug is fixed or new DAG engine
code is ready.
|
setUp
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/FlowRunnerYamlTest.java
|
Apache-2.0
|
/**
 * Builds a canned three-image version set (azkaban-base, azkaban-config, spark).
 *
 * @return a version set with a fixed JSON payload, md5 hex and id 1
 */
private VersionSet createVersionSet() {
  final String versionSetJson = "{\"azkaban-base\":{\"version\":\"7.0.4\",\"path\":\"path1\","
      + "\"state\":\"ACTIVE\"},\"azkaban-config\":{\"version\":\"9.1.1\",\"path\":\"path2\","
      + "\"state\":\"ACTIVE\"},\"spark\":{\"version\":\"8.0\",\"path\":\"path3\","
      + "\"state\":\"ACTIVE\"}}";
  final String versionSetMd5 = "43966138aebfdc4438520cc5cd2aefa8";
  return new VersionSet(versionSetJson, versionSetMd5, 1);
}
|
Creates a new version set from scratch
@return a new version set
|
createVersionSet
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/JobRunnerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/JobRunnerTest.java
|
Apache-2.0
|
/**
 * Deleting everything in the cache to accommodate a new item: the incoming 7,000,000
 * bytes force every existing project dir out.
 *
 * <p>The doc comment was previously placed after {@code @Test}, where the javadoc tool
 * does not attach it to the method; it now precedes the annotation.</p>
 */
@Test
public void testDeletingAll() {
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.3);
  cleaner.deleteProjectDirsIfNecessary(7000000);
  cleaner.finishPendingCleanup();
  assertThat(this.cacheDir.list()).hasSize(0);
}
|
Deleting everything in the cache to accommodate new item.
|
testDeletingAll
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Deleting the two least recently used items in the cache to accommodate a new item;
 * only project dir "3.1" survives.
 *
 * <p>The doc comment was previously placed after {@code @Test}, where the javadoc tool
 * does not attach it to the method; it now precedes the annotation.</p>
 */
@Test
public void testDeletingTwoLRUItems() {
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.7);
  cleaner.deleteProjectDirsIfNecessary(3000000);
  cleaner.finishPendingCleanup();
  assertThat(this.cacheDir.list()).hasSize(1);
  assertThat(this.cacheDir.list()).contains("3.1");
}
|
Deleting two least recently used items in the cache to accommodate new item.
|
testDeletingTwoLRUItems
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Deleting only the least recently used item in the cache to accommodate a new item;
 * project dirs "3.1" and "2.1" survive.
 *
 * <p>The doc comment was previously placed after {@code @Test}, where the javadoc tool
 * does not attach it to the method; it now precedes the annotation.</p>
 */
@Test
public void testDeletingOneLRUItem() {
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.7);
  cleaner.deleteProjectDirsIfNecessary(2000000);
  cleaner.finishPendingCleanup();
  assertThat(this.cacheDir.list()).hasSize(2);
  assertThat(this.cacheDir.list()).contains("3.1");
  assertThat(this.cacheDir.list()).contains("2.1");
}
|
Deleting the least recently used item in the cache to accommodate new item.
|
testDeletingOneLRUItem
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Puts enough items in the cache to invoke the throttle condition (second threshold
 * constructor argument); only project dir "3.1" remains, with no explicit
 * finishPendingCleanup call.
 *
 * <p>The doc comment was previously placed after {@code @Test}, where the javadoc tool
 * does not attach it to the method; it now precedes the annotation.</p>
 */
@Test
public void testThrottleCondition() {
  final ProjectCacheCleaner cleaner = new ProjectCacheCleaner(this.cacheDir, 0.65, 0.7);
  cleaner.deleteProjectDirsIfNecessary(3000000);
  assertThat(this.cacheDir.list()).hasSize(1);
  assertThat(this.cacheDir.list()).contains("3.1");
}
|
Put enough items in the cache to invoke throttle condition.
|
testThrottleCondition
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/ProjectCacheCleanerTest.java
|
Apache-2.0
|
/**
 * Verifies that watching a status created in a terminal state (SKIPPED) returns almost
 * immediately (diff under 100 ms).
 *
 * <p>Ignored: timing assertions are flaky on CI (see accompanying TODO note).</p>
 */
@Ignore
@Test
public void testFinishedBlock() {
  final BlockingStatus status = new BlockingStatus(1, "test", Status.SKIPPED);
  final WatchingThread thread = new WatchingThread(status);
  thread.start();
  try {
    thread.join();
  } catch (final InterruptedException e) {
    // Restore the interrupt flag instead of silently swallowing the interruption.
    Thread.currentThread().interrupt();
    e.printStackTrace();
  }
  System.out.println("Diff " + thread.getDiff());
  Assert.assertTrue(thread.getDiff() < 100);
}
|
TODO: Ignore this test at present since travis in Github can not always pass this test. We will
modify the below code to make travis pass in future.
|
testFinishedBlock
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
Apache-2.0
|
/**
 * Verifies that a watcher blocks on a QUEUED status until it transitions to SUCCEEDED
 * (~3 s later) and that the measured block time falls in [3000, 3100) ms.
 *
 * <p>Ignored: timing assertions are flaky on CI (see accompanying TODO note).</p>
 */
@Ignore
@Test
public void testUnfinishedBlock() throws InterruptedException {
  final BlockingStatus status = new BlockingStatus(1, "test", Status.QUEUED);
  final WatchingThread thread = new WatchingThread(status);
  thread.start();
  Thread.sleep(3000);
  status.changeStatus(Status.SUCCEEDED);
  thread.join();
  System.out.println("Diff " + thread.getDiff());
  Assert.assertTrue(thread.getDiff() >= 3000 && thread.getDiff() < 3100);
}
|
TODO: Ignore this test at present since travis in Github can not always pass this test. We will
modify the below code to make travis pass in future.
|
testUnfinishedBlock
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
Apache-2.0
|
/**
 * Verifies that a watcher keeps blocking across an intermediate non-terminal change
 * (QUEUED -&gt; PAUSED) and only unblocks on the terminal FAILED status, with a total
 * block time in [4000, 4100) ms.
 *
 * <p>Ignored: timing assertions are flaky on CI (see accompanying TODO note).</p>
 */
@Ignore
@Test
public void testUnfinishedBlockSeveralChanges() throws InterruptedException {
  final BlockingStatus status = new BlockingStatus(1, "test", Status.QUEUED);
  final WatchingThread thread = new WatchingThread(status);
  thread.start();
  Thread.sleep(3000);
  status.changeStatus(Status.PAUSED);
  Thread.sleep(1000);
  status.changeStatus(Status.FAILED);
  thread.join(1000);
  System.out.println("Diff " + thread.getDiff());
  Assert.assertTrue(thread.getDiff() >= 4000 && thread.getDiff() < 4100);
}
|
TODO: Ignore this test at present since travis in Github can not always pass this test. We will
modify the below code to make travis pass in future.
|
testUnfinishedBlockSeveralChanges
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.